diff --git a/.asf.yaml b/.asf.yaml deleted file mode 100644 index ac29efed9ff..00000000000 --- a/.asf.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -notifications: - commits: commits@cassandra.apache.org - issues: commits@cassandra.apache.org - pullrequests: pr@cassandra.apache.org - jira_options: link worklog - -github: - description: "Java Driver for Apache Cassandra®" - homepage: https://cassandra.apache.org/ - enabled_merge_buttons: - squash: false - merge: false - rebase: true - features: - wiki: false - issues: false - projects: false - autolink_jira: - - CASSANDRA - - CASSJAVA diff --git a/.gitignore b/.gitignore index 07449882cc0..060d9a0ff1d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,16 +1,15 @@ +target/ +cobertura-history/ +testing/ .settings +.classpath +.project +doc +docs +notes .DS_Store -.documenter_local_last_run /.idea *.iml -.classpath -.project - -.java-version -.flattened-pom.xml -.documenter_local_last_run -/docs -target/ -dependency-reduced-pom.xml +/driver-core/dependency-reduced-pom.xml diff --git a/.snyk b/.snyk deleted file mode 100644 index a081b17225c..00000000000 --- a/.snyk +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) 
under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities. -version: v1.22.2 -# ignores vulnerabilities until expiry date; change duration by modifying expiry date -ignore: - SNYK-JAVA-ORGGRAALVMSDK-2767964: - - '*': - reason: cannot upgrade to graal-sdk 22.1.0+ until we move off Java8, which is slated for later this year - expires: 2024-01-10T00:00:00.000Z - created: 2023-06-21T00:00:00.000Z - SNYK-JAVA-ORGGRAALVMSDK-2769618: - - '*': - reason: cannot upgrade to graal-sdk 22.1.0+ until we move off Java8, which is slated for later this year - expires: 2024-01-10T00:00:00.000Z - created: 2023-06-21T00:00:00.000Z - SNYK-JAVA-ORGGRAALVMSDK-5457933: - - '*': - reason: cannot upgrade to graal-sdk 22.1.0+ until we move off Java8, which is slated for later this year - expires: 2024-01-10T00:00:00.000Z - created: 2023-06-21T00:00:00.000Z diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000000..43ea0337216 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,5 @@ +language: java +jdk: + - openjdk6 + - oraclejdk7 + - oraclejdk8 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 53857383cf2..1fd91f997aa 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,534 +1,100 @@ - - # Contributing guidelines -## Code 
formatting - -### Java - -We follow the [Google Java Style Guide](https://google.github.io/styleguide/javaguide.html). See -https://github.com/google/google-java-format for IDE plugins. The rules are not configurable. - -The build will fail if the code is not formatted. To format all files from the command line, run: - -``` -mvn fmt:format -``` - -Some aspects are not covered by the formatter: braces must be used with `if`, `else`, `for`, `do` -and `while` statements, even when the body is empty or contains only a single statement. - -### XML - -The build will fail if XML files are not formatted correctly. Run the following command before you -commit: - -```java -mvn xml-format:xml-format -``` - -The formatter does not enforce a maximum line length, but please try to keep it below 100 characters -to keep files readable across all mediums (IDE, terminal, Github...). - -### Other text files (markdown, etc) - -Similarly, enforce a right margin of 100 characters in those files. Editors and IDEs generally have -a way to configure this (for IDEA, install the "Wrap to column" plugin). - -## Coding style -- production code - -Do not use static imports. They make things harder to understand when you look at the code -someplace where you don't have IDE support, like Github's code view. - -Avoid abbreviations in class and variable names. A good rule of thumb is that you should only use -them if you would also do so verbally, for example "id" and "config" are probably reasonable. -Single-letter variables are permissible if the variable scope is only a few lines, or for commonly -understood cases (like `i` for a loop index). - -Keep source files short. Short files are easy to understand and test. The average should probably -be around 200-300 lines. - -### Javadoc - -All types in "API" packages must be documented. 
For "internal" packages, documentation is optional, -but in no way discouraged: it's generally a good idea to have a class-level comment that explains -where the component fits in the architecture, and anything else that you feel is important. - -You don't need to document every parameter or return type, or even every method. Don't document -something if it is completely obvious, we don't want to end up with this: - -```java -/** - * Returns the name. - * - * @return the name - */ -String getName(); -``` - -On the other hand, there is often something useful to say about a method, so most should have at -least a one-line comment. Use common sense. - -Driver users coding in their IDE should find the right documentation at the right time. Try to -think of how they will come into contact with the class. For example, if a type is constructed with -a builder, each builder method should probably explain what the default is when you don't call it. - -Avoid using too many links, they can make comments harder to read, especially in the IDE. Link to a -type the first time it's mentioned, then use a text description ("this registry"...) or an `@code` -block. Don't link to a class in its own documentation. Don't link to types that appear right below -in the documented item's signature. - -```java -/** -* @return this {@link Builder} <-- completely unnecessary -*/ -Builder withLimit(int limit) { -``` - -### Logs - -We use SLF4J; loggers are declared like this: - -```java -private static final Logger LOG = LoggerFactory.getLogger(TheEnclosingClass.class); -``` - -Logs are intended for two personae: - -* Ops who manage the application in production. -* Developers (maybe you) who debug a particular issue. - -The first 3 log levels are for ops: - -* `ERROR`: something that renders the driver -- or a part of it -- completely unusable. An action is - required to fix it: bouncing the client, applying a patch, etc. 
-* `WARN`: something that the driver can recover from automatically, but indicates a configuration or - programming error that should be addressed. For example: the driver connected successfully, but - one of the contact points in the configuration was malformed; the same prepared statement is being - prepared multiple time by the application code. -* `INFO`: something that is part of the normal operation of the driver, but might be useful to know - for an operator. For example: the driver has initialized successfully and is ready to process - queries; an optional dependency was detected in the classpath and activated an enhanced feature. - -Do not log errors that are rethrown to the client (such as the error that you're going to complete a -request with). This is annoying for ops because they see a lot of stack traces that require no -actual action on their part, because they're already handled by application code. - -Similarly, do not log stack traces for non-critical errors. If you still want the option to get the -trace for debugging, see the `Loggers.warnWithException` utility. - -The last 2 levels are for developers, to help follow what the driver is doing from a "black box" -perspective (think about debugging an issue remotely, and all you have are the logs). - -* `TRACE`: anything that happens **for every user request**. Not only request handling, but all - related components (e.g. timestamp generators, policies, etc). -* `DEBUG`: everything else. For example, node state changes, control connection activity, etc. - -Note that `DEBUG` and `TRACE` can coexist within the same component, for example the LBP -initializing is a one-time event, but returning a query plan is a per-request event. - -Logs statements start with a prefix that identifies its origin, for example: - -* for components that are unique to the cluster instance, just the cluster name: `[c0]`. -* for sessions, the cluster name + a generated unique identifier: `[c0|s0]`. 
-* for channel pools, the session identifier + the address of the node: `[c0|s0|/127.0.0.2:9042]`. -* for channels, the identifier of the owner (session or control connection) + the Netty identifier, - which indicates the local and remote ports: - `[c0|s0|id: 0xf9ef0b15, L:/127.0.0.1:51482 - R:/127.0.0.1:9042]`. -* for request handlers, the session identifier, a unique identifier, and the index of the - speculative execution: `[c0|s0|1077199500|0]`. - -Tests run with the configuration defined in `src/test/resources/logback-test.xml`. The default level -for driver classes is `WARN`, but you can override it with a system property: `-DdriverLevel=DEBUG`. -A nice setup is to use `DEBUG` when you run from your IDE, and keep the default for the command -line. - -When you add or review new code, take a moment to run the tests in `DEBUG` mode and check if the -output looks good. - -### Don't abuse the stream API - -The `java.util.stream` API is often used (abused?) as a "functional API for collections": - -```java -List sizes = words.stream().map(String::length).collect(Collectors.toList()); -``` - -The perceived advantages of this approach over traditional for-loops are debatable: - -* readability: this is highly subjective. But consider the following: - * everyone can read for-loops, whether they are familiar with the Stream API or not. The opposite - is not true. - * the stream API does not spell out all the details: what kind of list does `Collectors.toList()` - return? Is it pre-sized? Mutable? Thread-safe? - * the stream API looks pretty on simple examples, but things can get ugly fast. Try rewriting - `NetworkTopologyReplicationStrategy` with streams. -* concision: this is irrelevant. When we look at code we care about maintainability, not how many - keystrokes the author saved. The for-loop version of the above example is just 5 lines long, and - your brain doesn't take longer to parse it. - -The bottom line: don't try to "be functional" at all cost. 
Plain old for-loops are often just as -simple. - -### Never assume a specific format for `toString()` - -Only use `toString()` for debug logs or exception messages, and always assume that its format is -unspecified and can change at any time. - -If you need a specific string representation for a class, make it a dedicated method with a -documented format, for example `toCqlLiteral`. Otherwise it's too easy to lose track of the intended -usage and break things: for example, someone modifies your `toString()` method to make their logs -prettier, but unintentionally breaks the script export feature that expected it to produce CQL -literals. - -`toString()` can delegate to `toCqlLiteral()` if that is appropriate for logs. - - -### Concurrency annotations - -We use the [JCIP annotations](http://jcip.net/annotations/doc/index.html) to document thread-safety -policies. - -Add them for all new code, with the exception of: - -* enums and interfaces; -* utility classes (only static methods); -* test code. - -Make sure you import the types from `net.jcip`, there are homonyms in the classpath. - - -### Nullability annotations - -We use the [Spotbugs annotations](https://spotbugs.github.io) to document nullability of parameters, -method return types and class members. - -Please annotate any new class or interface with the appropriate annotations: `@NonNull`, `@Nullable`. Make sure you import -the types from `edu.umd.cs.findbugs.annotations`, there are homonyms in the classpath. 
- - -## Coding style -- test code +## Working on an issue -Static imports are permitted in a couple of places: -* All AssertJ methods, e.g.: - ```java - assertThat(node.getDatacenter()).isNotNull(); - fail("Expecting IllegalStateException to be thrown"); - ``` -* All Mockito methods, e.g.: - ```java - when(codecRegistry.codecFor(DataTypes.INT)).thenReturn(codec); - verify(codec).decodePrimitive(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - ``` -* All Awaitility methods, e.g.: - ```java - await().until(() -> somethingBecomesTrue()); - ``` +Before starting to work on something, please comment in JIRA or ask on the mailing list +to make sure nobody else is working on it. -Test methods names use lower snake case, generally start with `should`, and clearly indicate the -purpose of the test, for example: `should_fail_if_key_already_exists`. If you have trouble coming -up with a simple name, it might be a sign that your test does too much, and should be split. +If a fix applies both to 2.0 and 2.1, work on the 2.0 branch, your commit will eventually +get merged in 2.1. -We use AssertJ (`assertThat`) for assertions. Don't use JUnit assertions (`assertEquals`, -`assertNull`, etc). +Before you send your pull request, make sure that: -Don't try to generify at all cost: a bit of duplication is acceptable, if that helps keep the tests -simple to understand (a newcomer should be able to understand how to fix a failing test without -having to read too much code). +- you have a unit test that failed before the fix and succeeds after. +- the fix is mentioned in `driver-core/CHANGELOG.rst`. +- the commit message include the reference of the JIRA ticket for automatic linking + (example: `Fix NPE when a connection fails during pool construction (JAVA-503).`). -Test classes can be a bit longer, since they often enumerate similar test cases. 
You can also -factor some common code in a parent abstract class named with "XxxTestBase", and then split -different families of tests into separate child classes. For example, `CqlRequestHandlerTestBase`, -`CqlRequestHandlerRetryTest`, `CqlRequestHandlerSpeculativeExecutionTest`... +As long as your pull request is not merged, it's OK to rebase your branch and push with +`--force`. -### Unit tests +If you want to contribute but don't have a specific issue in mind, the [lhf](https://datastax-oss.atlassian.net/secure/IssueNavigator.jspa?reset=true&mode=hide&jqlQuery=project%20%3D%20JAVA%20AND%20status%20in%20(Open%2C%20Reopened)%20AND%20labels%20%3D%20lhf) +label in JIRA is a good place to start: it marks "low hanging fruits" that don't require +in-depth knowledge of the codebase. -They live in the same module as the code they are testing. They should be fast and not start any -external process. They usually target one specific component and mock the rest of the driver -context. +## Editor configuration -### Integration tests +### General -They live in the `integration-tests` module, and exercise the whole driver stack against an external -process, which can be either one of: -* [Simulacron](https://github.com/datastax/simulacron): simulates Cassandra nodes on loopback - addresses; your test must "prime" data, i.e. tell the nodes what results to return for - pre-determined queries. - - For an example of a Simulacron-based test, see `NodeTargetingIT`. -* [CCM](https://github.com/pcmanus/ccm): launches actual Cassandra nodes locally. The `ccm` - executable must be in the path. - - You can pass a `-Dccm.version` system property to the build to target a particular Cassandra - version (it defaults to 3.11.0). `-Dccm.directory` allows you to point to a local installation - -- this can be a checkout of the Cassandra codebase, as long as it's built. See `CcmBridge` in - the driver codebase for more details. 
- - For an example of a CCM-based test, see `PlainTextAuthProviderIT`. +We consider automatic formatting as a help, not a crutch. Sometimes it makes sense to +break the rules to make the code more readable, for instance aligning columns (see the +constant declarations in `DataType.Name` for an example of this). -#### Categories +**Please do not reformat whole files, only the lines that you have added or modified**. -Integration tests are divided into three categories: -##### Parallelizable tests +### Eclipse -These tests can be run in parallel, to speed up the build. They either use: -* dedicated Simulacron instances. These are lightweight, and Simulacron will manage the ports to - make sure that there are no collisions. -* a shared, one-node CCM cluster. Each test works in its own keyspace. +Formatter: -The build runs them with a configurable degree of parallelism (currently 8). The shared CCM cluster -is initialized the first time it's used, and stopped before moving on to serial tests. Note that we -run with `parallel=classes`, which means methods within the same class never run concurrent to each -other. +- Preferences > Java > Code Style > Formatter. +- Click "Import". +- Select `src/main/config/ide/eclipse-formatter.xml`. -To make an integration test parallelizable, annotate it with `@Category(ParallelizableTests.class)`. -If you use CCM, it **must** be with `CcmRule`. +Import order: -For an example of a Simulacron-based parallelizable test, see `NodeTargetingIT`. For a CCM-based -test, see `DirectCompressionIT`. +- Preferences > Java > Code Style > Organize imports. +- Click "Import". +- Select `src/main/config/ide/eclipse.importorder`. -##### Serial tests +Prevent trailing whitespaces: -These tests cannot run in parallel, in general because they require CCM clusters of different sizes, -or with a specific configuration (we never run more than one CCM cluster simultaneously: it would be -too resource-intensive, and too complicated to manage all the ports). 
+- Preferences > Java > Editor > Save Actions. +- Check "Perform the selected actions on save". +- Ensure "Format source code" and "Organize imports" are unchecked. +- Check "Additional actions". +- Click "Configure". +- In the "Code Organizing" tab, check "Remove trailing whitespace" and "All lines". +- Click "OK" (the text area should only have one action "Remove trailing white spaces"). -The build runs them one by one, after the parallelizable tests. -To make an integration test serial, do not annotate it with `@Category`. The CCM rule **must** be -`CustomCcmRule`. +### IntelliJ IDEA -For an example, see `DefaultLoadBalancingPolicyIT`. - -Note: if multiple serial tests have a common "base" class, do not pull up `CustomCcmRule`, each -child class must have its own instance. Otherwise they share the same CCM instance, and the first -one destroys it on teardown. See `TokenITBase` for how to organize code in those cases. +- File > Import Settings... +- Select `src/main/config/ide/intellij-code-style.jar`. -##### Isolated tests - -Not only can those tests not run in parallel, they also require specific environment tweaks, -typically system properties that need to be set before initialization. - -The build runs them one by one, *each in its own JVM fork*, after the serial tests. - -To isolate an integration test, annotate it with `@Category(IsolatedTests.class)`. The CCM rule -**must** be `CustomCcmRule`. - -For an example, see `HeapCompressionIT`. - -#### About test rules - -Do not mix `CcmRule` and `SimulacronRule` in the same test. It makes things harder to follow, and -can be inefficient (if the `SimulacronRule` is method-level, it will create a Simulacron cluster for -every test method, even those that only need CCM). - -##### Class-level rules - -Rules annotated with `@ClassRule` wrap the whole test class, and are reused across methods. Try to -use this as much as possible, as it's more efficient. 
The fields need to be static; also make them -final and use constant naming conventions, like `CCM_RULE`. - -When you use a server rule (`CcmRule` or `SimulacronRule`) and a `SessionRule` at the same level, -wrap them into a rule chain to ensure proper initialization order: - -```java -private static final CcmRule CCM_RULE = CcmRule.getInstance(); -private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - -@ClassRule -public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); -``` - -##### Method-level rules - -Rules annotated with `@Rule` wrap each test method. Use lower-camel case for field names: - -```java -private CcmRule ccmRule = CcmRule.getInstance(); -private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - -@ClassRule -public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); -``` - -Only use this for: - -* CCM tests that use `@CassandraRequirement` or `@DseRequirement` restrictions at the method level - (ex: `BatchStatementIT`). -* tests where you *really* need to restart from a clean state for every method. - -##### Mixed - -It's also possible to use a `@ClassRule` for CCM / Simulacron, and a `@Rule` for the session rule. -In that case, you don't need to use a rule chain. +This should add a new Code Style scheme called "java-driver". ## Running the tests -### Unit tests - - mvn clean test - -This currently takes about 30 seconds. The goal is to keep it within a couple of minutes (it runs -for each commit if you enable the pre-commit hook -- see below). - -### Integration tests - - mvn clean verify - -This currently takes about 9 minutes. We don't have a hard limit, but ideally it should stay within -30 minutes to 1 hour. - -You can skip test categories individually with `-DskipParallelizableITs`, `-DskipSerialITs` and -`-DskipIsolatedITs` (`-DskipITs` still works to skip them all at once). 
- -### Configuring MacOS for Simulacron - -Simulacron (used in integration tests) relies on loopback aliases to simulate multiple nodes. On -Linux or Windows, you shouldn't have anything to do. On MacOS, run this script: - -``` -#!/bin/bash -for sub in {0..4}; do - echo "Opening for 127.0.$sub" - for i in {0..255}; do sudo ifconfig lo0 alias 127.0.$sub.$i up; done -done -``` - -Note that this is known to cause temporary increased CPU usage in OS X initially while mDNSResponder -acclimates itself to the presence of added IP addresses. This lasts several minutes. Also, this does -not survive reboots. +We use TestNG. There are 3 test categories: +- "unit": pure Java unit tests. +- "short" and "long": integration tests that launch Cassandra instances. -## License headers - -The build will fail if some license headers are missing. To update all files from the command line, -run: - -``` -mvn license:format -``` - -## Pre-commit hook (highly recommended) - -Ensure `pre-commit.sh` is executable, then run: - -``` -ln -s ../../pre-commit.sh .git/hooks/pre-commit -``` - -This will only allow commits if the tests pass. It is also a good reminder to keep the test suite -short. - -Note: the tests run on the current state of the working directory. I tried to add a `git stash` in -the script to only test what's actually being committed, but I couldn't get it to run reliably -(it's still in there but commented). Keep this in mind when you commit, and don't forget to re-add -the changes if the first attempt failed and you fixed the tests. 
- -## Speeding up the build for local tests - -If you need to install something in your local repository quickly, you can use the `fast` profile to -skip all "non-essential" checks (licenses, formatting, tests, etc): - -``` -mvn clean install -Pfast -``` - -You can speed things up even more by targeting specific modules with the `-pl` option: +The Maven build uses profiles named after the categories to choose which tests to run: ``` -mvn clean install -Pfast -pl core,query-builder,mapper-runtime,mapper-processor,bom +mvn test -Pshort ``` -Please run the normal build at least once before you push your changes. +The default is "unit". Each profile runs the ones before it ("short" runs unit, etc.) -## Commits +Integration tests use [CCM](https://github.com/pcmanus/ccm) to bootstrap Cassandra instances. +Two Maven properties control its execution: -Keep your changes **focused**. Each commit should have a single, clear purpose expressed in its -message. +- `cassandra.version`: the Cassandra version. This has a default value in the root POM, + you can override it on the command line (`-Dcassandra.version=...`). +- `ipprefix`: the prefix of the IP addresses that the Cassandra instances will bind to (see + below). This defaults to `127.0.1.`. -Resist the urge to "fix" cosmetic issues (add/remove blank lines, move methods, etc.) in existing -code. This adds cognitive load for reviewers, who have to figure out which changes are relevant to -the actual issue. If you see legitimate issues, like typos, address them in a separate commit (it's -fine to group multiple typo fixes in a single commit). -Isolate trivial refactorings into separate commits. For example, a method rename that affects dozens -of call sites can be reviewed in a few seconds, but if it's part of a larger diff it gets mixed up -with more complex changes (that might affect the same lines), and reviewers have to check every -line. 
+CCM launches multiple Cassandra instances on localhost by binding to different addresses. The +driver uses up to 6 different instances (127.0.1.1 to 127.0.1.6 with the default prefix). +You'll need to define loopback aliases for this to work, on Mac OS X your can do it with: -Commit message subjects start with a capital letter, use the imperative form and do **not** end -with a period: - -* correct: "Add test for CQL request handler" -* incorrect: "~~Added test for CQL request handler~~" -* incorrect: "~~New test for CQL request handler~~" - -Avoid catch-all messages like "Minor cleanup", "Various fixes", etc. They don't provide any useful -information to reviewers, and might be a sign that your commit contains unrelated changes. - -We don't enforce a particular subject line length limit, but try to keep it short. - -You can add more details after the subject line, separated by a blank line. The following pattern -(inspired by [Netty](http://netty.io/wiki/writing-a-commit-message.html)) is not mandatory, but -welcome for complex changes: - -``` -One line description of your change - -Motivation: - -Explain here the context, and why you're making that change. -What is the problem you're trying to solve. - -Modifications: - -Describe the modifications you've done. - -Result: - -After your change, what will change. ``` - -## Pull requests - -Like commits, pull requests should be focused on a single, clearly stated goal. - -Don't base a pull request onto another one, it's too complicated to follow two branches that evolve -at the same time. If a ticket depends on another, wait for the first one to be merged. - -If you have to address feedback, avoid rewriting the history (e.g. squashing or amending commits): -this makes the reviewers' job harder, because they have to re-read the full diff and figure out -where your new changes are. Instead, push a new commit on top of the existing history; it will be -squashed later when the PR gets merged. 
If the history is complex, it's a good idea to indicate in -the message where the changes should be squashed: - -``` -* 20c88f4 - Address feedback (to squash with "Add metadata parsing logic") (36 minutes ago) -* 7044739 - Fix various typos in Javadocs (2 days ago) -* 574dd08 - Add metadata parsing logic (2 days ago) +sudo ifconfig lo0 alias 127.0.1.1 up +sudo ifconfig lo0 alias 127.0.1.2 up +... ``` - -(Note that the message refers to the other commit's subject line, not the SHA-1. This way it's still -relevant if there are intermediary rebases.) - -If you need new stuff from the base branch, it's fine to rebase and force-push, as long as you don't -rewrite the history. Just give a heads up to the reviewers beforehand. Don't push a merge commit to -a pull request. diff --git a/Jenkinsfile-asf b/Jenkinsfile-asf deleted file mode 100644 index 4b5041903c1..00000000000 --- a/Jenkinsfile-asf +++ /dev/null @@ -1,81 +0,0 @@ -#!groovy - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -pipeline { - agent { - label 'cassandra-small' - } - - triggers { - // schedules only run against release branches (i.e. 3.x, 4.x, 4.5.x, etc.) - cron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? 
'@weekly' : '') - } - - stages { - stage('Matrix') { - matrix { - axes { - axis { - name 'TEST_JAVA_VERSION' - values 'openjdk@1.8.0-292', 'openjdk@1.11.0-9', 'openjdk@1.17.0', 'openjdk@1.21.0' - } - axis { - name 'SERVER_VERSION' - values '3.11', - '4.0', - '4.1', - '5.0' - } - } - stages { - stage('Tests') { - agent { - label 'cassandra-medium' - } - steps { - script { - executeTests() - junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true - junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true - } - } - } - } - } - } - } -} - -def executeTests() { - def testJavaMajorVersion = (TEST_JAVA_VERSION =~ /@(?:1\.)?(\d+)/)[0][1] - sh """ - container_id=\$(docker run -td -e TEST_JAVA_VERSION=${TEST_JAVA_VERSION} -e SERVER_VERSION=${SERVER_VERSION} -e TEST_JAVA_MAJOR_VERSION=${testJavaMajorVersion} -v \$(pwd):/home/docker/cassandra-java-driver apache.jfrog.io/cassan-docker/apache/cassandra-java-driver-testing-ubuntu2204 'sleep 2h') - docker exec --user root \$container_id bash -c \"sudo bash /home/docker/cassandra-java-driver/ci/create-user.sh docker \$(id -u) \$(id -g) /home/docker/cassandra-java-driver\" - docker exec --user docker \$container_id './cassandra-java-driver/ci/run-tests.sh' - ( nohup docker stop \$container_id >/dev/null 2>/dev/null & ) - """ -} - -// branch pattern for cron -// should match 3.x, 4.x, 4.5.x, etc -def branchPatternCron() { - ~'((\\d+(\\.[\\dx]+)+))' -} diff --git a/Jenkinsfile-datastax b/Jenkinsfile-datastax deleted file mode 100644 index 602f33101ca..00000000000 --- a/Jenkinsfile-datastax +++ /dev/null @@ -1,639 +0,0 @@ -#!groovy -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -def initializeEnvironment() { - env.DRIVER_DISPLAY_NAME = 'Java Driver for Apache CassandraⓇ' - env.DRIVER_METRIC_TYPE = 'oss' - - env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" - env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" - env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" - - env.MAVEN_HOME = "${env.HOME}/.mvn/apache-maven-3.8.8" - env.PATH = "${env.MAVEN_HOME}/bin:${env.PATH}" - - /* - * As of JAVA-3042 JAVA_HOME is always set to JDK8 and this is currently necessary for mvn compile and DSE Search/Graph. - * To facilitate testing with JDK11/17 we feed the appropriate JAVA_HOME into the maven build via commandline. - * - * Maven command-line flags: - * - -DtestJavaHome=/path/to/java/home: overrides JAVA_HOME for surefire/failsafe tests, defaults to environment JAVA_HOME. - * - -Ptest-jdk-N: enables profile for running tests with a specific JDK version (substitute N for 8/11/17). - * - * Note test-jdk-N is also automatically loaded based off JAVA_HOME SDK version so testing with an older SDK is not supported. - * - * Environment variables: - * - JAVA_HOME: Path to JDK used for mvn (all steps except surefire/failsafe), Cassandra, DSE. 
- * - JAVA8_HOME: Path to JDK8 used for Cassandra/DSE if ccm determines JAVA_HOME is not compatible with the chosen backend. - * - TEST_JAVA_HOME: PATH to JDK used for surefire/failsafe testing. - * - TEST_JAVA_VERSION: TEST_JAVA_HOME SDK version number [8/11/17], used to configure test-jdk-N profile in maven (see above) - */ - - env.JAVA_HOME = sh(label: 'Get JAVA_HOME',script: '''#!/bin/bash -le - . ${JABBA_SHELL} - jabba which ${JABBA_VERSION}''', returnStdout: true).trim() - env.JAVA8_HOME = sh(label: 'Get JAVA8_HOME',script: '''#!/bin/bash -le - . ${JABBA_SHELL} - jabba which 1.8''', returnStdout: true).trim() - - sh label: 'Download Apache CassandraⓇ, DataStax Enterprise or DataStax HCD ',script: '''#!/bin/bash -le - . ${JABBA_SHELL} - jabba use 1.8 - . ${CCM_ENVIRONMENT_SHELL} ${SERVER_VERSION} - ''' - - if (env.SERVER_VERSION.split('-')[0] == 'dse') { - env.DSE_FIXED_VERSION = env.SERVER_VERSION.split('-')[1] - sh label: 'Update environment for DataStax Enterprise', script: '''#!/bin/bash -le - cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF -CCM_CASSANDRA_VERSION=${DSE_FIXED_VERSION} # maintain for backwards compatibility -CCM_VERSION=${DSE_FIXED_VERSION} -CCM_SERVER_TYPE=dse -DSE_VERSION=${DSE_FIXED_VERSION} -CCM_BRANCH=${DSE_FIXED_VERSION} -DSE_BRANCH=${DSE_FIXED_VERSION} -ENVIRONMENT_EOF - ''' - } - - if (env.SERVER_VERSION.split('-')[0] == 'hcd') { - env.HCD_FIXED_VERSION = env.SERVER_VERSION.split('-')[1] - sh label: 'Update environment for DataStax HCD', script: '''#!/bin/bash -le - cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF -CCM_CASSANDRA_VERSION=${HCD_FIXED_VERSION} # maintain for backwards compatibility -CCM_VERSION=${HCD_FIXED_VERSION} -CCM_SERVER_TYPE=hcd -HCD_VERSION=${HCD_FIXED_VERSION} -CCM_BRANCH=${HCD_FIXED_VERSION} -HCD_BRANCH=${HCD_FIXED_VERSION} -ENVIRONMENT_EOF - ''' - } - - sh label: 'Display Java and environment information',script: '''#!/bin/bash -le - # Load CCM environment variables - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - . ${JABBA_SHELL} - jabba use 1.8 - - java -version - mvn -v - printenv | sort - ''' -} - -def buildDriver(jabbaVersion) { - def buildDriverScript = '''#!/bin/bash -le - - . ${JABBA_SHELL} - jabba use '''+jabbaVersion+''' - - echo "Building with Java version '''+jabbaVersion+'''" - - mvn -B -V install -DskipTests -Dmaven.javadoc.skip=true - ''' - sh label: 'Build driver', script: buildDriverScript -} - -def executeTests() { - def testJavaHome = sh(label: 'Get TEST_JAVA_HOME',script: '''#!/bin/bash -le - . ${JABBA_SHELL} - jabba which ${JABBA_VERSION}''', returnStdout: true).trim() - def testJavaVersion = (JABBA_VERSION =~ /.*\.(\d+)/)[0][1] - - def executeTestScript = '''#!/bin/bash -le - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - . ${JABBA_SHELL} - jabba use 1.8 - - if [ "${JABBA_VERSION}" != "1.8" ]; then - SKIP_JAVADOCS=true - else - SKIP_JAVADOCS=false - fi - - INTEGRATION_TESTS_FILTER_ARGUMENT="" - if [ ! 
-z "${INTEGRATION_TESTS_FILTER}" ]; then - INTEGRATION_TESTS_FILTER_ARGUMENT="-Dit.test=${INTEGRATION_TESTS_FILTER}" - fi - printenv | sort - - mvn -B -V ${INTEGRATION_TESTS_FILTER_ARGUMENT} -T 1 verify \ - -Ptest-jdk-'''+testJavaVersion+''' \ - -DtestJavaHome='''+testJavaHome+''' \ - -DfailIfNoTests=false \ - -Dmaven.test.failure.ignore=true \ - -Dmaven.javadoc.skip=${SKIP_JAVADOCS} \ - -Dccm.version=${CCM_CASSANDRA_VERSION} \ - -Dccm.distribution=${CCM_SERVER_TYPE:cassandra} \ - -Dproxy.path=${HOME}/proxy \ - ${SERIAL_ITS_ARGUMENT} \ - ${ISOLATED_ITS_ARGUMENT} \ - ${PARALLELIZABLE_ITS_ARGUMENT} - ''' - echo "Invoking Maven with parameters test-jdk-${testJavaVersion} and testJavaHome = ${testJavaHome}" - sh label: 'Execute tests', script: executeTestScript -} - -def executeCodeCoverage() { - jacoco( - execPattern: '**/target/jacoco.exec', - classPattern: '**/classes', - sourcePattern: '**/src/main/java' - ) -} - -def notifySlack(status = 'started') { - // Notify Slack channel for every build except adhoc executions - if (params.ADHOC_BUILD_TYPE != 'BUILD-AND-EXECUTE-TESTS') { - // Set the global pipeline scoped environment (this is above each matrix) - env.BUILD_STATED_SLACK_NOTIFIED = 'true' - - def buildType = 'Commit' - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" - } - - def color = 'good' // Green - if (status.equalsIgnoreCase('aborted')) { - color = '808080' // Grey - } else if (status.equalsIgnoreCase('unstable')) { - color = 'warning' // Orange - } else if (status.equalsIgnoreCase('failed')) { - color = 'danger' // Red - } - - def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] -<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" - if (!status.equalsIgnoreCase('Started')) { - message += """ -${status} after ${currentBuild.durationString - ' and counting'}""" - } - 
- slackSend color: "${color}", - channel: "#java-driver-dev-bots", - message: "${message}" - } -} - -def describePerCommitStage() { - script { - currentBuild.displayName = "Per-Commit build" - currentBuild.description = 'Per-Commit build and testing of development Apache CassandraⓇ and current DataStax Enterprise against Oracle JDK 8' - } -} - -def describeAdhocAndScheduledTestingStage() { - script { - if (params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION') { - // Ad-hoc build - currentBuild.displayName = "Adhoc testing" - currentBuild.description = "Testing ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} against JDK version ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION}" - } else { - // Scheduled build - currentBuild.displayName = "${params.CI_SCHEDULE.toLowerCase().replaceAll('_', ' ').capitalize()} schedule" - currentBuild.description = "Testing server versions [${params.CI_SCHEDULE_SERVER_VERSIONS}] against JDK version ${params.CI_SCHEDULE_JABBA_VERSION}" - } - } -} - -// branch pattern for cron -// should match 3.x, 4.x, 4.5.x, etc -def branchPatternCron() { - ~"((\\d+(\\.[\\dx]+)+))" -} - -pipeline { - agent none - - // Global pipeline timeout - options { - timeout(time: 10, unit: 'HOURS') - buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts - numToKeepStr: '50')) // Keep only the last 50 build records - } - - parameters { - choice( - name: 'ADHOC_BUILD_TYPE', - choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], - description: '''

Perform a adhoc build operation

- - - - - - - - - - - - - - - -
ChoiceDescription
BUILDPerforms a Per-Commit build
BUILD-AND-EXECUTE-TESTSPerforms a build and executes the integration and unit tests
''') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', - choices: ['4.0', // Previous Apache CassandraⓇ - '4.1', // Previous Apache CassandraⓇ - '5.0', // Current Apache CassandraⓇ - 'dse-4.8.16', // Previous EOSL DataStax Enterprise - 'dse-5.0.15', // Long Term Support DataStax Enterprise - 'dse-5.1.35', // Legacy DataStax Enterprise - 'dse-6.0.18', // Previous DataStax Enterprise - 'dse-6.7.17', // Previous DataStax Enterprise - 'dse-6.8.30', // Current DataStax Enterprise - 'dse-6.9.0', // Current DataStax Enterprise - 'hcd-1.0.0', // Current DataStax HCD - 'ALL'], - description: '''Apache Cassandra® and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS builds - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ChoiceDescription
4.0Apache Cassandra® v4.0.x
4.1Apache Cassandra® v4.1.x
5.0Apache Cassandra® v5.0.x
dse-4.8.16DataStax Enterprise v4.8.x (END OF SERVICE LIFE)
dse-5.0.15DataStax Enterprise v5.0.x (Long Term Support)
dse-5.1.35DataStax Enterprise v5.1.x
dse-6.0.18DataStax Enterprise v6.0.x
dse-6.7.17DataStax Enterprise v6.7.x
dse-6.8.30DataStax Enterprise v6.8.x
dse-6.9.0DataStax Enterprise v6.9.x
hcd-1.0.0DataStax HCD v1.0.x
''') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION', - choices: [ - '1.8', // Oracle JDK version 1.8 (current default) - 'openjdk@1.11', // OpenJDK version 11 - 'openjdk@1.17', // OpenJDK version 17 - 'openjdk@1.21' // OpenJDK version 21 - ], - description: '''JDK version to use for TESTING when running adhoc BUILD-AND-EXECUTE-TESTS builds. All builds will use JDK8 for building the driver - - - - - - - - - - - - - - - - - - - - - - - -
ChoiceDescription
1.8Oracle JDK version 1.8 (Used for compiling regardless of choice)
openjdk@1.11OpenJDK version 11
openjdk@1.17OpenJDK version 17
openjdk@1.21OpenJDK version 21
''') - booleanParam( - name: 'SKIP_SERIAL_ITS', - defaultValue: false, - description: 'Flag to determine if serial integration tests should be skipped') - booleanParam( - name: 'SKIP_ISOLATED_ITS', - defaultValue: false, - description: 'Flag to determine if isolated integration tests should be skipped') - booleanParam( - name: 'SKIP_PARALLELIZABLE_ITS', - defaultValue: false, - description: 'Flag to determine if parallel integration tests should be skipped') - string( - name: 'INTEGRATION_TESTS_FILTER', - defaultValue: '', - description: '''

Run only the tests whose name match patterns

- See Maven Failsafe Plugin for more information on filtering integration tests''') - choice( - name: 'CI_SCHEDULE', - choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS', 'MONTHLY'], - description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_SERVER_VERSIONS', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing server version(s) to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_JABBA_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing JDK version(s) to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - } - - triggers { - // schedules only run against release branches (i.e. 3.x, 4.x, 4.5.x, etc.) - parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ - # Every weekend (Saturday, Sunday) around 2:00 AM - H 2 * * 0 %CI_SCHEDULE=WEEKENDS;CI_SCHEDULE_SERVER_VERSIONS=4.0 4.1 5.0 dse-4.8.16 dse-5.0.15 dse-5.1.35 dse-6.0.18 dse-6.7.17;CI_SCHEDULE_JABBA_VERSION=1.8 - # Every weeknight (Monday - Friday) around 12:00 PM noon - H 12 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;CI_SCHEDULE_SERVER_VERSIONS=4.1 5.0 dse-6.8.30 dse-6.9.0 hcd-1.0.0;CI_SCHEDULE_JABBA_VERSION=openjdk@1.11 - H 12 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;CI_SCHEDULE_SERVER_VERSIONS=4.1 5.0 dse-6.8.30 dse-6.9.0 hcd-1.0.0;CI_SCHEDULE_JABBA_VERSION=openjdk@1.17 - """ : "") - } - - environment { - OS_VERSION = 'ubuntu/focal64/java-driver' - JABBA_SHELL = '/usr/lib/jabba/jabba.sh' - CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - SERIAL_ITS_ARGUMENT = "-DskipSerialITs=${params.SKIP_SERIAL_ITS}" - ISOLATED_ITS_ARGUMENT = "-DskipIsolatedITs=${params.SKIP_ISOLATED_ITS}" - PARALLELIZABLE_ITS_ARGUMENT = "-DskipParallelizableITs=${params.SKIP_PARALLELIZABLE_ITS}" - INTEGRATION_TESTS_FILTER = "${params.INTEGRATION_TESTS_FILTER}" - } 
- - stages { - stage ('Per-Commit') { - options { - timeout(time: 2, unit: 'HOURS') - } - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } - expression { params.CI_SCHEDULE_SERVER_VERSIONS == 'DO-NOT-CHANGE-THIS-SELECTION' } - expression { params.CI_SCHEDULE_JABBA_VERSION == 'DO-NOT-CHANGE-THIS-SELECTION' } - not { buildingTag() } - } - } - - matrix { - axes { - axis { - name 'SERVER_VERSION' - values '4.0', // Previous Apache CassandraⓇ - '5.0', // Current Apache CassandraⓇ - 'dse-6.8.30', // Current DataStax Enterprise - 'dse-6.9.0', // Current DataStax Enterprise - 'hcd-1.0.0' // Current DataStax HCD - } - axis { - name 'JABBA_VERSION' - values '1.8', // jdk8 - 'openjdk@1.11', // jdk11 - 'openjdk@1.17', // jdk17 - 'openjdk@1.21' // jdk21 - } - } - - agent { - label "${OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describePerCommitStage() - } - } - stage('Build-Driver') { - steps { - buildDriver('1.8') - } - } - stage('Execute-Tests') { - steps { - catchError { - // Use the matrix JDK for testing - executeTests() - } - } - post { - always { - /* - * Empty results are possible - * - * - Build failures during mvn verify may exist so report may not be available - */ - junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true - junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true - } - } - } - stage('Execute-Code-Coverage') { - // Ensure the code coverage is run only once per-commit - when { environment name: 'SERVER_VERSION', value: '4.0' } - steps { - executeCodeCoverage() - } - } - } - } - post { - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - 
notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - - stage('Adhoc-And-Scheduled-Testing') { - when { - beforeAgent true - allOf { - expression { (params.ADHOC_BUILD_TYPE == 'BUILD' && params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') || - params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } - not { buildingTag() } - anyOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } - expression { params.CI_SCHEDULE_SERVER_VERSIONS != 'DO-NOT-CHANGE-THIS-SELECTION' } - } - } - } - } - - environment { - SERVER_VERSIONS = "${params.CI_SCHEDULE_SERVER_VERSIONS == 'DO-NOT-CHANGE-THIS-SELECTION' ? params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION : params.CI_SCHEDULE_SERVER_VERSIONS}" - JABBA_VERSION = "${params.CI_SCHEDULE_JABBA_VERSION == 'DO-NOT-CHANGE-THIS-SELECTION' ? params.ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION : params.CI_SCHEDULE_JABBA_VERSION}" - } - - matrix { - axes { - axis { - name 'SERVER_VERSION' - values '4.0', // Previous Apache CassandraⓇ - '4.1', // Previous Apache CassandraⓇ - '5.0', // Current Apache CassandraⓇ - 'dse-4.8.16', // Previous EOSL DataStax Enterprise - 'dse-5.0.15', // Last EOSL DataStax Enterprise - 'dse-5.1.35', // Legacy DataStax Enterprise - 'dse-6.0.18', // Previous DataStax Enterprise - 'dse-6.7.17', // Previous DataStax Enterprise - 'dse-6.8.30', // Current DataStax Enterprise - 'dse-6.9.0', // Current DataStax Enterprise - 'hcd-1.0.0' // Current DataStax HCD - } - } - when { - beforeAgent true - allOf { - expression { return env.SERVER_VERSIONS.split(' ').any { it =~ /(ALL|${env.SERVER_VERSION})/ } } - } - } - agent { - label "${env.OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - 
stage('Describe-Build') { - steps { - describeAdhocAndScheduledTestingStage() - } - } - stage('Build-Driver') { - steps { - buildDriver('1.8') - } - } - stage('Execute-Tests') { - steps { - catchError { - // Use the matrix JDK for testing - executeTests() - } - } - post { - always { - /* - * Empty results are possible - * - * - Build failures during mvn verify may exist so report may not be available - * - With boolean parameters to skip tests a failsafe report may not be available - */ - junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true - junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true - } - } - } - stage('Execute-Code-Coverage') { - // Ensure the code coverage is run only once per-commit - when { - allOf { - environment name: 'SERVER_VERSION', value: '4.0' - environment name: 'JABBA_VERSION', value: '1.8' - } - } - steps { - executeCodeCoverage() - } - } - } - } - post { - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - } -} diff --git a/LICENSE b/LICENSE index a157e31d058..d6456956733 100644 --- a/LICENSE +++ b/LICENSE @@ -200,24 +200,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - -Apache Cassandra Java Driver bundles code and files from the following projects: - -JNR project -Copyright (C) 2008-2010 Wayne Meissner -This product includes software developed as part of the JNR project ( https://github.com/jnr/jnr-ffi )s. -see core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java - -Protocol Buffers -Copyright 2008 Google Inc. -This product includes software developed as part of the Protocol Buffers project ( https://developers.google.com/protocol-buffers/ ). 
-see core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java - -Guava -Copyright (C) 2007 The Guava Authors -This product includes software developed as part of the Guava project ( https://guava.dev ). -see core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java - -Copyright (C) 2018 Christian Stein -This product includes software developed by Christian Stein -see ci/install-jdk.sh diff --git a/LICENSE_binary b/LICENSE_binary deleted file mode 100644 index b59c6ec22bb..00000000000 --- a/LICENSE_binary +++ /dev/null @@ -1,247 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -Apache Cassandra Java Driver bundles code and files from the following projects: - -JNR project -Copyright (C) 2008-2010 Wayne Meissner -This product includes software developed as part of the JNR project ( https://github.com/jnr/jnr-ffi )s. -see core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java - -Protocol Buffers -Copyright 2008 Google Inc. -This product includes software developed as part of the Protocol Buffers project ( https://developers.google.com/protocol-buffers/ ). -see core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java - -Guava -Copyright (C) 2007 The Guava Authors -This product includes software developed as part of the Guava project ( https://guava.dev ). -see core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java - -Copyright (C) 2018 Christian Stein -This product includes software developed by Christian Stein -see ci/install-jdk.sh - -This product bundles Java Native Runtime - POSIX 3.1.15, -which is available under the Eclipse Public License version 2.0. -see licenses/jnr-posix.txt - -This product bundles jnr-x86asm 1.0.2, -which is available under the MIT License. -see licenses/jnr-x86asm.txt - -This product bundles ASM 9.2: a very small and fast Java bytecode manipulation framework, -which is available under the 3-Clause BSD License. 
-see licenses/asm.txt - -This product bundles HdrHistogram 2.1.12: A High Dynamic Range (HDR) Histogram, -which is available under the 2-Clause BSD License. -see licenses/HdrHistogram.txt - -This product bundles The Simple Logging Facade for Java (SLF4J) API 1.7.26, -which is available under the MIT License. -see licenses/slf4j-api.txt - -This product bundles Reactive Streams 1.0.3, -which is available under the MIT License. -see licenses/reactive-streams.txt diff --git a/NOTICE.txt b/NOTICE.txt deleted file mode 100644 index 8e27ae3e52f..00000000000 --- a/NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Apache Cassandra Java Driver -Copyright 2012- The Apache Software Foundation - -This product includes software developed at The Apache Software -Foundation (http://www.apache.org/). diff --git a/NOTICE_binary.txt b/NOTICE_binary.txt deleted file mode 100644 index f6f11c298f6..00000000000 --- a/NOTICE_binary.txt +++ /dev/null @@ -1,249 +0,0 @@ -Apache Cassandra Java Driver -Copyright 2012- The Apache Software Foundation - -This product includes software developed at The Apache Software -Foundation (http://www.apache.org/). - -This compiled product also includes Apache-licensed dependencies -that contain the following NOTICE information: - -================================================================== -io.netty:netty-handler NOTICE.txt -================================================================== -This product contains the extensions to Java Collections Framework which has -been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: - - * LICENSE: - * license/LICENSE.jsr166y.txt (Public Domain) - * HOMEPAGE: - * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ - * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ - -This product contains a modified version of Robert Harder's Public Domain -Base64 Encoder and Decoder, which can be obtained at: - - * LICENSE: - * license/LICENSE.base64.txt (Public Domain) - * HOMEPAGE: - * http://iharder.sourceforge.net/current/java/base64/ - -This product contains a modified portion of 'Webbit', an event based -WebSocket and HTTP server, which can be obtained at: - - * LICENSE: - * license/LICENSE.webbit.txt (BSD License) - * HOMEPAGE: - * https://github.com/joewalnes/webbit - -This product contains a modified portion of 'SLF4J', a simple logging -facade for Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.slf4j.txt (MIT License) - * HOMEPAGE: - * https://www.slf4j.org/ - -This product contains a modified portion of 'Apache Harmony', an open source -Java SE, which can be obtained at: - - * NOTICE: - * license/NOTICE.harmony.txt - * LICENSE: - * license/LICENSE.harmony.txt (Apache License 2.0) - * HOMEPAGE: - * https://archive.apache.org/dist/harmony/ - -This product contains a modified portion of 'jbzip2', a Java bzip2 compression -and decompression library written by Matthew J. Francis. It can be obtained at: - - * LICENSE: - * license/LICENSE.jbzip2.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jbzip2/ - -This product contains a modified portion of 'libdivsufsort', a C API library to construct -the suffix array and the Burrows-Wheeler transformed string for any input string of -a constant-size alphabet written by Yuta Mori. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.libdivsufsort.txt (MIT License) - * HOMEPAGE: - * https://github.com/y-256/libdivsufsort - -This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, - which can be obtained at: - - * LICENSE: - * license/LICENSE.jctools.txt (ASL2 License) - * HOMEPAGE: - * https://github.com/JCTools/JCTools - -This product optionally depends on 'JZlib', a re-implementation of zlib in -pure Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.jzlib.txt (BSD style License) - * HOMEPAGE: - * http://www.jcraft.com/jzlib/ - -This product optionally depends on 'Compress-LZF', a Java library for encoding and -decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: - - * LICENSE: - * license/LICENSE.compress-lzf.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/ning/compress - -This product optionally depends on 'lz4', a LZ4 Java compression -and decompression library written by Adrien Grand. It can be obtained at: - - * LICENSE: - * license/LICENSE.lz4.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/yawkat/lz4-java - -This product optionally depends on 'lzma-java', a LZMA Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.lzma-java.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jponge/lzma-java - -This product optionally depends on 'zstd-jni', a zstd-jni Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.zstd-jni.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/luben/zstd-jni - -This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression -and decompression library written by William Kinney. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.jfastlz.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jfastlz/ - -This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data -interchange format, which can be obtained at: - - * LICENSE: - * license/LICENSE.protobuf.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/protobuf - -This product optionally depends on 'Bouncy Castle Crypto APIs' to generate -a temporary self-signed X.509 certificate when the JVM does not provide the -equivalent functionality. It can be obtained at: - - * LICENSE: - * license/LICENSE.bouncycastle.txt (MIT License) - * HOMEPAGE: - * https://www.bouncycastle.org/ - -This product optionally depends on 'Snappy', a compression library produced -by Google Inc, which can be obtained at: - - * LICENSE: - * license/LICENSE.snappy.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/snappy - -This product optionally depends on 'JBoss Marshalling', an alternative Java -serialization API, which can be obtained at: - - * LICENSE: - * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jboss-remoting/jboss-marshalling - -This product optionally depends on 'Caliper', Google's micro- -benchmarking framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.caliper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/google/caliper - -This product optionally depends on 'Apache Commons Logging', a logging -framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-logging.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/logging/ - -This product optionally depends on 'Apache Log4J', a logging framework, which -can be obtained at: - - * LICENSE: - * license/LICENSE.log4j.txt (Apache License 2.0) - * HOMEPAGE: - * https://logging.apache.org/log4j/ - -This product optionally depends on 'Aalto XML', an 
ultra-high performance -non-blocking XML processor, which can be obtained at: - - * LICENSE: - * license/LICENSE.aalto-xml.txt (Apache License 2.0) - * HOMEPAGE: - * https://wiki.fasterxml.com/AaltoHome - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: - - * LICENSE: - * license/LICENSE.hpack.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/twitter/hpack - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: - - * LICENSE: - * license/LICENSE.hyper-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/python-hyper/hpack/ - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: - - * LICENSE: - * license/LICENSE.nghttp2-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/nghttp2/nghttp2/ - -This product contains a modified portion of 'Apache Commons Lang', a Java library -provides utilities for the java.lang API, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-lang.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/proper/commons-lang/ - - -This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. - - * LICENSE: - * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/takari/maven-wrapper - -This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. -This private header is also used by Apple's open source - mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). 
- - * LICENSE: - * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) - * HOMEPAGE: - * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h - -This product optionally depends on 'Brotli4j', Brotli compression and -decompression for Java., which can be obtained at: - - * LICENSE: - * license/LICENSE.brotli4j.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/hyperxpro/Brotli4j diff --git a/README.md b/README.md index d8ef01d0964..b70bf5009f9 100644 --- a/README.md +++ b/README.md @@ -1,88 +1,125 @@ -# Java Driver for Apache Cassandra® - -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -[![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.apache.cassandra/java-driver-core/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.apache.cassandra/java-driver-core) - -*If you're reading this on github.com, please note that this is the readme for the development -version and that some features described here might not yet have been released. You can find the -documentation for latest version through [DataStax Docs] or via the release tags, e.g. -[4.17.0](https://github.com/datastax/java-driver/tree/4.17.0).* - -A modern, feature-rich and highly tunable Java client library for [Apache Cassandra®] \(2.1+) and -[DataStax Enterprise] \(4.7+), and [DataStax Astra], using exclusively Cassandra's binary protocol -and Cassandra Query Language (CQL) v3. - -[DataStax Docs]: http://docs.datastax.com/en/developer/java-driver/ -[Apache Cassandra®]: http://cassandra.apache.org/ - -## Getting the driver - -The driver artifacts are published in Maven central, under the group id [org.apache.cassandra]; there -are multiple modules, all prefixed with `java-driver-`. 
+# Datastax Java Driver for Apache Cassandra + +[![Build Status](https://travis-ci.org/datastax/java-driver.svg?branch=2.1)](https://travis-ci.org/datastax/java-driver) + +*If you're reading this on github.com, please note that this is the readme +for the development version and that some features described here might +not yet have been released. You can find the documentation for latest +version through [Java driver +docs](http://datastax.github.io/java-driver/) or via the release tags, +[e.g. +2.1.7](https://github.com/datastax/java-driver/tree/2.1.7).* + +A modern, [feature-rich](features/) and highly tunable Java client +library for Apache Cassandra (1.2+) and DataStax Enterprise (3.1+) using +exclusively Cassandra's binary protocol and Cassandra Query Language v3. + +**Features:** + +* [Sync][sync] and [Async][async] API +* [Simple][simple_st], [Prepared][prepared_st], and [Batch][batch_st] statements +* Asynchronous IO, parallel execution, request pipelining +* [Connection pooling][pool] +* Auto node discovery +* Automatic reconnection +* Configurable [load balancing][lbp] and [retry policies][retry_policy] +* Works with any cluster size +* [Query builder][query_builder] +* [Object mapper][mapper] + + +[sync]: http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/core/Session.html#execute(com.datastax.driver.core.Statement) +[async]: http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/core/Session.html#executeAsync(com.datastax.driver.core.Statement) +[simple_st]: http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/core/SimpleStatement.html +[prepared_st]: http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/core/Session.html#prepare(com.datastax.driver.core.RegularStatement) +[batch_st]: http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/core/BatchStatement.html +[pool]: features/pooling/ +[lbp]: 
http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/core/policies/LoadBalancingPolicy.html +[retry_policy]: http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/core/policies/RetryPolicy.html +[query_builder]: http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/core/querybuilder/QueryBuilder.html +[mapper]: http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/mapping/MappingManager.html + +The driver architecture is based on layers. At the bottom lies the driver core. +This core handles everything related to the connections to a Cassandra +cluster (for example, connection pool, discovering new nodes, etc.) and exposes a simple, +relatively low-level API on top of which higher level layers can be built. + +The driver contains the following modules: + +- driver-core: the core layer. +- driver-mapping: the object mapper. +- driver-examples: example applications using the other modules which are + only meant for demonstration purposes. + +**Community:** + +- JIRA: https://datastax-oss.atlassian.net/browse/JAVA +- MAILING LIST: https://groups.google.com/a/lists.datastax.com/forum/#!forum/java-driver-user +- IRC: #datastax-drivers on [irc.freenode.net](http://freenode.net) +- TWITTER: Follow the latest news about DataStax Drivers - [@olim7t](http://twitter.com/olim7t), [@mfiguiere](http://twitter.com/mfiguiere) +- DOCS: the [user guide](http://docs.datastax.com/en/developer/java-driver/2.1/java-driver/whatsNew2.html) + has introductory material. We are progressively migrating the doc + [here](features/) with more technical details. +- API: http://www.datastax.com/drivers/java/2.1 + +**Feeback requested:** help us focus our efforts, provide your input on the [Platform and Runtime Survey](http://goo.gl/forms/qwUE6qnL7U) (we kept it short). + + +## Maven + +The last release of the driver is available on Maven Central. 
You can install +it in your application using the following Maven dependency: ```xml - org.apache.cassandra - java-driver-core - ${driver.version} + com.datastax.cassandra + cassandra-driver-core + 2.1.7 +``` - - org.apache.cassandra - java-driver-query-builder - ${driver.version} - +Note that the object mapper is published as a separate artifact: +```xml - org.apache.cassandra - java-driver-mapper-runtime - ${driver.version} + com.datastax.cassandra + cassandra-driver-mapping + 2.1.7 ``` -Note that the query builder is now published as a separate artifact, you'll need to add the -dependency if you plan to use it. - -Refer to each module's manual for more details ([core](manual/core/), [query -builder](manual/query_builder/), [mapper](manual/mapper)). - -[org.apache.cassandra]: http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.apache.cassandra%22 +We also provide a [shaded JAR](features/shaded_jar/) +to avoid the explicit dependency to Netty. ## Compatibility -The driver is compatible with Apache Cassandra® 2.1 and higher, DataStax Enterprise 4.7 and -higher, and DataStax Astra. +The Java client driver 2.1 ([branch 2.1](https://github.com/datastax/java-driver/tree/2.1)) is compatible with Apache +Cassandra 1.2, 2.0 and 2.1. -It requires Java 8 or higher. +UDT and tuple support is available only when using Apache Cassandra 2.1 (see [CQL improvements in Cassandra 2.1](http://www.datastax.com/dev/blog/cql-in-2-1)). -Disclaimer: Some DataStax/DataStax Enterprise products might partially work on big-endian systems, -but DataStax does not officially support these systems. +Other features are available only when using Apache Cassandra 2.0 or higher (e.g. 
result set paging, +[BatchStatement](https://github.com/datastax/java-driver/blob/2.1/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java), +[lightweight transactions](http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_ltweight_transaction_t.html) +-- see [What's new in Cassandra 2.0](http://www.datastax.com/documentation/cassandra/2.0/cassandra/features/features_key_c.html)). +Trying to use these with a cluster running Cassandra 1.2 will result in +an [UnsupportedFeatureException](https://github.com/datastax/java-driver/blob/2.1/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java) being thrown. -## Migrating from previous versions -Java Driver 4 is **not binary compatible** with previous versions. However, most of the concepts -remain unchanged, and the new API will look very familiar to 2.x and 3.x users. +## Upgrading from previous versions -See the [upgrade guide](upgrade_guide/) for details. +If you are upgrading from a previous version of the driver, be sure to have a look at +the [upgrade guide](/upgrade_guide/). -## Useful links -* [Manual](manual/) -* [API docs] -* Bug tracking: [JIRA] -* [Mailing list] -* [Changelog] -* [FAQ] +### Troubleshooting -[API docs]: https://docs.datastax.com/en/drivers/java/4.17 -[JIRA]: https://issues.apache.org/jira/issues/?jql=project%20%3D%20CASSJAVA%20ORDER%20BY%20key%20DESC -[Mailing list]: https://groups.google.com/a/lists.datastax.com/forum/#!forum/java-driver-user -[Changelog]: changelog/ -[FAQ]: faq/ +If you are having issues connecting to the cluster (seeing `NoHostAvailableConnection` exceptions) please check the +[connection requirements](https://github.com/datastax/java-driver/wiki/Connection-requirements). -## License -© The Apache Software Foundation +## License +Copyright 2012-2015, DataStax Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -95,11 +132,3 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - ----- - -Apache Cassandra, Apache, Tomcat, Lucene, Solr, Hadoop, Spark, TinkerPop, and Cassandra are -trademarks of the [Apache Software Foundation](http://www.apache.org/) or its subsidiaries in -Canada, the United States and/or other countries. - -Binary artifacts of this product bundle Java Native Runtime libraries, which is available under the Eclipse Public License version 2.0. diff --git a/bom/pom.xml b/bom/pom.xml deleted file mode 100644 index dd76153a9b1..00000000000 --- a/bom/pom.xml +++ /dev/null @@ -1,121 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-bom - pom - Apache Cassandra Java Driver - Bill Of Materials - - - - org.apache.cassandra - java-driver-core - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-core-shaded - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-mapper-processor - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-mapper-runtime - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-query-builder - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-guava-shaded - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-test-infra - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-metrics-micrometer - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-metrics-microprofile - 4.19.3-SNAPSHOT - - - com.datastax.oss - native-protocol - 1.5.2 - - - - - - - org.codehaus.mojo - flatten-maven-plugin - - - flatten - process-resources - - flatten - - - - keep - expand - expand - expand - expand - expand - expand - expand - expand - expand - expand - expand - expand - remove - - true - - - - - - - diff --git a/changelog/README.md b/changelog/README.md index b01c3db3bf9..285316146d6 
100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -1,1866 +1,242 @@ - - ## Changelog - - -### 4.19.2 - -- [bug] CASSJAVA-116: Retry or Speculative Execution with RequestIdGenerator throws "Duplicate Key" - -### 4.19.1 - -- [improvement] CASSJAVA-97: Let users inject an ID for each request and write to the custom payload -- [improvement] CASSJAVA-92: Add Local DC to driver connection info and provide visibility with nodetool clientstats -- [bug] PR 2025: Eliminate lock in ConcurrencyLimitingRequestThrottler -- [improvement] CASSJAVA-89: Fix deprecated table configs in Cassandra 5 -- [improvement] PR 2028: Remove unnecessary locking in DefaultNettyOptions -- [improvement] CASSJAVA-102: Fix revapi spurious complaints about optional dependencies -- [improvement] PR 2013: Add SubnetAddressTranslator -- [improvement] CASSJAVA-68: Improve DefaultCodecRegistry.CacheKey#hashCode() to eliminate Object[] allocation -- [improvement] PR 1989: Bump Jackson version to la(te)st 2.13.x, 2.13.5 -- [improvement] CASSJAVA-76: Make guava an optional dependency of java-driver-guava-shaded -- [bug] PR 2035: Prevent long overflow in SNI address resolution -- [improvement] CASSJAVA-77: 4.x: Upgrade Netty to 4.1.119 -- [improvement] CASSJAVA-40: Driver testing against Java 21 -- [improvement] CASSJAVA-90: Update native-protocol -- [improvement] CASSJAVA-80: Support configuration to disable DNS reverse-lookups for SAN validation - -### 4.19.0 - -- [bug] JAVA-3055: Prevent PreparedStatement cache to be polluted if a request is cancelled. 
-- [bug] JAVA-3168: Copy node info for contact points on initial node refresh only from first match by endpoint -- [improvement] JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0) -- [improvement] CASSJAVA-53: Update Guava version used in cassandra-java-driver -- [improvement] JAVA-3118: Add support for vector data type in Schema Builder, QueryBuilder -- [bug] CASSJAVA-55: Remove setting "Host" header for metadata requests -- [bug] JAVA-3057: Allow decoding a UDT that has more fields than expected -- [improvement] CASSJAVA-52: Bring java-driver-shaded-guava into the repo as a submodule -- [bug] CASSJAVA-2: TableMetadata#describe produces invalid CQL when a type of a column is a vector -- [bug] JAVA-3051: Memory leak in DefaultLoadBalancingPolicy measurement of response times -- [improvement] CASSJAVA-14: Query builder support for NOT CQL syntax -- [bug] CASSJAVA-12: DefaultSslEngineFactory missing null check on close -- [improvement] CASSJAVA-46: Expose table extensions via schema builders -- [bug] PR 1938: Fix uncaught exception during graceful channel shutdown after exceeding max orphan ids -- [improvement] PR 1607: Annotate BatchStatement, Statement, SimpleStatement methods with CheckReturnValue -- [improvement] CASSJAVA-41: Reduce lock held duration in ConcurrencyLimitingRequestThrottler -- [bug] JAVA-3149: Async Query Cancellation Not Propagated To RequestThrottler -- [bug] JAVA-3167: CompletableFutures.allSuccessful() may return never completed future -- [bug] PR 1620: Don't return empty routing key when partition key is unbound -- [improvement] PR 1623: Limit calls to Conversions.resolveExecutionProfile -- [improvement] CASSJAVA-29: Update target Cassandra versions for integration tests, support new 5.0.x - -### 4.18.1 - -- [improvement] JAVA-3142: Ability to specify ordering of remote local dc's via new configuration for graceful automatic failovers -- [bug] CASSANDRA-19457: Object reference 
in Micrometer metrics prevent GC from reclaiming Session instances -- [improvement] CASSANDRA-19468: Don't swallow exception during metadata refresh -- [bug] CASSANDRA-19333: Fix data corruption in VectorCodec when using heap buffers -- [improvement] CASSANDRA-19290: Replace uses of AttributeKey.newInstance -- [improvement] CASSANDRA-19352: Support native_transport_(address|port) + native_transport_port_ssl for DSE 6.8 (4.x edition) -- [improvement] CASSANDRA-19180: Support reloading keystore in cassandra-java-driver - -### 4.18.0 - -- [improvement] PR 1689: Add support for publishing percentile time series for the histogram metrics (nparaddi-walmart) -- [improvement] JAVA-3104: Do not eagerly pre-allocate array when deserializing CqlVector -- [improvement] JAVA-3111: upgrade jackson-databind to 2.13.4.2 to address gradle dependency issue -- [improvement] PR 1617: Improve ByteBufPrimitiveCodec readBytes (chibenwa) -- [improvement] JAVA-3095: Fix CREATE keyword in vector search example in upgrade guide -- [improvement] JAVA-3100: Update jackson-databind to 2.13.4.1 and jackson-jaxrs-json-provider to 2.13.4 to address recent CVEs -- [improvement] JAVA-3089: Forbid wildcard imports - -### 4.17.0 - -- [improvement] JAVA-3070: Make CqlVector and CqlDuration serializable -- [improvement] JAVA-3085: Initialize c.d.o.d.i.core.util.Dependency at Graal native image build-time -- [improvement] JAVA-3061: CqlVector API improvements, add support for accessing vectors directly as float arrays -- [improvement] JAVA-3042: Enable automated testing for Java17 -- [improvement] JAVA-3050: Upgrade Netty to 4.1.94 - -### 4.16.0 - -- [improvement] JAVA-3058: Clear prepared statement cache on UDT type change event -- [improvement] JAVA-3060: Add vector type, codec + support for parsing CQL type -- [improvement] DOC-2813: Add error handling guidance linking to a helpful blog post -- [improvement] JAVA-3045: Fix GraalVM native image support for GraalVM 22.2 - -### 4.15.0 - -- [improvement] 
JAVA-3041: Update Guava session sample code to use ProgrammaticArguments -- [improvement] JAVA-3022: Implement AddressTranslator for AWS PrivateLink -- [bug] JAVA-3021: Update table SchemaBuilder page to replace withPrimaryKey with withPartitionKey -- [bug] JAVA-3005: Node list refresh behavior in 4.x is different from 3.x -- [bug] JAVA-3002: spring-boot app keeps connecting to IP of replaced node -- [improvement] JAVA-3023 Upgrade Netty to 4.1.77 -- [improvement] JAVA-2995: CodecNotFoundException doesn't extend DriverException - -### 4.14.1 - -- [improvement] JAVA-3013: Upgrade dependencies to address CVEs and other security issues, 4.14.1 edition -- [improvement] JAVA-2977: Update Netty to resolve higher-priority CVEs -- [improvement] JAVA-3003: Update jnr-posix to address CVE-2014-4043 - -### 4.14.0 - -- [bug] JAVA-2976: Support missing protocol v5 error codes CAS_WRITE_UNKNOWN, CDC_WRITE_FAILURE -- [bug] JAVA-2987: BasicLoadBalancingPolicy remote computation assumes local DC is up and live -- [bug] JAVA-2992: Include options into DefaultTableMetadata equals and hash methods -- [improvement] JAVA-2982: Switch Esri geometry lib to an optional dependency -- [improvement] JAVA-2959: Don't throw NoNodeAvailableException when all connections busy - -### 4.13.0 - -- [improvement] JAVA-2940: Add GraalVM native image build configurations -- [improvement] JAVA-2953: Promote ProgrammaticPlainTextAuthProvider to the public API and add - credentials hot-reload -- [improvement] JAVA-2951: Accept multiple node state listeners, schema change listeners and request - trackers - -Merged from 4.12.x: - -- [bug] JAVA-2949: Provide mapper support for CompletionStage> -- [bug] JAVA-2950: Remove reference to Reflection class from DependencyCheck - -### 4.12.1 - -Merged from 4.11.x: - -- [bug] JAVA-2949: Provide mapper support for CompletionStage> -- [bug] JAVA-2950: Remove reference to Reflection class from DependencyCheck - -### 4.12.0 - -- [improvement] JAVA-2935: Make GetEntity and 
SetEntity methods resilient to incomplete data -- [improvement] JAVA-2944: Upgrade MicroProfile Metrics to 3.0 - -Merged from 4.11.x: - -- [bug] JAVA-2932: Make DefaultDriverConfigLoader.close() resilient to terminated executors -- [bug] JAVA-2945: Reinstate InternalDriverContext.getNodeFilter method -- [bug] JAVA-2947: Release buffer after decoding multi-slice frame -- [bug] JAVA-2946: Make MapperResultProducerService instances be located with user-provided class loader -- [bug] JAVA-2942: GraphStatement.setConsistencyLevel() is not effective -- [bug] JAVA-2941: Cannot add a single static column with the alter table API -- [bug] JAVA-2943: Prevent session leak with wrong keyspace name -- [bug] JAVA-2938: OverloadedException message is misleading - -### 4.11.3 - -- [bug] JAVA-2949: Provide mapper support for CompletionStage> -- [bug] JAVA-2950: Remove reference to Reflection class from DependencyCheck - -### 4.11.2 - -- [bug] JAVA-2932: Make DefaultDriverConfigLoader.close() resilient to terminated executors -- [bug] JAVA-2945: Reinstate InternalDriverContext.getNodeFilter method -- [bug] JAVA-2947: Release buffer after decoding multi-slice frame -- [bug] JAVA-2946: Make MapperResultProducerService instances be located with user-provided class loader -- [bug] JAVA-2942: GraphStatement.setConsistencyLevel() is not effective -- [bug] JAVA-2941: Cannot add a single static column with the alter table API -- [bug] JAVA-2943: Prevent session leak with wrong keyspace name -- [bug] JAVA-2938: OverloadedException message is misleading - -### 4.11.1 - -- [bug] JAVA-2910: Add a configuration option to support strong values for prepared statements cache -- [bug] JAVA-2936: Support Protocol V6 -- [bug] JAVA-2934: Handle empty non-final pages in ReactiveResultSetSubscription - -### 4.11.0 - -- [improvement] JAVA-2930: Allow Micrometer to record histograms for timers -- [improvement] JAVA-2914: Transform node filter into a more flexible node distance evaluator -- [improvement] 
JAVA-2929: Revisit node-level metric eviction -- [new feature] JAVA-2830: Add mapper support for Java streams -- [bug] JAVA-2928: Generate counter increment/decrement constructs compatible with legacy C* - versions -- [new feature] JAVA-2872: Ability to customize metric names and tags -- [bug] JAVA-2925: Consider protocol version unsupported when server requires USE_BETA flag for it -- [improvement] JAVA-2704: Remove protocol v5 beta status, add v6-beta -- [improvement] JAVA-2916: Annotate generated classes with `@SuppressWarnings` -- [bug] JAVA-2927: Make Dropwizard truly optional -- [improvement] JAVA-2917: Include GraalVM substitutions for request processors and geo codecs -- [bug] JAVA-2918: Exclude invalid peers from schema agreement checks - -### 4.10.0 - -- [improvement] JAVA-2907: Switch Tinkerpop to an optional dependency -- [improvement] JAVA-2904: Upgrade Jackson to 2.12.0 and Tinkerpop to 3.4.9 -- [bug] JAVA-2911: Prevent control connection from scheduling too many reconnections -- [bug] JAVA-2902: Consider computed values when validating constructors for immutable entities -- [new feature] JAVA-2899: Re-introduce cross-DC failover in driver 4 -- [new feature] JAVA-2900: Re-introduce consistency downgrading retries -- [new feature] JAVA-2903: BlockHound integration -- [improvement] JAVA-2877: Allow skipping validation for individual mapped entities -- [improvement] JAVA-2871: Allow keyspace exclusions in the metadata, and exclude system keyspaces - by default -- [improvement] JAVA-2449: Use non-cryptographic random number generation in Uuids.random() -- [improvement] JAVA-2893: Allow duplicate keys in DefaultProgrammaticDriverConfigLoaderBuilder -- [documentation] JAVA-2894: Clarify usage of Statement.setQueryTimestamp -- [bug] JAVA-2889: Remove TypeSafe imports from DriverConfigLoader -- [bug] JAVA-2883: Use root locale explicitly when changing string case -- [bug] JAVA-2890: Fix off-by-one error in UdtCodec -- [improvement] JAVA-2905: Prevent new 
connections from using a protocol version higher than the negotiated one -- [bug] JAVA-2647: Handle token types in QueryBuilder.literal() -- [bug] JAVA-2887: Handle composite profiles with more than one key and/or backed by only one profile - -### 4.9.0 - -- [documentation] JAVA-2823: Make Astra more visible in the docs -- [documentation] JAVA-2869: Advise against using 4.5.x-4.6.0 in the upgrade guide -- [documentation] JAVA-2868: Cover reconnect-on-init in the manual -- [improvement] JAVA-2827: Exclude unused Tinkerpop transitive dependencies -- [improvement] JAVA-2827: Remove dependency to Tinkerpop gremlin-driver -- [task] JAVA-2859: Upgrade Tinkerpop to 3.4.8 -- [bug] JAVA-2726: Fix Tinkerpop incompatibility with JPMS -- [bug] JAVA-2842: Remove security vulnerabilities introduced by Tinkerpop -- [bug] JAVA-2867: Revisit compressor substitutions -- [improvement] JAVA-2870: Optimize memory usage of token map -- [improvement] JAVA-2855: Allow selection of the metrics framework via the config -- [improvement] JAVA-2864: Revisit mapper processor's messaging -- [new feature] JAVA-2816: Support immutability and fluent accessors in the mapper -- [new feature] JAVA-2721: Add counter support in the mapper -- [bug] JAVA-2863: Reintroduce mapper processor dependency to SLF4J - -### 4.8.0 - -- [improvement] JAVA-2811: Add aliases for driver 3 method names -- [new feature] JAVA-2808: Provide metrics bindings for Micrometer and MicroProfile -- [new feature] JAVA-2773: Support new protocol v5 message format -- [improvement] JAVA-2841: Raise timeouts during connection initialization -- [bug] JAVA-2331: Unregister old metrics when a node gets removed or changes RPC address -- [improvement] JAVA-2850: Ignore credentials in secure connect bundle [DataStax Astra] -- [improvement] JAVA-2813: Don't fail when secure bundle is specified together with other options -- [bug] JAVA-2800: Exclude SLF4J from mapper-processor dependencies -- [new feature] JAVA-2819: Add 
DriverConfigLoader.fromString -- [improvement] JAVA-2431: Set all occurrences when bound variables are used multiple times -- [improvement] JAVA-2829: Log protocol negotiation messages at DEBUG level -- [bug] JAVA-2846: Give system properties the highest precedence in DefaultDriverConfigLoader -- [new feature] JAVA-2691: Provide driver 4 support for extra codecs -- [improvement] Allow injection of CodecRegistry on session builder -- [improvement] JAVA-2828: Add safe paging state wrapper -- [bug] JAVA-2835: Correctly handle unresolved addresses in DefaultEndPoint.equals -- [bug] JAVA-2838: Avoid ConcurrentModificationException when closing connection -- [bug] JAVA-2837: make StringCodec strict about unicode in ascii - -### 4.7.2 - -- [bug] JAVA-2821: Can't connect to DataStax Astra using driver 4.7.x - -### 4.7.1 - -- [bug] JAVA-2818: Remove root path only after merging non-programmatic configs - -### 4.7.0 - -- [improvement] JAVA-2301: Introduce OSGi tests for the mapper -- [improvement] JAVA-2658: Refactor OSGi tests -- [bug] JAVA-2657: Add ability to specify the class loader to use for application-specific classpath resources -- [improvement] JAVA-2803: Add Graal substitutions for protocol compression -- [documentation] JAVA-2666: Document BOM and driver modules -- [documentation] JAVA-2613: Improve connection pooling documentation -- [new feature] JAVA-2793: Add composite config loader -- [new feature] JAVA-2792: Allow custom results in the mapper -- [improvement] JAVA-2663: Add Graal substitutions for native functions -- [improvement] JAVA-2747: Revisit semantics of Statement.setExecutionProfile/Name - -### 4.6.1 - -- [bug] JAVA-2676: Don't reschedule write coalescer after empty runs - -### 4.6.0 - -- [improvement] JAVA-2741: Make keyspace/table metadata impls serializable -- [bug] JAVA-2740: Extend peer validity check to include datacenter, rack and tokens -- [bug] JAVA-2744: Recompute token map when node is added -- [new feature] JAVA-2614: Provide a utility 
to emulate offset paging on the client side -- [new feature] JAVA-2718: Warn when the number of sessions exceeds a configurable threshold -- [improvement] JAVA-2664: Add a callback to inject the session in listeners -- [bug] JAVA-2698: TupleCodec and UdtCodec give wrong error message when parsing fails -- [improvement] JAVA-2435: Add automatic-module-names to the manifests -- [new feature] JAVA-2054: Add now_in_seconds to protocol v5 query messages -- [bug] JAVA-2711: Fix handling of UDT keys in the mapper -- [improvement] JAVA-2631: Add getIndex() shortcuts to TableMetadata -- [improvement] JAVA-2679: Add port information to QueryTrace and TraceEvent -- [improvement] JAVA-2184: Refactor DescribeIT to improve maintainability -- [new feature] JAVA-2600: Add map-backed config loader -- [new feature] JAVA-2105: Add support for transient replication -- [new feature] JAVA-2670: Provide base class for mapped custom codecs -- [new feature] JAVA-2633: Add execution profile argument to DAO mapper factory methods -- [improvement] JAVA-2667: Add ability to fail the build when integration tests fail -- [bug] JAVA-1861: Add Metadata.getClusterName() - -### 4.5.1 - -- [bug] JAVA-2673: Fix mapper generated code for UPDATE with TTL and IF condition - -### 4.5.0 - -- [bug] JAVA-2654: Make AdminRequestHandler handle integer serialization -- [improvement] JAVA-2618: Improve error handling in request handlers -- [new feature] JAVA-2064: Add support for DSE 6.8 graph options in schema builder -- [documentation] JAVA-2559: Fix GraphNode javadocs -- [improvement] JAVA-2281: Extend GraphBinaryDataTypesTest to other graph protocols -- [new feature] JAVA-2498: Add support for reactive graph queries -- [bug] JAVA-2572: Prevent race conditions when cancelling a continuous paging query -- [improvement] JAVA-2566: Introduce specific metrics for Graph queries -- [improvement] JAVA-2556: Make ExecutionInfo compatible with any Request type -- [improvement] JAVA-2571: Revisit usages of DseGraph.g 
-- [improvement] JAVA-2558: Revisit GraphRequestHandler -- [bug] JAVA-2508: Preserve backward compatibility in schema metadata types -- [bug] JAVA-2465: Avoid requesting 0 page when executing continuous paging queries -- [improvement] JAVA-2472: Enable speculative executions for paged graph queries -- [improvement] JAVA-1579: Change default result format to latest GraphSON format -- [improvement] JAVA-2496: Revisit timeouts for paged graph queries -- [bug] JAVA-2510: Fix GraphBinaryDataTypesTest Codec registry initialization -- [bug] JAVA-2492: Parse edge metadata using internal identifiers -- [improvement] JAVA-2282: Remove GraphSON3 support -- [new feature] JAVA-2098: Add filter predicates for collections -- [improvement] JAVA-2245: Rename graph engine Legacy to Classic and Modern to Core -- [new feature] JAVA-2099: Enable Paging Through DSE Driver for Gremlin Traversals (2.x) -- [new feature] JAVA-1898: Expose new table-level graph metadata -- [bug] JAVA-2642: Fix default value of max-orphan-requests -- [bug] JAVA-2644: Revisit channel selection when pool size > 1 -- [bug] JAVA-2630: Correctly handle custom classes in IndexMetadata.describe -- [improvement] JAVA-1556: Publish Maven Bill Of Materials POM -- [improvement] JAVA-2637: Bump Netty to 4.1.45 -- [bug] JAVA-2617: Reinstate generation of deps.txt for Insights -- [new feature] JAVA-2625: Provide user-friendly programmatic configuration for kerberos -- [improvement] JAVA-2624: Expose a config option for the connect timeout -- [improvement] JAVA-2592: Make reload support parameterizable for DefaultDriverConfigLoader -- [new feature] JAVA-2263: Add optional schema validation to the mapper - -### 4.4.0 - -This version brings in all functionality that was formerly only in the DataStax Enterprise driver, -such as the built-in support for reactive programming. 
Going forward, all new features will be -implemented in this single driver (for past DataStax Enterprise driver versions before the merge, -refer to the [DSE driver -changelog](https://docs.datastax.com/en/developer/java-driver-dse/latest/changelog/)). - -- [documentation] JAVA-2607: Improve visibility of driver dependencies section -- [documentation] JAVA-1975: Document the importance of using specific TinkerPop version -- [improvement] JAVA-2529: Standardize optional/excludable dependency checks -- [bug] JAVA-2598: Do not use context class loader when attempting to load classes -- [improvement] JAVA-2582: Don't propagate a future into SchemaQueriesFactory -- [documentation] JAVA-2542: Improve the javadocs of methods in CqlSession -- [documentation] JAVA-2609: Add docs for proxy authentication to unified driver -- [improvement] JAVA-2554: Improve efficiency of InsightsClient by improving supportsInsights check -- [improvement] JAVA-2601: Inject Google Tag Manager scripts in generated API documentation -- [improvement] JAVA-2551: Improve support for DETERMINISTIC and MONOTONIC functions -- [documentation] JAVA-2446: Revisit continuous paging javadocs -- [improvement] JAVA-2550: Remove warnings in ContinuousCqlRequestHandler when coordinator is not replica -- [improvement] JAVA-2569: Make driver compatible with Netty < 4.1.34 again -- [improvement] JAVA-2541: Improve error messages during connection initialization -- [improvement] JAVA-2530: Expose shortcuts for name-based UUIDs -- [improvement] JAVA-2547: Add method DriverConfigLoader.fromPath -- [improvement] JAVA-2528: Store suppressed exceptions in AllNodesFailedException -- [new feature] JAVA-2581: Add query builder support for indexed list assignments -- [improvement] JAVA-2596: Consider collection removals as idempotent in query builder -- [bug] JAVA-2555: Generate append/prepend constructs compatible with legacy C* versions -- [bug] JAVA-2584: Ensure codec registry is able to create codecs for collections of 
UDTs and tuples -- [bug] JAVA-2583: IS NOT NULL clause should be idempotent -- [improvement] JAVA-2442: Don't check for schema agreement twice when completing a DDL query -- [improvement] JAVA-2473: Don't reconnect control connection if protocol is downgraded -- [bug] JAVA-2556: Make ExecutionInfo compatible with any Request type -- [new feature] JAVA-2532: Add BoundStatement ReturnType for insert, update, and delete DAO methods -- [improvement] JAVA-2107: Add XML formatting plugin -- [bug] JAVA-2527: Allow AllNodesFailedException to accept more than one error per node -- [improvement] JAVA-2546: Abort schema refresh if a query fails - -### 4.3.1 - -- [bug] JAVA-2557: Accept any negative length when decoding elements of tuples and UDTs - -### 4.3.0 - -- [improvement] JAVA-2497: Ensure nodes and exceptions are serializable -- [bug] JAVA-2464: Fix initial schema refresh when reconnect-on-init is enabled -- [improvement] JAVA-2516: Enable hostname validation with Cloud -- [documentation]: JAVA-2460: Document how to determine the local DC -- [improvement] JAVA-2476: Improve error message when codec registry inspects a collection with a - null element -- [documentation] JAVA-2509: Mention file-based approach for Cloud configuration in the manual -- [improvement] JAVA-2447: Mention programmatic local DC method in Default LBP error message -- [improvement] JAVA-2459: Improve extensibility of existing load balancing policies -- [documentation] JAVA-2428: Add developer docs -- [documentation] JAVA-2503: Migrate Cloud "getting started" page to driver manual -- [improvement] JAVA-2484: Add errors for cloud misconfiguration -- [improvement] JAVA-2490: Allow to read the secure bundle from an InputStream -- [new feature] JAVA-2478: Allow to provide the secure bundle via URL -- [new feature] JAVA-2356: Support for DataStax Cloud API -- [improvement] JAVA-2407: Improve handling of logback configuration files in IDEs -- [improvement] JAVA-2434: Add support for custom cipher suites 
and host name validation to ProgrammaticSslEngineFactory -- [improvement] JAVA-2480: Upgrade Jackson to 2.10.0 -- [documentation] JAVA-2505: Annotate Node.getHostId() as nullable -- [improvement] JAVA-1708: Support DSE "everywhere" replication strategy -- [improvement] JAVA-2471: Consider DSE version when parsing the schema -- [improvement] JAVA-2444: Add method setRoutingKey(ByteBuffer...) to StatementBuilder -- [improvement] JAVA-2398: Improve support for optional dependencies in OSGi -- [improvement] JAVA-2452: Allow "none" as a compression option -- [improvement] JAVA-2419: Allow registration of user codecs at runtime -- [documentation] JAVA-2384: Add quick overview section to each manual page -- [documentation] JAVA-2412: Cover DDL query debouncing in FAQ and upgrade guide -- [documentation] JAVA-2416: Update paging section in the manual -- [improvement] JAVA-2402: Add setTracing(boolean) to StatementBuilder -- [bug] JAVA-2466: Set idempotence to null in BatchStatement.newInstance - -### 4.2.2 - -- [bug] JAVA-2475: Fix message size when query string contains Unicode surrogates -- [bug] JAVA-2470: Fix Session.OSS_DRIVER_COORDINATES for shaded JAR - -### 4.2.1 - -- [bug] JAVA-2454: Handle "empty" CQL type while parsing schema -- [improvement] JAVA-2455: Improve logging of schema refresh errors -- [documentation] JAVA-2429: Document expected types on DefaultDriverOption -- [documentation] JAVA-2426: Fix month pattern in CqlDuration documentation -- [bug] JAVA-2451: Make zero a valid estimated size for PagingIterableSpliterator -- [bug] JAVA-2443: Compute prepared statement PK indices for protocol v3 -- [bug] JAVA-2430: Use variable metadata to infer the routing keyspace on bound statements - -### 4.2.0 - -- [improvement] JAVA-2390: Add methods to set the SSL engine factory programmatically -- [improvement] JAVA-2379: Fail fast if prepared id doesn't match when repreparing on the fly -- [bug] JAVA-2375: Use per-request keyspace when repreparing on the fly -- 
[improvement] JAVA-2370: Remove auto-service plugin from mapper processor -- [improvement] JAVA-2377: Add a config option to make driver threads daemon -- [improvement] JAVA-2371: Handle null elements in collections on the decode path -- [improvement] JAVA-2351: Add a driver example for the object mapper -- [bug] JAVA-2323: Handle restart of a node with same host_id but a different address -- [improvement] JAVA-2303: Ignore peer rows matching the control host's RPC address -- [improvement] JAVA-2236: Add methods to set the auth provider programmatically -- [improvement] JAVA-2369: Change mapper annotations retention to runtime -- [improvement] JAVA-2365: Redeclare default constants when an enum is abstracted behind an - interface -- [improvement] JAVA-2302: Better target mapper errors and warnings for inherited methods -- [improvement] JAVA-2336: Expose byte utility methods in the public API -- [improvement] JAVA-2338: Revisit toString() for data container types -- [bug] JAVA-2367: Fix column names in EntityHelper.updateByPrimaryKey -- [bug] JAVA-2358: Fix list of reserved CQL keywords -- [improvement] JAVA-2359: Allow default keyspace at the mapper level -- [improvement] JAVA-2306: Clear security tokens from memory immediately after use -- [improvement] JAVA-2320: Expose more attributes on mapper Select for individual query clauses -- [bug] JAVA-2332: Destroy connection pool when a node gets removed -- [bug] JAVA-2324: Add support for primitive shorts in mapper -- [bug] JAVA-2325: Allow "is" prefix for boolean getters in mapped entities -- [improvement] JAVA-2308: Add customWhereClause to `@Delete` -- [improvement] JAVA-2247: PagingIterable implementations should implement spliterator() -- [bug] JAVA-2312: Handle UDTs with names that clash with collection types -- [improvement] JAVA-2307: Improve `@Select` and `@Delete` by not requiring full primary key -- [improvement] JAVA-2315: Improve extensibility of session builder -- [bug] JAVA-2394: BaseCcmRule 
DseRequirement max should use DseVersion, not Cassandra version - -### 4.1.0 - -- [documentation] JAVA-2294: Fix wrong examples in manual page on batch statements -- [bug] JAVA-2304: Avoid direct calls to ByteBuffer.array() -- [new feature] JAVA-2078: Add object mapper -- [improvement] JAVA-2297: Add a NettyOptions method to set socket options -- [bug] JAVA-2280: Ignore peer rows with missing host id or RPC address -- [bug] JAVA-2264: Adjust HashedWheelTimer tick duration from 1 to 100 ms -- [bug] JAVA-2260: Handle empty collections in PreparedStatement.bind(...) -- [improvement] JAVA-2278: Pass the request's log prefix to RequestTracker -- [bug] JAVA-2253: Don't strip trailing zeros in ByteOrderedToken -- [improvement] JAVA-2207: Add bulk value assignment to QueryBuilder Insert -- [bug] JAVA-2234: Handle terminated executor when the session is closed twice -- [documentation] JAVA-2220: Emphasize that query builder is now a separate artifact in root README -- [documentation] JAVA-2217: Cover contact points and local datacenter earlier in the manual -- [improvement] JAVA-2242: Allow skipping all integration tests with -DskipITs -- [improvement] JAVA-2241: Make DefaultDriverContext.cycleDetector protected -- [bug] JAVA-2226: Support IPv6 contact points in the configuration - -### 4.0.1 - -- [new feature] JAVA-2201: Expose a public API for programmatic config -- [new feature] JAVA-2205: Expose public factory methods for alternative config loaders -- [bug] JAVA-2214: Fix flaky RequestLoggerIT test -- [bug] JAVA-2203: Handle unresolved addresses in DefaultEndPoint -- [bug] JAVA-2210: Add ability to set TTL for modification queries -- [improvement] JAVA-2212: Add truncate to QueryBuilder -- [improvement] JAVA-2211: Upgrade Jersey examples to fix security issue sid-3606 -- [bug] JAVA-2193: Fix flaky tests in ExecutionInfoWarningsIT -- [improvement] JAVA-2197: Skip deployment of examples and integration tests to Maven central - -### 4.0.0 - -- [improvement] JAVA-2192: 
Don't return generic types with wildcards -- [improvement] JAVA-2148: Add examples -- [bug] JAVA-2189: Exclude virtual keyspaces from token map computation -- [improvement] JAVA-2183: Enable materialized views when testing against Cassandra 4 -- [improvement] JAVA-2182: Add insertInto().json() variant that takes an object in QueryBuilder -- [improvement] JAVA-2161: Annotate mutating methods with `@CheckReturnValue` -- [bug] JAVA-2177: Don't exclude down nodes when initializing LBPs -- [improvement] JAVA-2143: Rename Statement.setTimestamp() to setQueryTimestamp() -- [improvement] JAVA-2165: Abstract node connection information -- [improvement] JAVA-2090: Add support for additional_write_policy and read_repair table options -- [improvement] JAVA-2164: Rename statement builder methods to setXxx -- [bug] JAVA-2178: QueryBuilder: Alias after function column is not included in a query -- [improvement] JAVA-2158: Allow BuildableQuery to build statement with values -- [improvement] JAVA-2150: Improve query builder error message on unsupported literal type -- [documentation] JAVA-2149: Improve Term javadocs in the query builder - -### 4.0.0-rc1 - -- [improvement] JAVA-2106: Log server side warnings returned from a query -- [improvement] JAVA-2151: Drop "Dsl" suffix from query builder main classes -- [new feature] JAVA-2144: Expose internal API to hook into the session lifecycle -- [improvement] JAVA-2119: Add PagingIterable abstraction as a supertype of ResultSet -- [bug] JAVA-2063: Normalize authentication logging -- [documentation] JAVA-2034: Add performance recommendations in the manual -- [improvement] JAVA-2077: Allow reconnection policy to detect first connection attempt -- [improvement] JAVA-2067: Publish javadocs JAR for the shaded module -- [improvement] JAVA-2103: Expose partitioner name in TokenMap API -- [documentation] JAVA-2075: Document preference for LZ4 over Snappy - -### 4.0.0-beta3 - -- [bug] JAVA-2066: Array index range error when fetching routing keys 
on bound statements -- [documentation] JAVA-2061: Add section to upgrade guide about updated type mappings -- [improvement] JAVA-2038: Add jitter to delays between reconnection attempts -- [improvement] JAVA-2053: Cache results of session.prepare() -- [improvement] JAVA-2058: Make programmatic config reloading part of the public API -- [improvement] JAVA-1943: Fail fast in execute() when the session is closed -- [improvement] JAVA-2056: Reduce HashedWheelTimer tick duration -- [bug] JAVA-2057: Do not create pool when SUGGEST\_UP topology event received -- [improvement] JAVA-2049: Add shorthand method to SessionBuilder to specify local DC -- [bug] JAVA-2037: Fix NPE when preparing statement with no bound variables -- [improvement] JAVA-2014: Schedule timeouts on a separate Timer -- [bug] JAVA-2029: Handle schema refresh failure after a DDL query -- [bug] JAVA-1947: Make schema parsing more lenient and allow missing system_virtual_schema -- [bug] JAVA-2028: Use CQL form when parsing UDT types in system tables -- [improvement] JAVA-1918: Document temporal types -- [improvement] JAVA-1914: Optimize use of System.nanoTime in CqlRequestHandlerBase -- [improvement] JAVA-1945: Document corner cases around UDT and tuple attachment -- [improvement] JAVA-2026: Make CqlDuration implement TemporalAmount -- [improvement] JAVA-2017: Slightly optimize conversion methods on the hot path -- [improvement] JAVA-2010: Make dependencies to annotations required again -- [improvement] JAVA-1978: Add a config option to keep contact points unresolved -- [bug] JAVA-2000: Fix ConcurrentModificationException during channel shutdown -- [improvement] JAVA-2002: Reimplement TypeCodec.accepts to improve performance -- [improvement] JAVA-2011: Re-add ResultSet.getAvailableWithoutFetching() and isFullyFetched() -- [improvement] JAVA-2007: Make driver threads extend FastThreadLocalThread -- [bug] JAVA-2001: Handle zero timeout in admin requests - -### 4.0.0-beta2 - -- [new feature] JAVA-1919: Provide 
a timestamp <=> ZonedDateTime codec -- [improvement] JAVA-1989: Add BatchStatement.newInstance(BatchType, Iterable) -- [improvement] JAVA-1988: Remove pre-fetching from ResultSet API -- [bug] JAVA-1948: Close session properly when LBP fails to initialize -- [improvement] JAVA-1949: Improve error message when contact points are wrong -- [improvement] JAVA-1956: Add statementsCount accessor to BatchStatementBuilder -- [bug] JAVA-1946: Ignore protocol version in equals comparison for UdtValue/TupleValue -- [new feature] JAVA-1932: Send Driver Name and Version in Startup message -- [new feature] JAVA-1917: Add ability to set node on statement -- [improvement] JAVA-1916: Base TimestampCodec.parse on java.util.Date. -- [improvement] JAVA-1940: Clean up test resources when CCM integration tests finish -- [bug] JAVA-1938: Make CassandraSchemaQueries classes public -- [improvement] JAVA-1925: Rename context getters -- [improvement] JAVA-1544: Check API compatibility with Revapi -- [new feature] JAVA-1900: Add support for virtual tables - -### 4.0.0-beta1 - -- [new feature] JAVA-1869: Add DefaultDriverConfigLoaderBuilder -- [improvement] JAVA-1913: Expose additional counters on Node -- [improvement] JAVA-1880: Rename "config profile" to "execution profile" -- [improvement] JAVA-1889: Upgrade dependencies to the latest minor versions -- [improvement] JAVA-1819: Propagate more attributes to bound statements -- [improvement] JAVA-1897: Improve extensibility of schema metadata classes -- [improvement] JAVA-1437: Enable SSL hostname validation by default -- [improvement] JAVA-1879: Duplicate basic.request options as Request/Statement attributes -- [improvement] JAVA-1870: Use sensible defaults in RequestLogger if config options are missing -- [improvement] JAVA-1877: Use a separate reconnection schedule for the control connection -- [improvement] JAVA-1763: Generate a binary tarball as part of the build process -- [improvement] JAVA-1884: Add additional methods from TypeToken to 
GenericType -- [improvement] JAVA-1883: Use custom queue implementation for LBP's query plan -- [improvement] JAVA-1890: Add more configuration options to DefaultSslEngineFactory -- [bug] JAVA-1895: Rename PreparedStatement.getPrimaryKeyIndices to getPartitionKeyIndices -- [bug] JAVA-1891: Allow null items when setting values in bulk -- [improvement] JAVA-1767: Improve message when column not in result set -- [improvement] JAVA-1624: Expose ExecutionInfo on exceptions where applicable -- [improvement] JAVA-1766: Revisit nullability -- [new feature] JAVA-1860: Allow reconnection at startup if no contact point is available -- [improvement] JAVA-1866: Make all public policies implement AutoCloseable -- [new feature] JAVA-1762: Build alternate core artifact with Netty shaded -- [new feature] JAVA-1761: Add OSGi descriptors -- [bug] JAVA-1560: Correctly propagate policy initialization errors -- [improvement] JAVA-1865: Add RelationMetadata.getPrimaryKey() -- [improvement] JAVA-1862: Add ConsistencyLevel.isDcLocal and isSerial -- [improvement] JAVA-1858: Implement Serializable in implementations, not interfaces -- [improvement] JAVA-1830: Surface response frame size in ExecutionInfo -- [improvement] JAVA-1853: Add newValue(Object...) 
to TupleType and UserDefinedType -- [improvement] JAVA-1815: Reorganize configuration into basic/advanced categories -- [improvement] JAVA-1848: Add logs to DefaultRetryPolicy -- [new feature] JAVA-1832: Add Ec2MultiRegionAddressTranslator -- [improvement] JAVA-1825: Add remaining Typesafe config primitive types to DriverConfigProfile -- [new feature] JAVA-1846: Add ConstantReconnectionPolicy -- [improvement] JAVA-1824: Make policies overridable in profiles -- [bug] JAVA-1569: Allow null to be used in positional and named values in statements -- [new feature] JAVA-1592: Expose request's total Frame size through API -- [new feature] JAVA-1829: Add metrics for bytes-sent and bytes-received -- [improvement] JAVA-1755: Normalize usage of DEBUG/TRACE log levels -- [improvement] JAVA-1803: Log driver version on first use -- [improvement] JAVA-1792: Add AuthProvider callback to handle missing challenge from server -- [improvement] JAVA-1775: Assume default packages for built-in policies -- [improvement] JAVA-1774: Standardize policy locations -- [improvement] JAVA-1798: Allow passing the default LBP filter as a session builder argument -- [new feature] JAVA-1523: Add query logger -- [improvement] JAVA-1801: Revisit NodeStateListener and SchemaChangeListener APIs -- [improvement] JAVA-1759: Revisit metrics API -- [improvement] JAVA-1776: Use concurrency annotations -- [improvement] JAVA-1799: Use CqlIdentifier for simple statement named values -- [new feature] JAVA-1515: Add query builder -- [improvement] JAVA-1773: Make DriverConfigProfile enumerable -- [improvement] JAVA-1787: Use standalone shaded Guava artifact -- [improvement] JAVA-1769: Allocate exact buffer size for outgoing requests -- [documentation] JAVA-1780: Add manual section about case sensitivity -- [new feature] JAVA-1536: Add request throttling -- [improvement] JAVA-1772: Revisit multi-response callbacks -- [new feature] JAVA-1537: Add remaining socket options -- [bug] JAVA-1756: Propagate custom payload 
when preparing a statement -- [improvement] JAVA-1847: Add per-node request tracking - -### 4.0.0-alpha3 - -- [new feature] JAVA-1518: Expose metrics -- [improvement] JAVA-1739: Add host_id and schema_version to node metadata -- [improvement] JAVA-1738: Convert enums to allow extensibility -- [bug] JAVA-1727: Override DefaultUdtValue.equals -- [bug] JAVA-1729: Override DefaultTupleValue.equals -- [improvement] JAVA-1720: Merge Cluster and Session into a single interface -- [improvement] JAVA-1713: Use less nodes in DefaultLoadBalancingPolicyIT -- [improvement] JAVA-1707: Add test infrastructure for running DSE clusters with CCM -- [bug] JAVA-1715: Propagate unchecked exceptions to CompletableFuture in SyncAuthenticator methods -- [improvement] JAVA-1714: Make replication strategies pluggable -- [new feature] JAVA-1647: Handle metadata_changed flag in protocol v5 -- [new feature] JAVA-1633: Handle per-request keyspace in protocol v5 -- [improvement] JAVA-1678: Warn if auth is configured on the client but not the server -- [improvement] JAVA-1673: Remove schema agreement check when repreparing on up -- [new feature] JAVA-1526: Provide a single load balancing policy implementation -- [improvement] JAVA-1680: Improve error message on batch log write timeout -- [improvement] JAVA-1675: Remove dates from copyright headers -- [improvement] JAVA-1645: Don't log stack traces at WARN level -- [new feature] JAVA-1524: Add query trace API -- [improvement] JAVA-1646: Provide a more readable error when connecting to Cassandra 2.0 or lower -- [improvement] JAVA-1662: Raise default request timeout -- [improvement] JAVA-1566: Enforce API rules automatically -- [bug] JAVA-1584: Validate that no bound values are unset in protocol v3 - -### 4.0.0-alpha2 - -- [new feature] JAVA-1525: Handle token metadata -- [new feature] JAVA-1638: Check schema agreement -- [new feature] JAVA-1494: Implement Snappy and LZ4 compression -- [new feature] JAVA-1514: Port Uuids utility class -- [new 
feature] JAVA-1520: Add node state listeners -- [new feature] JAVA-1493: Handle schema metadata -- [improvement] JAVA-1605: Refactor request execution model -- [improvement] JAVA-1597: Fix raw usages of Statement -- [improvement] JAVA-1542: Enable JaCoCo code coverage -- [improvement] JAVA-1295: Auto-detect best protocol version in mixed cluster -- [bug] JAVA-1565: Mark node down when it loses its last connection and was already reconnecting -- [bug] JAVA-1594: Don't create pool if node comes back up but is ignored -- [bug] JAVA-1593: Reconnect control connection if current node is removed, forced down or ignored -- [bug] JAVA-1595: Don't use system.local.rpc_address when refreshing node list -- [bug] JAVA-1568: Handle Reconnection#reconnectNow/stop while the current attempt is still in - progress -- [improvement] JAVA-1585: Add GenericType#where -- [improvement] JAVA-1590: Properly skip deployment of integration-tests module -- [improvement] JAVA-1576: Expose AsyncResultSet's iterator through a currentPage() method -- [improvement] JAVA-1591: Add programmatic way to get driver version - -### 4.0.0-alpha1 - -- [improvement] JAVA-1586: Throw underlying exception when codec not found in cache -- [bug] JAVA-1583: Handle write failure in ChannelHandlerRequest -- [improvement] JAVA-1541: Reorganize configuration -- [improvement] JAVA-1577: Set default consistency level to LOCAL_ONE -- [bug] JAVA-1548: Retry idempotent statements on READ_TIMEOUT and UNAVAILABLE -- [bug] JAVA-1562: Fix various issues around heart beats -- [improvement] JAVA-1546: Make all statement implementations immutable -- [bug] JAVA-1554: Include VIEW and CDC in WriteType -- [improvement] JAVA-1498: Add a cache above Typesafe config -- [bug] JAVA-1547: Abort pending requests when connection dropped -- [new feature] JAVA-1497: Port timestamp generators from 3.x -- [improvement] JAVA-1539: Configure for deployment to Maven central -- [new feature] JAVA-1519: Close channel if number of orphan stream ids 
exceeds a configurable - threshold -- [new feature] JAVA-1529: Make configuration reloadable -- [new feature] JAVA-1502: Reprepare statements on newly added/up nodes -- [new feature] JAVA-1530: Add ResultSet.wasApplied -- [improvement] JAVA-1531: Merge CqlSession and Session -- [new feature] JAVA-1513: Handle batch statements -- [improvement] JAVA-1496: Improve log messages -- [new feature] JAVA-1501: Reprepare on the fly when we get an UNPREPARED response -- [bug] JAVA-1499: Wait for load balancing policy at cluster initialization -- [new feature] JAVA-1495: Add prepared statements - -## 3.11.5 -- [improvement] JAVA-3114: Shade io.dropwizard.metrics:metrics-core in shaded driver -- [improvement] JAVA-3115: SchemaChangeListener#onKeyspaceChanged can fire when keyspace has not changed if using SimpleStrategy replication - -## 3.11.4 -- [improvement] JAVA-3079: Upgrade Netty to 4.1.94, 3.x edition -- [improvement] JAVA-3082: Fix maven build for Apple-silicon -- [improvement] PR 1671: Fix LatencyAwarePolicy scale docstring - -## 3.11.3 - -- [improvement] JAVA-3023: Upgrade Netty to 4.1.77, 3.x edition - -## 3.11.2 - -- [improvement] JAVA-3008: Upgrade Netty to 4.1.75, 3.x edition -- [improvement] JAVA-2984: Upgrade Jackson to resolve high-priority CVEs - -## 3.11.1 - -- [bug] JAVA-2967: Support native transport peer information for DSE 6.8. -- [bug] JAVA-2976: Support missing protocol v5 error codes CAS_WRITE_UNKNOWN, CDC_WRITE_FAILURE. - -## 3.11.0 - -- [improvement] JAVA-2705: Remove protocol v5 beta status, add v6-beta. -- [bug] JAVA-2923: Detect and use Guava's new HostAndPort.getHost method. -- [bug] JAVA-2922: Switch to modern framing format inside a channel handler. -- [bug] JAVA-2924: Consider protocol version unsupported when server requires USE_BETA flag for it. - -### 3.10.2 - -- [bug] JAVA-2860: Avoid NPE if channel initialization crashes. - -### 3.10.1 - -- [bug] JAVA-2857: Fix NPE when built statements without parameters are logged at TRACE level. 
-- [bug] JAVA-2843: Successfully parse DSE table schema in OSS driver. - -### 3.10.0 - -- [improvement] JAVA-2676: Don't reschedule flusher after empty runs -- [new feature] JAVA-2772: Support new protocol v5 message format - -### 3.9.0 - -- [bug] JAVA-2627: Avoid logging error message including stack trace in request handler. -- [new feature] JAVA-2706: Add now_in_seconds to protocol v5 query messages. -- [improvement] JAVA-2730: Add support for Cassandra® 4.0 table options -- [improvement] JAVA-2702: Transient Replication Support for Cassandra® 4.0 - -### 3.8.0 - -- [new feature] JAVA-2356: Support for DataStax Cloud API. -- [improvement] JAVA-2483: Allow to provide secure bundle via URL. -- [improvement] JAVA-2499: Allow to read the secure bundle from an InputStream. -- [improvement] JAVA-2457: Detect CaaS and change default consistency. -- [improvement] JAVA-2485: Add errors for Cloud misconfiguration. -- [documentation] JAVA-2504: Migrate Cloud "getting started" page to driver manual. -- [improvement] JAVA-2516: Enable hostname validation with Cloud -- [bug] JAVA-2515: NEW_NODE and REMOVED_NODE events should trigger ADDED and REMOVED. - - -### 3.7.2 - -- [bug] JAVA-2249: Stop stripping trailing zeros in ByteOrderedTokens. -- [bug] JAVA-1492: Don't immediately reuse busy connections for another request. -- [bug] JAVA-2198: Handle UDTs with names that clash with collection types. -- [bug] JAVA-2204: Avoid memory leak when client holds onto a stale TableMetadata instance. - - -### 3.7.1 - -- [bug] JAVA-2174: Metadata.needsQuote should accept empty strings. -- [bug] JAVA-2193: Fix flaky tests in WarningsTest. - - -### 3.7.0 - -- [improvement] JAVA-2025: Include exception message in Abstract\*Codec.accepts(null). -- [improvement] JAVA-1980: Use covariant return types in RemoteEndpointAwareJdkSSLOptions.Builder methods. -- [documentation] JAVA-2062: Document frozen collection preference with Mapper. -- [bug] JAVA-2071: Fix NPE in ArrayBackedRow.toString(). 
-- [bug] JAVA-2070: Call onRemove instead of onDown when rack and/or DC information changes for a host. -- [improvement] JAVA-1256: Log parameters of BuiltStatement in QueryLogger. -- [documentation] JAVA-2074: Document preference for LZ4 over Snappy. -- [bug] JAVA-1612: Include netty-common jar in binary tarball. -- [improvement] JAVA-2003: Simplify CBUtil internal API to improve performance. -- [improvement] JAVA-2002: Reimplement TypeCodec.accepts to improve performance. -- [documentation] JAVA-2041: Deprecate cross-DC failover in DCAwareRoundRobinPolicy. -- [documentation] JAVA-1159: Document workaround for using tuple with udt field in Mapper. -- [documentation] JAVA-1964: Complete remaining "Coming Soon" sections in docs. -- [improvement] JAVA-1950: Log server side warnings returned from a query. -- [improvement] JAVA-2123: Allow to use QueryBuilder for building queries against Materialized Views. -- [bug] JAVA-2082: Avoid race condition during cluster close and schema refresh. - - -### 3.6.0 - -- [improvement] JAVA-1394: Add request-queue-depth metric. -- [improvement] JAVA-1857: Add Statement.setHost. -- [bug] JAVA-1920: Use nanosecond precision in LocalTimeCodec#format(). -- [bug] JAVA-1794: Driver tries to create a connection array of size -1. -- [new feature] JAVA-1899: Support virtual tables. -- [bug] JAVA-1908: TableMetadata.asCQLQuery does not add table option 'memtable_flush_period_in_ms' in the generated query. -- [bug] JAVA-1924: StatementWrapper setters should return the wrapping statement. -- [new feature] JAVA-1532: Add Codec support for Java 8's LocalDateTime and ZoneId. -- [improvement] JAVA-1786: Use Google code formatter. -- [bug] JAVA-1871: Change LOCAL\_SERIAL.isDCLocal() to return true. -- [documentation] JAVA-1902: Clarify unavailable & request error in DefaultRetryPolicy javadoc. -- [new feature] JAVA-1903: Add WhiteListPolicy.ofHosts. -- [bug] JAVA-1928: Fix GuavaCompatibility for Guava 26. 
-- [bug] JAVA-1935: Add null check in QueryConsistencyException.getHost. -- [improvement] JAVA-1771: Send driver name and version in STARTUP message. -- [improvement] JAVA-1388: Add dynamic port discovery for system.peers\_v2. -- [documentation] JAVA-1810: Note which setters are not propagated to PreparedStatement. -- [bug] JAVA-1944: Surface Read and WriteFailureException to RetryPolicy. -- [bug] JAVA-1211: Fix NPE in cluster close when cluster init fails. -- [bug] JAVA-1220: Fail fast on cluster init if previous init failed. -- [bug] JAVA-1929: Preempt session execute queries if session was closed. - -Merged from 3.5.x: - -- [bug] JAVA-1872: Retain table's views when processing table update. - - -### 3.5.0 - -- [improvement] JAVA-1448: TokenAwarePolicy should respect child policy ordering. -- [bug] JAVA-1751: Include defaultTimestamp length in encodedSize for protocol version >= 3. -- [bug] JAVA-1770: Fix message size when using Custom Payload. -- [documentation] JAVA-1760: Add metrics documentation. -- [improvement] JAVA-1765: Update dependencies to latest patch versions. -- [improvement] JAVA-1752: Deprecate DowngradingConsistencyRetryPolicy. -- [improvement] JAVA-1735: Log driver version on first use. -- [documentation] JAVA-1380: Add FAQ entry for errors arising from incompatibilities. -- [improvement] JAVA-1748: Support IS NOT NULL and != in query builder. -- [documentation] JAVA-1740: Mention C*2.2/3.0 incompatibilities in paging state manual. -- [improvement] JAVA-1725: Add a getNodeCount method to CCMAccess for easier automation. -- [new feature] JAVA-708: Add means to measure request sizes. -- [documentation] JAVA-1788: Add example for enabling host name verification to SSL docs. -- [improvement] JAVA-1791: Revert "JAVA-1677: Warn if auth is configured on the client but not the server." -- [bug] JAVA-1789: Account for flags in Prepare encodedSize. -- [bug] JAVA-1797: Use jnr-ffi version required by jnr-posix. 
- - -### 3.4.0 - -- [improvement] JAVA-1671: Remove unnecessary test on prepared statement metadata. -- [bug] JAVA-1694: Upgrade to jackson-databind 2.7.9.2 to address CVE-2017-15095. -- [documentation] JAVA-1685: Clarify recommendation on preparing SELECT *. -- [improvement] JAVA-1679: Improve error message on batch log write timeout. -- [improvement] JAVA-1672: Remove schema agreement check when repreparing on up. -- [improvement] JAVA-1677: Warn if auth is configured on the client but not the server. -- [new feature] JAVA-1651: Add NO_COMPACT startup option. -- [improvement] JAVA-1683: Add metrics to track writes to nodes. -- [new feature] JAVA-1229: Allow specifying the keyspace for individual queries. -- [improvement] JAVA-1682: Provide a way to record latencies for cancelled speculative executions. -- [improvement] JAVA-1717: Add metrics to latency-aware policy. -- [improvement] JAVA-1675: Remove dates from copyright headers. - -Merged from 3.3.x: - -- [bug] JAVA-1555: Include VIEW and CDC in WriteType. -- [bug] JAVA-1599: exportAsString improvements (sort, format, clustering order) -- [improvement] JAVA-1587: Deterministic ordering of columns used in Mapper#saveQuery -- [improvement] JAVA-1500: Add a metric to report number of in-flight requests. -- [bug] JAVA-1438: QueryBuilder check for empty orderings. -- [improvement] JAVA-1490: Allow zero delay for speculative executions. -- [documentation] JAVA-1607: Add FAQ entry for netty-transport-native-epoll. -- [bug] JAVA-1630: Fix Metadata.addIfAbsent. -- [improvement] JAVA-1619: Update QueryBuilder methods to support Iterable input. -- [improvement] JAVA-1527: Expose host_id and schema_version on Host metadata. -- [new feature] JAVA-1377: Add support for TWCS in SchemaBuilder. -- [improvement] JAVA-1631: Publish a sources jar for driver-core-tests. -- [improvement] JAVA-1632: Add a withIpPrefix(String) method to CCMBridge.Builder. -- [bug] JAVA-1639: VersionNumber does not fulfill equals/hashcode contract. 
-- [bug] JAVA-1613: Fix broken shaded Netty detection in NettyUtil. -- [bug] JAVA-1666: Fix keyspace export when a UDT has case-sensitive field names. -- [improvement] JAVA-1196: Include hash of result set metadata in prepared statement id. -- [improvement] JAVA-1670: Support user-provided JMX ports for CCMBridge. -- [improvement] JAVA-1661: Avoid String.toLowerCase if possible in Metadata. -- [improvement] JAVA-1659: Expose low-level flusher tuning options. -- [improvement] JAVA-1660: Support netty-transport-native-epoll in OSGi container. - - -### 3.3.2 - -- [bug] JAVA-1666: Fix keyspace export when a UDT has case-sensitive field names. -- [improvement] JAVA-1196: Include hash of result set metadata in prepared statement id. -- [improvement] JAVA-1670: Support user-provided JMX ports for CCMBridge. -- [improvement] JAVA-1661: Avoid String.toLowerCase if possible in Metadata. -- [improvement] JAVA-1659: Expose low-level flusher tuning options. -- [improvement] JAVA-1660: Support netty-transport-native-epoll in OSGi container. - - -### 3.3.1 - -- [bug] JAVA-1555: Include VIEW and CDC in WriteType. -- [bug] JAVA-1599: exportAsString improvements (sort, format, clustering order) -- [improvement] JAVA-1587: Deterministic ordering of columns used in Mapper#saveQuery -- [improvement] JAVA-1500: Add a metric to report number of in-flight requests. -- [bug] JAVA-1438: QueryBuilder check for empty orderings. -- [improvement] JAVA-1490: Allow zero delay for speculative executions. -- [documentation] JAVA-1607: Add FAQ entry for netty-transport-native-epoll. -- [bug] JAVA-1630: Fix Metadata.addIfAbsent. -- [improvement] JAVA-1619: Update QueryBuilder methods to support Iterable input. -- [improvement] JAVA-1527: Expose host_id and schema_version on Host metadata. -- [new feature] JAVA-1377: Add support for TWCS in SchemaBuilder. -- [improvement] JAVA-1631: Publish a sources jar for driver-core-tests. 
-- [improvement] JAVA-1632: Add a withIpPrefix(String) method to CCMBridge.Builder. -- [bug] JAVA-1639: VersionNumber does not fulfill equals/hashcode contract. -- [bug] JAVA-1613: Fix broken shaded Netty detection in NettyUtil. - - -### 3.3.0 - -- [bug] JAVA-1469: Update LoggingRetryPolicy to deal with SLF4J-353. -- [improvement] JAVA-1203: Upgrade Metrics to allow usage in OSGi. -- [bug] JAVA-1407: KeyspaceMetadata exportAsString should export user types in topological sort order. -- [bug] JAVA-1455: Mapper support using unset for null values. -- [bug] JAVA-1464: Allow custom codecs with non public constructors in @Param. -- [bug] JAVA-1470: Querying multiple pages overrides WrappedStatement. -- [improvement] JAVA-1428: Upgrade logback and jackson dependencies. -- [documentation] JAVA-1463: Revisit speculative execution docs. -- [documentation] JAVA-1466: Revisit timestamp docs. -- [documentation] JAVA-1445: Clarify how nodes are penalized in LatencyAwarePolicy docs. -- [improvement] JAVA-1446: Support 'DEFAULT UNSET' in Query Builder JSON Insert. -- [improvement] JAVA-1443: Add groupBy method to Select statement. -- [improvement] JAVA-1458: Check thread in mapper sync methods. -- [improvement] JAVA-1488: Upgrade Netty to 4.0.47.Final. -- [improvement] JAVA-1460: Add speculative execution number to ExecutionInfo -- [improvement] JAVA-1431: Improve error handling during pool initialization. - - -### 3.2.0 - -- [new feature] JAVA-1347: Add support for duration type. -- [new feature] JAVA-1248: Implement "beta" flag for native protocol v5. -- [new feature] JAVA-1362: Send query options flags as [int] for Protocol V5+. -- [new feature] JAVA-1364: Enable creation of SSLHandler with remote address information. -- [improvement] JAVA-1367: Make protocol negotiation more resilient. -- [bug] JAVA-1397: Handle duration as native datatype in protocol v5+. -- [improvement] JAVA-1308: CodecRegistry performance improvements. 
-- [improvement] JAVA-1287: Add CDC to TableOptionsMetadata and Schema Builder. -- [improvement] JAVA-1392: Reduce lock contention in RPTokenFactory. -- [improvement] JAVA-1328: Provide compatibility with Guava 20. -- [improvement] JAVA-1247: Disable idempotence warnings. -- [improvement] JAVA-1286: Support setting and retrieving udt fields in QueryBuilder. -- [bug] JAVA-1415: Correctly report if a UDT column is frozen. -- [bug] JAVA-1418: Make Guava version detection more reliable. -- [new feature] JAVA-1174: Add ifNotExists option to mapper. -- [improvement] JAVA-1414: Optimize Metadata.escapeId and Metadata.handleId. -- [improvement] JAVA-1310: Make mapper's ignored properties configurable. -- [improvement] JAVA-1316: Add strategy for resolving properties into CQL names. -- [bug] JAVA-1424: Handle new WRITE_FAILURE and READ_FAILURE format in v5 protocol. - -Merged from 3.1.x branch: - -- [bug] JAVA-1371: Reintroduce connection pool timeout. -- [bug] JAVA-1313: Copy SerialConsistencyLevel to PreparedStatement. -- [documentation] JAVA-1334: Clarify documentation of method `addContactPoints`. -- [improvement] JAVA-1357: Document that getReplicas only returns replicas of the last token in range. -- [bug] JAVA-1404: Fix min token handling in TokenRange.contains. -- [bug] JAVA-1429: Prevent heartbeats until connection is fully initialized. - - -### 3.1.4 - -Merged from 3.0.x branch: - -- [bug] JAVA-1371: Reintroduce connection pool timeout. -- [bug] JAVA-1313: Copy SerialConsistencyLevel to PreparedStatement. -- [documentation] JAVA-1334: Clarify documentation of method `addContactPoints`. -- [improvement] JAVA-1357: Document that getReplicas only returns replicas of the last token in range. - - -### 3.1.3 - -Merged from 3.0.x branch: - -- [bug] JAVA-1330: Add un/register for SchemaChangeListener in DelegatingCluster -- [bug] JAVA-1351: Include Custom Payload in Request.copy. -- [bug] JAVA-1346: Reset heartbeat only on client reads (not writes). 
-- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. - - -### 3.1.2 - -- [bug] JAVA-1321: Wrong OSGi dependency version for Guava. - -Merged from 3.0.x branch: - -- [bug] JAVA-1312: QueryBuilder modifies selected columns when manually selected. -- [improvement] JAVA-1303: Add missing BoundStatement.setRoutingKey(ByteBuffer...) -- [improvement] JAVA-262: Make internal executors customizable - - -### 3.1.1 - -- [bug] JAVA-1284: ClockFactory should check system property before attempting to load Native class. -- [bug] JAVA-1255: Allow nested UDTs to be used in Mapper. -- [bug] JAVA-1279: Mapper should exclude Groovy's "metaClass" property when looking for mapped properties - -Merged from 3.0.x branch: - -- [improvement] JAVA-1246: Driver swallows the real exception in a few cases -- [improvement] JAVA-1261: Throw error when attempting to page in I/O thread. -- [bug] JAVA-1258: Regression: Mapper cannot map a materialized view after JAVA-1126. -- [bug] JAVA-1101: Batch and BatchStatement should consider inner statements to determine query idempotence -- [improvement] JAVA-1262: Use ParseUtils for quoting & unquoting. -- [improvement] JAVA-1275: Use Netty's default thread factory -- [bug] JAVA-1285: QueryBuilder routing key auto-discovery should handle case-sensitive column names. -- [bug] JAVA-1283: Don't cache failed query preparations in the mapper. -- [improvement] JAVA-1277: Expose AbstractSession.checkNotInEventLoop. -- [bug] JAVA-1272: BuiltStatement not able to print its query string if it contains mapped UDTs. -- [bug] JAVA-1292: 'Adjusted frame length' error breaks driver's ability to read data. -- [improvement] JAVA-1293: Make DecoderForStreamIdSize.MAX_FRAME_LENGTH configurable. -- [improvement] JAVA-1053: Add a metric for authentication errors -- [improvement] JAVA-1263: Eliminate unnecessary memory copies in FrameCompressor implementations. 
-- [improvement] JAVA-893: Make connection pool non-blocking - - -### 3.1.0 - -- [new feature] JAVA-1153: Add PER PARTITION LIMIT to Select QueryBuilder. -- [improvement] JAVA-743: Add JSON support to QueryBuilder. -- [improvement] JAVA-1233: Update HdrHistogram to 2.1.9. -- [improvement] JAVA-1233: Update Snappy to 1.1.2.6. -- [bug] JAVA-1161: Preserve full time zone info in ZonedDateTimeCodec and DateTimeCodec. -- [new feature] JAVA-1157: Allow asynchronous paging of Mapper Result. -- [improvement] JAVA-1212: Don't retry non-idempotent statements by default. -- [improvement] JAVA-1192: Make EventDebouncer settings updatable at runtime. -- [new feature] JAVA-541: Add polymorphism support to object mapper. -- [new feature] JAVA-636: Allow @Column annotations on getters/setters as well as fields. -- [new feature] JAVA-984: Allow non-void setters in object mapping. -- [new feature] JAVA-1055: Add ErrorAware load balancing policy. - -Merged from 3.0.x branch: - -- [bug] JAVA-1179: Request objects should be copied when executed. -- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. -- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. -- [bug] JAVA-1132: Executing bound statement with no variables results in exception with protocol v1. -- [improvement] JAVA-1040: SimpleStatement parameters support in QueryLogger. -- [improvement] JAVA-1151: Fail fast if HdrHistogram is not in the classpath. -- [improvement] JAVA-1154: Allow individual Statement to cancel the read timeout. -- [bug] JAVA-1074: Fix documentation around default timestamp generator. -- [improvement] JAVA-1109: Document SSLOptions changes in upgrade guide. -- [improvement] JAVA-1065: Add method to create token from partition key values. -- [improvement] JAVA-1136: Enable JDK signature check in module driver-extras. -- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. 
-- [bug] JAVA-1140: Use same connection to check for schema agreement after a DDL query. -- [improvement] JAVA-1113: Support Cassandra 3.4 LIKE operator in QueryBuilder. -- [improvement] JAVA-1086: Support Cassandra 3.2 CAST function in QueryBuilder. -- [bug] JAVA-1095: Check protocol version for custom payload before sending the query. -- [improvement] JAVA-1133: Add OSGi headers to cassandra-driver-extras. -- [bug] JAVA-1137: Incorrect string returned by DataType.asFunctionParameterString() for collections and tuples. -- [bug] JAVA-1046: (Dynamic)CompositeTypes need to be parsed as string literal, not blob. -- [improvement] JAVA-1164: Clarify documentation on Host.listenAddress and broadcastAddress. -- [improvement] JAVA-1171: Add Host method to determine if DSE Graph is enabled. -- [improvement] JAVA-1069: Bootstrap driver-examples module. -- [documentation] JAVA-1150: Add example and FAQ entry about ByteBuffer/BLOB. -- [improvement] JAVA-1011: Expose PoolingOptions default values. -- [improvement] JAVA-630: Don't process DOWN events for nodes that have active connections. -- [improvement] JAVA-851: Improve UUIDs javadoc with regard to user-provided timestamps. -- [improvement] JAVA-979: Update javadoc for RegularStatement toString() and getQueryString() to indicate that consistency level and other parameters are not maintained in the query string. -- [bug] JAVA-1068: Unwrap StatementWrappers when hashing the paging state. -- [improvement] JAVA-1021: Improve error message when connect() is called with an invalid keyspace name. -- [improvement] JAVA-879: Mapper.map() accepts mapper-generated and user queries. -- [bug] JAVA-1100: Exception when connecting with shaded java driver in OSGI -- [bug] JAVA-1064: getTable create statement doesn't properly handle quotes in primary key. -- [bug] JAVA-1089: Set LWT made from BuiltStatements to non-idempotent. -- [improvement] JAVA-923: Position idempotent flag on object mapper queries. 
-- [bug] JAVA-1070: The Mapper should not prepare queries synchronously. -- [new feature] JAVA-982: Introduce new method ConsistencyLevel.isSerial(). -- [bug] JAVA-764: Retry with the normal consistency level (not the serial one) when a write times out on the Paxos phase. -- [improvement] JAVA-852: Ignore peers with null entries during discovery. -- [bug] JAVA-1005: DowngradingConsistencyRetryPolicy does not work with EACH_QUORUM when 1 DC is down. -- [bug] JAVA-1002: Avoid deadlock when re-preparing a statement on other hosts. -- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. -- [bug] JAVA-1152: Fix NPE at ControlConnection.refreshNodeListAndTokenMap(). - -Merged from 2.1 branch: - -- [improvement] JAVA-1038: Fetch node info by rpc_address if its broadcast_address is not in system.peers. -- [improvement] JAVA-888: Add cluster-wide percentile tracker. -- [improvement] JAVA-963: Automatically register PercentileTracker from components that use it. -- [new feature] JAVA-1019: SchemaBuilder support for CREATE/ALTER/DROP KEYSPACE. -- [bug] JAVA-727: Allow monotonic timestamp generators to drift in the future + use microsecond precision when possible. -- [improvement] JAVA-444: Add Java process information to UUIDs.makeNode() hash. - - -### 3.0.7 - -- [bug] JAVA-1371: Reintroduce connection pool timeout. -- [bug] JAVA-1313: Copy SerialConsistencyLevel to PreparedStatement. -- [documentation] JAVA-1334: Clarify documentation of method `addContactPoints`. -- [improvement] JAVA-1357: Document that getReplicas only returns replicas of the last token in range. - - -### 3.0.6 - -- [bug] JAVA-1330: Add un/register for SchemaChangeListener in DelegatingCluster -- [bug] JAVA-1351: Include Custom Payload in Request.copy. -- [bug] JAVA-1346: Reset heartbeat only on client reads (not writes). -- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. 
- - -### 3.0.5 - -- [bug] JAVA-1312: QueryBuilder modifies selected columns when manually selected. -- [improvement] JAVA-1303: Add missing BoundStatement.setRoutingKey(ByteBuffer...) -- [improvement] JAVA-262: Make internal executors customizable -- [bug] JAVA-1320: prevent unnecessary task creation on empty pool - - -### 3.0.4 - -- [improvement] JAVA-1246: Driver swallows the real exception in a few cases -- [improvement] JAVA-1261: Throw error when attempting to page in I/O thread. -- [bug] JAVA-1258: Regression: Mapper cannot map a materialized view after JAVA-1126. -- [bug] JAVA-1101: Batch and BatchStatement should consider inner statements to determine query idempotence -- [improvement] JAVA-1262: Use ParseUtils for quoting & unquoting. -- [improvement] JAVA-1275: Use Netty's default thread factory -- [bug] JAVA-1285: QueryBuilder routing key auto-discovery should handle case-sensitive column names. -- [bug] JAVA-1283: Don't cache failed query preparations in the mapper. -- [improvement] JAVA-1277: Expose AbstractSession.checkNotInEventLoop. -- [bug] JAVA-1272: BuiltStatement not able to print its query string if it contains mapped UDTs. -- [bug] JAVA-1292: 'Adjusted frame length' error breaks driver's ability to read data. -- [improvement] JAVA-1293: Make DecoderForStreamIdSize.MAX_FRAME_LENGTH configurable. -- [improvement] JAVA-1053: Add a metric for authentication errors -- [improvement] JAVA-1263: Eliminate unnecessary memory copies in FrameCompressor implementations. -- [improvement] JAVA-893: Make connection pool non-blocking - - -### 3.0.3 - -- [improvement] JAVA-1147: Upgrade Netty to 4.0.37. -- [bug] JAVA-1213: Allow updates and inserts to BLOB column using read-only ByteBuffer. -- [bug] JAVA-1209: ProtocolOptions.getProtocolVersion() should return null instead of throwing NPE if Cluster has not - been init'd. -- [improvement] JAVA-1204: Update documentation to indicate tcnative version requirement. 
-- [bug] JAVA-1186: Fix duplicated hosts in DCAwarePolicy warn message. -- [bug] JAVA-1187: Fix warning message when local CL used with RoundRobinPolicy. -- [improvement] JAVA-1175: Warn if DCAwarePolicy configuration is inconsistent. -- [bug] JAVA-1139: ConnectionException.getMessage() throws NPE if address is null. -- [bug] JAVA-1202: Handle null rpc_address when checking schema agreement. -- [improvement] JAVA-1198: Document that BoundStatement is not thread-safe. -- [improvement] JAVA-1200: Upgrade LZ4 to 1.3.0. -- [bug] JAVA-1232: Fix NPE in IdempotenceAwareRetryPolicy.isIdempotent. -- [improvement] JAVA-1227: Document "SELECT *" issue with prepared statement. -- [bug] JAVA-1160: Fix NPE in VersionNumber.getPreReleaseLabels(). -- [improvement] JAVA-1126: Handle schema changes in Mapper. -- [bug] JAVA-1193: Refresh token and replica metadata synchronously when schema is altered. -- [bug] JAVA-1120: Skip schema refresh debouncer when checking for agreement as a result of schema change made by client. -- [improvement] JAVA-1242: Fix driver-core dependency in driver-stress -- [improvement] JAVA-1235: Move the query to the end of "re-preparing .." log message as a key value. - - -### 3.0.2 - -Merged from 2.1 branch: - -- [bug] JAVA-1179: Request objects should be copied when executed. -- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. -- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. - - -### 3.0.1 - -- [bug] JAVA-1132: Executing bound statement with no variables results in exception with protocol v1. -- [improvement] JAVA-1040: SimpleStatement parameters support in QueryLogger. -- [improvement] JAVA-1151: Fail fast if HdrHistogram is not in the classpath. -- [improvement] JAVA-1154: Allow individual Statement to cancel the read timeout. -- [bug] JAVA-1074: Fix documentation around default timestamp generator. -- [improvement] JAVA-1109: Document SSLOptions changes in upgrade guide. 
-- [improvement] JAVA-1065: Add method to create token from partition key values. -- [improvement] JAVA-1136: Enable JDK signature check in module driver-extras. -- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. -- [bug] JAVA-1140: Use same connection to check for schema agreement after a DDL query. -- [improvement] JAVA-1113: Support Cassandra 3.4 LIKE operator in QueryBuilder. -- [improvement] JAVA-1086: Support Cassandra 3.2 CAST function in QueryBuilder. -- [bug] JAVA-1095: Check protocol version for custom payload before sending the query. -- [improvement] JAVA-1133: Add OSGi headers to cassandra-driver-extras. -- [bug] JAVA-1137: Incorrect string returned by DataType.asFunctionParameterString() for collections and tuples. -- [bug] JAVA-1046: (Dynamic)CompositeTypes need to be parsed as string literal, not blob. -- [improvement] JAVA-1164: Clarify documentation on Host.listenAddress and broadcastAddress. -- [improvement] JAVA-1171: Add Host method to determine if DSE Graph is enabled. -- [improvement] JAVA-1069: Bootstrap driver-examples module. -- [documentation] JAVA-1150: Add example and FAQ entry about ByteBuffer/BLOB. - -Merged from 2.1 branch: - -- [improvement] JAVA-1011: Expose PoolingOptions default values. -- [improvement] JAVA-630: Don't process DOWN events for nodes that have active connections. -- [improvement] JAVA-851: Improve UUIDs javadoc with regard to user-provided timestamps. -- [improvement] JAVA-979: Update javadoc for RegularStatement toString() and getQueryString() to indicate that consistency level and other parameters are not maintained in the query string. -- [bug] JAVA-1068: Unwrap StatementWrappers when hashing the paging state. -- [improvement] JAVA-1021: Improve error message when connect() is called with an invalid keyspace name. -- [improvement] JAVA-879: Mapper.map() accepts mapper-generated and user queries. 
-- [bug] JAVA-1100: Exception when connecting with shaded java driver in OSGI -- [bug] JAVA-1064: getTable create statement doesn't properly handle quotes in primary key. -- [bug] JAVA-1089: Set LWT made from BuiltStatements to non-idempotent. -- [improvement] JAVA-923: Position idempotent flag on object mapper queries. -- [bug] JAVA-1070: The Mapper should not prepare queries synchronously. -- [new feature] JAVA-982: Introduce new method ConsistencyLevel.isSerial(). -- [bug] JAVA-764: Retry with the normal consistency level (not the serial one) when a write times out on the Paxos phase. -- [improvement] JAVA-852: Ignore peers with null entries during discovery. -- [bug] JAVA-1005: DowngradingConsistencyRetryPolicy does not work with EACH_QUORUM when 1 DC is down. -- [bug] JAVA-1002: Avoid deadlock when re-preparing a statement on other hosts. -- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. -- [bug] JAVA-1152: Fix NPE at ControlConnection.refreshNodeListAndTokenMap(). - - -### 3.0.0 - -- [bug] JAVA-1034: fix metadata parser for collections of custom types. -- [improvement] JAVA-1035: Expose host broadcast_address and listen_address if available. -- [new feature] JAVA-1037: Allow named parameters in simple statements. -- [improvement] JAVA-1033: Allow per-statement read timeout. -- [improvement] JAVA-1042: Include DSE version and workload in Host data. - -Merged from 2.1 branch: - -- [improvement] JAVA-1030: Log token to replica map computation times. -- [bug] JAVA-1039: Minor bugs in Event Debouncer. - - -### 3.0.0-rc1 - -- [bug] JAVA-890: fix mapper for case-sensitive UDT. - - -### 3.0.0-beta1 - -- [bug] JAVA-993: Support for "custom" types after CASSANDRA-10365. -- [bug] JAVA-999: Handle unset parameters in QueryLogger. -- [bug] JAVA-998: SchemaChangeListener not invoked for Functions or Aggregates having UDT arguments. -- [bug] JAVA-1009: use CL ONE to compute query plan when reconnecting - control connection. 
-- [improvement] JAVA-1003: Change default consistency level to LOCAL_ONE (amends JAVA-926). -- [improvement] JAVA-863: Idempotence propagation in prepared statements. -- [improvement] JAVA-996: Make CodecRegistry available to ProtocolDecoder. -- [bug] JAVA-819: Driver shouldn't retry on client timeout if statement is not idempotent. -- [improvement] JAVA-1007: Make SimpleStatement and QueryBuilder "detached" again. - -Merged from 2.1 branch: - -- [improvement] JAVA-989: Include keyspace name when invalid replication found when generating token map. -- [improvement] JAVA-664: Reduce heap consumption for TokenMap. -- [bug] JAVA-994: Don't call on(Up|Down|Add|Remove) methods if Cluster is closed/closing. - - -### 3.0.0-alpha5 - -- [improvement] JAVA-958: Make TableOrView.Order visible. -- [improvement] JAVA-968: Update metrics to the latest version. -- [improvement] JAVA-965: Improve error handling for when a non-type 1 UUID is given to bind() on a timeuuid column. -- [improvement] JAVA-885: Pass the authenticator name from the server to the auth provider. -- [improvement] JAVA-961: Raise an exception when an older version of guava (<16.01) is found. -- [bug] JAVA-972: TypeCodec.parse() implementations should be case insensitive when checking for keyword NULL. -- [bug] JAVA-971: Make type codecs invariant. -- [bug] JAVA-986: Update documentation links to reference 3.0. -- [improvement] JAVA-841: Refactor SSLOptions API. -- [improvement] JAVA-948: Don't limit cipher suites by default. -- [improvement] JAVA-917: Document SSL configuration. -- [improvement] JAVA-936: Adapt schema metadata parsing logic to new storage format of CQL types in C* 3.0. -- [new feature] JAVA-846: Provide custom codecs library as an extra module. -- [new feature] JAVA-742: Codec Support for JSON. -- [new feature] JAVA-606: Codec support for Java 8. -- [new feature] JAVA-565: Codec support for Java arrays. -- [new feature] JAVA-605: Codec support for Java enums. 
-- [bug] JAVA-884: Fix UDT mapper to process fields in the correct order. - -Merged from 2.1 branch: - -- [bug] JAVA-854: avoid early return in Cluster.init when a node doesn't support the protocol version. -- [bug] JAVA-978: Fix quoting issue that caused Mapper.getTableMetadata() to return null. -- [improvement] JAVA-920: Downgrade "error creating pool" message to WARN. -- [bug] JAVA-954: Don't trigger reconnection before initialization complete. -- [improvement] JAVA-914: Avoid rejected tasks at shutdown. -- [improvement] JAVA-921: Add SimpleStatement.getValuesCount(). -- [bug] JAVA-901: Move call to connection.release() out of cancelHandler. -- [bug] JAVA-960: Avoid race in control connection shutdown. -- [bug] JAVA-656: Fix NPE in ControlConnection.updateLocationInfo. -- [bug] JAVA-966: Count uninitialized connections in conviction policy. -- [improvement] JAVA-917: Document SSL configuration. -- [improvement] JAVA-652: Add DCAwareRoundRobinPolicy builder. -- [improvement] JAVA-808: Add generic filtering policy that can be used to exclude specific DCs. -- [bug] JAVA-988: Metadata.handleId should handle escaped double quotes. -- [bug] JAVA-983: QueryBuilder cannot handle collections containing function calls. - - -### 3.0.0-alpha4 - -- [improvement] JAVA-926: Change default consistency level to LOCAL_QUORUM. -- [bug] JAVA-942: Fix implementation of UserType.hashCode(). -- [improvement] JAVA-877: Don't delay UP/ADDED notifications if protocol version = V4. -- [improvement] JAVA-938: Parse 'extensions' column in table metadata. -- [bug] JAVA-900: Fix Configuration builder to allow disabled metrics. -- [new feature] JAVA-902: Prepare API for async query trace. -- [new feature] JAVA-930: Add BoundStatement#unset. -- [bug] JAVA-946: Make table metadata options class visible. -- [bug] JAVA-939: Add crcCheckChance to TableOptionsMetadata#equals/hashCode. -- [bug] JAVA-922: Make TypeCodec return mutable collections. 
-- [improvement] JAVA-932: Limit visibility of codec internals. -- [improvement] JAVA-934: Warn if a custom codec collides with an existing one. -- [improvement] JAVA-940: Allow typed getters/setters to target any CQL type. -- [bug] JAVA-950: Fix Cluster.connect with a case-sensitive keyspace. -- [bug] JAVA-953: Fix MaterializedViewMetadata when base table name is case sensitive. - - -### 3.0.0-alpha3 - -- [new feature] JAVA-571: Support new system tables in C* 3.0. -- [improvement] JAVA-919: Move crc_check_chance out of compressions options. - -Merged from 2.0 branch: - -- [improvement] JAVA-718: Log streamid at the trace level on sending request and receiving response. -- [bug] JAVA-796: Fix SpeculativeExecutionPolicy.init() and close() are never called. -- [improvement] JAVA-710: Suppress unnecessary warning at shutdown. -- [improvement] #340: Allow DNS name with multiple A-records as contact point. -- [bug] JAVA-794: Allow tracing across multiple result pages. -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. -- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. -- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). 
-- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. -- [improvement] JAVA-765: Provide API to retrieve values of a Parameterized SimpleStatement. -- [improvement] JAVA-827: implement UPDATE .. IF EXISTS in QueryBuilder. -- [improvement] JAVA-618: Randomize contact points list to prevent hotspots. -- [improvement] JAVA-720: Surface the coordinator used on query failure. -- [bug] JAVA-792: Handle contact points removed during init. -- [improvement] JAVA-719: Allow PlainTextAuthProvider to change its credentials at runtime. -- [new feature] JAVA-151: Make it possible to register for SchemaChange Events. -- [improvement] JAVA-861: Downgrade "Asked to rebuild table" log from ERROR to INFO level. -- [improvement] JAVA-797: Provide an option to prepare statements only on one node. -- [improvement] JAVA-658: Provide an option to not re-prepare all statements in onUp. -- [improvement] JAVA-853: Customizable creation of netty timer. -- [bug] JAVA-859: Avoid quadratic ring processing with invalid replication factors. -- [improvement] JAVA-657: Debounce control connection queries. -- [bug] JAVA-784: LoadBalancingPolicy.distance() called before init(). -- [new feature] JAVA-828: Make driver-side metadata optional. -- [improvement] JAVA-544: Allow hosts to remain partially up. -- [improvement] JAVA-821, JAVA-822: Remove internal blocking calls and expose async session - creation. -- [improvement] JAVA-725: Use parallel calls when re-preparing statement on other - hosts. -- [bug] JAVA-629: Don't use connection timeout for unrelated internal queries. -- [bug] JAVA-892: Fix NPE in speculative executions when metrics disabled. - - -### 3.0.0-alpha2 - -- [new feature] JAVA-875, JAVA-882: Move secondary index metadata out of column definitions. - -Merged from 2.2 branch: - -- [bug] JAVA-847: Propagate CodecRegistry to nested UDTs. -- [improvement] JAVA-848: Ability to store a default, shareable CodecRegistry - instance. 
-- [bug] JAVA-880: Treat empty ByteBuffers as empty values in TupleCodec and - UDTCodec. - - -### 3.0.0-alpha1 - -- [new feature] JAVA-876: Support new system tables in C* 3.0.0-alpha1. - -Merged from 2.2 branch: - -- [improvement] JAVA-810: Rename DateWithoutTime to LocalDate. -- [bug] JAVA-816: DateCodec does not format values correctly. -- [bug] JAVA-817: TimeCodec does not format values correctly. -- [bug] JAVA-818: TypeCodec.getDataTypeFor() does not handle LocalDate instances. -- [improvement] JAVA-836: Make ResultSet#fetchMoreResult return a - ListenableFuture. -- [improvement] JAVA-843: Disable frozen checks in mapper. -- [improvement] JAVA-721: Allow user to register custom type codecs. -- [improvement] JAVA-722: Support custom type codecs in mapper. - - -### 2.2.0-rc3 - -- [bug] JAVA-847: Propagate CodecRegistry to nested UDTs. -- [improvement] JAVA-848: Ability to store a default, shareable CodecRegistry - instance. -- [bug] JAVA-880: Treat empty ByteBuffers as empty values in TupleCodec and - UDTCodec. - - -### 2.2.0-rc2 - -- [improvement] JAVA-810: Rename DateWithoutTime to LocalDate. -- [bug] JAVA-816: DateCodec does not format values correctly. -- [bug] JAVA-817: TimeCodec does not format values correctly. -- [bug] JAVA-818: TypeCodec.getDataTypeFor() does not handle LocalDate instances. -- [improvement] JAVA-836: Make ResultSet#fetchMoreResult return a - ListenableFuture. -- [improvement] JAVA-843: Disable frozen checks in mapper. -- [improvement] JAVA-721: Allow user to register custom type codecs. -- [improvement] JAVA-722: Support custom type codecs in mapper. - -Merged from 2.1 branch: - -- [bug] JAVA-834: Special case check for 'null' string in index_options column. -- [improvement] JAVA-835: Allow accessor methods with less parameters in case - named bind markers are repeated. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-715: Make NativeColumnType a top-level class. 
-- [improvement] JAVA-700: Expose ProtocolVersion#toInt. -- [bug] JAVA-542: Handle void return types in accessors. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-713: HashMap throws an OOM Exception when logging level is set to TRACE. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-732: Expose KEYS and FULL indexing options in IndexMetadata. -- [improvement] JAVA-589: Allow @Enumerated in Accessor method parameters. -- [improvement] JAVA-554: Allow access to table metadata from Mapper. -- [improvement] JAVA-661: Provide a way to map computed fields. -- [improvement] JAVA-824: Ignore missing columns in mapper. -- [bug] JAVA-724: Preserve default timestamp for retries and speculative executions. -- [improvement] JAVA-738: Use same pool implementation for protocol v2 and v3. -- [improvement] JAVA-677: Support CONTAINS / CONTAINS KEY in QueryBuilder. -- [improvement] JAVA-477/JAVA-540: Add USING options in mapper for delete and save - operations. -- [improvement] JAVA-473: Add mapper option to configure whether to save null fields. - -Merged from 2.0 branch: - -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. -- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. 
-- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). -- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. - - -### 2.2.0-rc1 - -- [new feature] JAVA-783: Protocol V4 enum support. -- [new feature] JAVA-776: Use PK columns in protocol v4 PREPARED response. -- [new feature] JAVA-777: Distinguish NULL and UNSET values. -- [new feature] JAVA-779: Add k/v payload for 3rd party usage. -- [new feature] JAVA-780: Expose server-side warnings on ExecutionInfo. -- [new feature] JAVA-749: Expose new read/write failure exceptions. -- [new feature] JAVA-747: Expose function and aggregate metadata. -- [new feature] JAVA-778: Add new client exception for CQL function failure. -- [improvement] JAVA-700: Expose ProtocolVersion#toInt. -- [new feature] JAVA-404: Support new C* 2.2 CQL date and time types. - -Merged from 2.1 branch: - -- [improvement] JAVA-782: Unify "Target" enum for schema elements. - - -### 2.1.10.2 - -Merged from 2.0 branch: - -- [bug] JAVA-1179: Request objects should be copied when executed. -- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. -- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. - - -### 2.1.10.1 - -- [bug] JAVA-1152: Fix NPE at ControlConnection.refreshNodeListAndTokenMap(). -- [bug] JAVA-1156: Fix NPE at TableMetadata.equals(). - - -### 2.1.10 - -- [bug] JAVA-988: Metadata.handleId should handle escaped double quotes. -- [bug] JAVA-983: QueryBuilder cannot handle collections containing function calls. -- [improvement] JAVA-863: Idempotence propagation in PreparedStatements. -- [bug] JAVA-937: TypeCodec static initializers not always correctly executed. -- [improvement] JAVA-989: Include keyspace name when invalid replication found when generating token map. -- [improvement] JAVA-664: Reduce heap consumption for TokenMap. 
-- [improvement] JAVA-1030: Log token to replica map computation times. -- [bug] JAVA-1039: Minor bugs in Event Debouncer. -- [improvement] JAVA-843: Disable frozen checks in mapper. -- [improvement] JAVA-833: Improve message when a nested type can't be serialized. -- [improvement] JAVA-1011: Expose PoolingOptions default values. -- [improvement] JAVA-630: Don't process DOWN events for nodes that have active connections. -- [improvement] JAVA-851: Improve UUIDs javadoc with regard to user-provided timestamps. -- [improvement] JAVA-979: Update javadoc for RegularStatement toString() and getQueryString() to indicate that consistency level and other parameters are not maintained in the query string. -- [improvement] JAVA-1038: Fetch node info by rpc_address if its broadcast_address is not in system.peers. -- [improvement] JAVA-974: Validate accessor parameter types against bound statement. -- [bug] JAVA-1068: Unwrap StatementWrappers when hashing the paging state. -- [bug] JAVA-831: Mapper can't load an entity where the PK is a UDT. -- [improvement] JAVA-1021: Improve error message when connect() is called with an invalid keyspace name. -- [improvement] JAVA-879: Mapper.map() accepts mapper-generated and user queries. -- [bug] JAVA-1100: Exception when connecting with shaded java driver in OSGI -- [bug] JAVA-819: Expose more errors in RetryPolicy + provide idempotent-aware wrapper. -- [improvement] JAVA-1040: SimpleStatement parameters support in QueryLogger. -- [bug] JAVA-1064: getTable create statement doesn't properly handle quotes in primary key. -- [improvement] JAVA-888: Add cluster-wide percentile tracker. -- [improvement] JAVA-963: Automatically register PercentileTracker from components that use it. -- [bug] JAVA-1089: Set LWT made from BuiltStatements to non-idempotent. -- [improvement] JAVA-923: Position idempotent flag on object mapper queries. -- [new feature] JAVA-1019: SchemaBuilder support for CREATE/ALTER/DROP KEYSPACE. 
-- [bug] JAVA-1070: The Mapper should not prepare queries synchronously. -- [new feature] JAVA-982: Introduce new method ConsistencyLevel.isSerial(). -- [bug] JAVA-764: Retry with the normal consistency level (not the serial one) when a write times out on the Paxos phase. -- [bug] JAVA-727: Allow monotonic timestamp generators to drift in the future + use microsecond precision when possible. -- [improvement] JAVA-444: Add Java process information to UUIDs.makeNode() hash. -- [improvement] JAVA-977: Preserve original cause when BuiltStatement value can't be serialized. -- [bug] JAVA-1094: Backport TypeCodec parse and format fixes from 3.0. -- [improvement] JAVA-852: Ignore peers with null entries during discovery. -- [bug] JAVA-1132: Executing bound statement with no variables results in exception with protocol v1. -- [bug] JAVA-1005: DowngradingConsistencyRetryPolicy does not work with EACH_QUORUM when 1 DC is down. -- [bug] JAVA-1002: Avoid deadlock when re-preparing a statement on other hosts. - -Merged from 2.0 branch: - -- [bug] JAVA-994: Don't call on(Up|Down|Add|Remove) methods if Cluster is closed/closing. -- [improvement] JAVA-805: Document that metrics are null until Cluster is initialized. -- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. - - -### 2.1.9 - -- [bug] JAVA-942: Fix implementation of UserType.hashCode(). -- [bug] JAVA-854: avoid early return in Cluster.init when a node doesn't support the protocol version. -- [bug] JAVA-978: Fix quoting issue that caused Mapper.getTableMetadata() to return null. - -Merged from 2.0 branch: - -- [bug] JAVA-950: Fix Cluster.connect with a case-sensitive keyspace. -- [improvement] JAVA-920: Downgrade "error creating pool" message to WARN. -- [bug] JAVA-954: Don't trigger reconnection before initialization complete. -- [improvement] JAVA-914: Avoid rejected tasks at shutdown. -- [improvement] JAVA-921: Add SimpleStatement.getValuesCount(). 
-- [bug] JAVA-901: Move call to connection.release() out of cancelHandler. -- [bug] JAVA-960: Avoid race in control connection shutdown. -- [bug] JAVA-656: Fix NPE in ControlConnection.updateLocationInfo. -- [bug] JAVA-966: Count uninitialized connections in conviction policy. -- [improvement] JAVA-917: Document SSL configuration. -- [improvement] JAVA-652: Add DCAwareRoundRobinPolicy builder. -- [improvement] JAVA-808: Add generic filtering policy that can be used to exclude specific DCs. - - -### 2.1.8 - -Merged from 2.0 branch: - -- [improvement] JAVA-718: Log streamid at the trace level on sending request and receiving response. - -- [bug] JAVA-796: Fix SpeculativeExecutionPolicy.init() and close() are never called. -- [improvement] JAVA-710: Suppress unnecessary warning at shutdown. -- [improvement] #340: Allow DNS name with multiple A-records as contact point. -- [bug] JAVA-794: Allow tracing across multiple result pages. -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. -- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. -- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). 
-- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. -- [improvement] JAVA-765: Provide API to retrieve values of a Parameterized SimpleStatement. -- [improvement] JAVA-827: implement UPDATE .. IF EXISTS in QueryBuilder. -- [improvement] JAVA-618: Randomize contact points list to prevent hotspots. -- [improvement] JAVA-720: Surface the coordinator used on query failure. -- [bug] JAVA-792: Handle contact points removed during init. -- [improvement] JAVA-719: Allow PlainTextAuthProvider to change its credentials at runtime. -- [new feature] JAVA-151: Make it possible to register for SchemaChange Events. -- [improvement] JAVA-861: Downgrade "Asked to rebuild table" log from ERROR to INFO level. -- [improvement] JAVA-797: Provide an option to prepare statements only on one node. -- [improvement] JAVA-658: Provide an option to not re-prepare all statements in onUp. -- [improvement] JAVA-853: Customizable creation of netty timer. -- [bug] JAVA-859: Avoid quadratic ring processing with invalid replication factors. -- [improvement] JAVA-657: Debounce control connection queries. -- [bug] JAVA-784: LoadBalancingPolicy.distance() called before init(). -- [new feature] JAVA-828: Make driver-side metadata optional. -- [improvement] JAVA-544: Allow hosts to remain partially up. -- [improvement] JAVA-821, JAVA-822: Remove internal blocking calls and expose async session - creation. -- [improvement] JAVA-725: Use parallel calls when re-preparing statement on other - hosts. -- [bug] JAVA-629: Don't use connection timeout for unrelated internal queries. -- [bug] JAVA-892: Fix NPE in speculative executions when metrics disabled. - - -### 2.1.7.1 - -- [bug] JAVA-834: Special case check for 'null' string in index_options column. -- [improvement] JAVA-835: Allow accessor methods with less parameters in case - named bind markers are repeated. - - ### 2.1.7 -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. 
-- [improvement] JAVA-715: Make NativeColumnType a top-level class. -- [improvement] JAVA-782: Unify "Target" enum for schema elements. -- [improvement] JAVA-700: Expose ProtocolVersion#toInt. -- [bug] JAVA-542: Handle void return types in accessors. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-713: HashMap throws an OOM Exception when logging level is set to TRACE. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-732: Expose KEYS and FULL indexing options in IndexMetadata. -- [improvement] JAVA-589: Allow @Enumerated in Accessor method parameters. -- [improvement] JAVA-554: Allow access to table metadata from Mapper. -- [improvement] JAVA-661: Provide a way to map computed fields. -- [improvement] JAVA-824: Ignore missing columns in mapper. -- [bug] JAVA-724: Preserve default timestamp for retries and speculative executions. -- [improvement] JAVA-738: Use same pool implementation for protocol v2 and v3. -- [improvement] JAVA-677: Support CONTAINS / CONTAINS KEY in QueryBuilder. -- [improvement] JAVA-477/JAVA-540: Add USING options in mapper for delete and save - operations. -- [improvement] JAVA-473: Add mapper option to configure whether to save null fields. 
+- [improvement] Improve QueryBuilder API for SELECT DISTINCT (JAVA-475) +- [improvement] Make NativeColumnType a top-level class (JAVA-715) +- [improvement] Unify "Target" enum for schema elements (JAVA-782) +- [improvement] Expose ProtocolVersion#toInt (JAVA-700) +- [bug] Handle void return types in accessors (JAVA-542) +- [improvement] Create values() function for Insert builder using List (JAVA-225) +- [improvement] HashMap throws an OOM Exception when logging level is set to TRACE (JAVA-713) +- [bug] Support bind marker in QueryBuilder DELETE's list index (JAVA-679) +- [improvement] Expose KEYS and FULL indexing options in IndexMetadata (JAVA-732) +- [improvement] Allow @Enumerated in Accessor method parameters (JAVA-589) +- [improvement] Allow access to table metadata from Mapper (JAVA-554) +- [improvement] Provide a way to map computed fields (JAVA-661) +- [improvement] Ignore missing columns in mapper (JAVA-824) +- [bug] Preserve default timestamp for retries and speculative executions (JAVA-724) +- [improvement] Use same pool implementation for protocol v2 and v3 + (JAVA-738). +- [improvement] Support CONTAINS / CONTAINS KEY in QueryBuilder (JAVA-677) +- [improvement] Add USING options in mapper for delete and save + operations (JAVA-477/JAVA-540) +- [improvement] Add mapper option to configure whether to save null fields (JAVA-473) Merged from 2.0 branch: -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. 
-- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. -- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. -- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). -- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. +- [bug] DowngradingConsistencyRetryPolicy ignores write timeouts (JAVA-737) +- [bug] Forbid bind marker in QueryBuilder add/append/prepend (JAVA-736) +- [bug] Prevent QueryBuilder.quote() from applying duplicate double quotes (JAVA-712) +- [bug] Prevent QueryBuilder from trying to serialize raw string (JAVA-688) +- [bug] Support bind marker in QueryBuilder DELETE's list index (JAVA-679) +- [improvement] Improve QueryBuilder API for SELECT DISTINCT (JAVA-475) +- [improvement] Create values() function for Insert builder using List (JAVA-225) +- [improvement] Warn when ReplicationStrategy encounters invalid + replication factors (JAVA-702) +- [improvement] Add PoolingOptions method to set both core and max + connections (JAVA-662). +- [improvement] Do not include epoll JAR in binary distribution (JAVA-766) +- [improvement] Optimize internal copies of Request objects (JAVA-726) +- [bug] Preserve tracing across retries (JAVA-815) +- [improvement] New RetryDecision.tryNextHost() (JAVA-709) +- [bug] Handle function calls and raw strings as non-idempotent in QueryBuilder (JAVA-733) ### 2.1.6 Merged from 2.0 branch: -- [new feature] JAVA-584: Add getObject to BoundStatement and Row. -- [improvement] JAVA-419: Improve connection pool resizing algorithm. -- [bug] JAVA-599: Fix race condition between pool expansion and shutdown. -- [improvement] JAVA-622: Upgrade Netty to 4.0.27. 
-- [improvement] JAVA-562: Coalesce frames before flushing them to the connection. -- [improvement] JAVA-583: Rename threads to indicate that they are for the driver. -- [new feature] JAVA-550: Expose paging state. -- [new feature] JAVA-646: Slow Query Logger. -- [improvement] JAVA-698: Exclude some errors from measurements in LatencyAwarePolicy. -- [bug] JAVA-641: Fix issue when executing a PreparedStatement from another cluster. -- [improvement] JAVA-534: Log keyspace xxx does not exist at WARN level. -- [improvement] JAVA-619: Allow Cluster subclasses to delegate to another instance. -- [new feature] JAVA-669: Expose an API to check for schema agreement after a - schema-altering statement. -- [improvement] JAVA-692: Make connection and pool creation fully async. -- [improvement] JAVA-505: Optimize connection use after reconnection. -- [improvement] JAVA-617: Remove "suspected" mechanism. -- [improvement] reverts JAVA-425: Don't mark connection defunct on client timeout. -- [new feature] JAVA-561: Speculative query executions. -- [bug] JAVA-666: Release connection before completing the ResultSetFuture. -- [new feature BETA] JAVA-723: Percentile-based variant of query logger and speculative - executions. -- [bug] JAVA-734: Fix buffer leaks when compression is enabled. -- [improvement] JAVA-756: Use Netty's pooled ByteBufAllocator by default. -- [improvement] JAVA-759: Expose "unsafe" paging state API. -- [bug] JAVA-768: Prevent race during pool initialization. 
+- [new feature] Add getObject to BoundStatement and Row (JAVA-584) +- [improvement] Improve connection pool resizing algorithm (JAVA-419) +- [bug] Fix race condition between pool expansion and shutdown (JAVA-599) +- [improvement] Upgrade Netty to 4.0.27 (JAVA-622) +- [improvement] Coalesce frames before flushing them to the connection + (JAVA-562) +- [improvement] Rename threads to indicate that they are for the driver + (JAVA-583) +- [new feature] Expose paging state (JAVA-550) +- [new feature] Slow Query Logger (JAVA-646) +- [improvement] Exclude some errors from measurements in LatencyAwarePolicy + (JAVA-698) +- [bug] Fix issue when executing a PreparedStatement from another cluster + (JAVA-641) +- [improvement] Log keyspace xxx does not exist at WARN level (JAVA-534) +- [improvement] Allow Cluster subclasses to delegate to another instance + (JAVA-619) +- [new feature] Expose an API to check for schema agreement after a + schema-altering statement (JAVA-669) +- [improvement] Make connection and pool creation fully async (JAVA-692) +- [improvement] Optimize connection use after reconnection (JAVA-505) +- [improvement] Remove "suspected" mechanism (JAVA-617) +- [improvement] Don't mark connection defunct on client timeout (reverts + JAVA-425) +- [new feature] Speculative query executions (JAVA-561) +- [bug] Release connection before completing the ResultSetFuture (JAVA-666) +- [new feature BETA] Percentile-based variant of query logger and speculative + executions (JAVA-723) +- [bug] Fix buffer leaks when compression is enabled (JAVA-734). +- [improvement] Use Netty's pooled ByteBufAllocator by default (JAVA-756) +- [improvement] Expose "unsafe" paging state API (JAVA-759) +- [bug] Prevent race during pool initialization (JAVA-768) ### 2.1.5 -- [bug] JAVA-575: Authorize Null parameter in Accessor method. -- [improvement] JAVA-570: Support C* 2.1.3's nested collections. -- [bug] JAVA-612: Fix checks on mapped collection types. 
-- [bug] JAVA-672: Fix QueryBuilder.putAll() when the collection contains UDTs. +- [bug] Authorize Null parameter in Accessor method (JAVA-575) +- [improvement] Support C* 2.1.3's nested collections (JAVA-570) +- [bug] Fix checks on mapped collection types (JAVA-612) +- [bug] Fix QueryBuilder.putAll() when the collection contains UDTs (JAVA-672) Merged from 2.0 branch: -- [new feature] JAVA-518: Add AddressTranslater for EC2 multi-region deployment. -- [improvement] JAVA-533: Add connection heartbeat. -- [improvement] JAVA-568: Reduce level of logs on missing rpc_address. -- [improvement] JAVA-312, JAVA-681: Expose node token and range information. -- [bug] JAVA-595: Fix cluster name mismatch check at startup. -- [bug] JAVA-620: Fix guava dependency when using OSGI. -- [bug] JAVA-678: Fix handling of DROP events when ks name is case-sensitive. -- [improvement] JAVA-631: Use List instead of List in QueryBuilder API. -- [improvement] JAVA-654: Exclude Netty POM from META-INF in shaded JAR. -- [bug] JAVA-655: Quote single quotes contained in table comments in asCQLQuery method. -- [bug] JAVA-684: Empty TokenRange returned in a one token cluster. -- [improvement] JAVA-687: Expose TokenRange#contains. -- [bug] JAVA-614: Prevent race between cancellation and query completion. -- [bug] JAVA-632: Prevent cancel and timeout from cancelling unrelated ResponseHandler if - streamId was already released and reused. -- [bug] JAVA-642: Fix issue when newly opened pool fails before we could mark the node UP. -- [bug] JAVA-613: Fix unwanted LBP notifications when a contact host is down. -- [bug] JAVA-651: Fix edge cases where a connection was released twice. -- [bug] JAVA-653: Fix edge cases in query cancellation. 
+- [new feature] Add AddressTranslater for EC2 multi-region deployment (JAVA-518) +- [improvement] Add connection heartbeat (JAVA-533) +- [improvement] Reduce level of logs on missing rpc_address (JAVA-568) +- [improvement] Expose node token and range information (JAVA-312, JAVA-681) +- [bug] Fix cluster name mismatch check at startup (JAVA-595) +- [bug] Fix guava dependency when using OSGI (JAVA-620) +- [bug] Fix handling of DROP events when ks name is case-sensitive (JAVA-678) +- [improvement] Use List instead of List in QueryBuilder API + (JAVA-631) +- [improvement] Exclude Netty POM from META-INF in shaded JAR (JAVA-654) +- [bug] Quote single quotes contained in table comments in asCQLQuery method + (JAVA-655) +- [bug] Empty TokenRange returned in a one token cluster (JAVA-684) +- [improvement] Expose TokenRange#contains (JAVA-687) +- [bug] Prevent race between cancellation and query completion (JAVA-614) +- [bug] Prevent cancel and timeout from cancelling unrelated ResponseHandler if + streamId was already released and reused (JAVA-632). +- [bug] Fix issue when newly opened pool fails before we could mark the node UP + (JAVA-642) +- [bug] Fix unwanted LBP notifications when a contact host is down (JAVA-613) +- [bug] Fix edge cases where a connection was released twice (JAVA-651). +- [bug] Fix edge cases in query cancellation (JAVA-653). ### 2.1.4 Merged from 2.0 branch: -- [improvement] JAVA-538: Shade Netty dependency. -- [improvement] JAVA-543: Target schema refreshes more precisely. -- [bug] JAVA-546: Don't check rpc_address for control host. -- [improvement] JAVA-409: Improve message of NoHostAvailableException. -- [bug] JAVA-556: Rework connection reaper to avoid deadlock. -- [bug] JAVA-557: Avoid deadlock when multiple connections to the same host get write - errors. -- [improvement] JAVA-504: Make shuffle=true the default for TokenAwarePolicy. 
-- [bug] JAVA-577: Fix bug when SUSPECT reconnection succeeds, but one of the pooled - connections fails while bringing the node back up. -- [bug] JAVA-419: JAVA-587: Prevent faulty control connection from ignoring reconnecting hosts. -- temporarily revert "Add idle timeout to the connection pool". -- [bug] JAVA-593: Ensure updateCreatedPools does not add pools for suspected hosts. -- [bug] JAVA-594: Ensure state change notifications for a given host are handled serially. -- [bug] JAVA-597: Ensure control connection reconnects when control host is removed. +- [improvement] Shade Netty dependency (JAVA-538) +- [improvement] Target schema refreshes more precisely (JAVA-543) +- [bug] Don't check rpc_address for control host (JAVA-546) +- [improvement] Improve message of NoHostAvailableException (JAVA-409) +- [bug] Rework connection reaper to avoid deadlock (JAVA-556) +- [bug] Avoid deadlock when multiple connections to the same host get write + errors (JAVA-557) +- [improvement] Make shuffle=true the default for TokenAwarePolicy (JAVA-504) +- [bug] Fix bug when SUSPECT reconnection succeeds, but one of the pooled + connections fails while bringing the node back up (JAVA-577) +- [bug] Prevent faulty control connection from ignoring reconnecting hosts + (JAVA-587) +- temporarily revert "Add idle timeout to the connection pool" (JAVA-419) +- [bug] Ensure updateCreatedPools does not add pools for suspected hosts + (JAVA-593) +- [bug] Ensure state change notifications for a given host are handled serially + (JAVA-594) +- [bug] Ensure control connection reconnects when control host is removed + (JAVA-597) ### 2.1.3 -- [bug] JAVA-510: Ignore static fields in mapper. -- [bug] JAVA-509: Fix UDT parsing at init when using the default protocol version. -- [bug] JAVA-495: Fix toString, equals and hashCode on accessor proxies. -- [bug] JAVA-528: Allow empty name on Column and Field annotations. 
+- [bug] Ignore static fields in mapper (JAVA-510) +- [bug] Fix UDT parsing at init when using the default protocol version (JAVA-509) +- [bug] Fix toString, equals and hashCode on accessor proxies (JAVA-495) +- [bug] Allow empty name on Column and Field annotations (JAVA-528) Merged from 2.0 branch: -- [bug] JAVA-497: Ensure control connection does not trigger concurrent reconnects. -- [improvement] JAVA-472: Keep trying to reconnect on authentication errors. -- [improvement] JAVA-463: Expose close method on load balancing policy. -- [improvement] JAVA-459: Allow load balancing policy to trigger refresh for a single host. -- [bug] JAVA-493: Expose an API to cancel reconnection attempts. -- [bug] JAVA-503: Fix NPE when a connection fails during pool construction. -- [improvement] JAVA-423: Log datacenter name in DCAware policy's init when it is explicitly provided. -- [improvement] JAVA-504: Shuffle the replicas in TokenAwarePolicy.newQueryPlan. -- [improvement] JAVA-507: Make schema agreement wait tuneable. -- [improvement] JAVA-494: Document how to inject the driver metrics into another registry. -- [improvement] JAVA-419: Add idle timeout to the connection pool. -- [bug] JAVA-516: LatencyAwarePolicy does not shutdown executor on invocation of close. -- [improvement] JAVA-451: Throw an exception when DCAwareRoundRobinPolicy is built with - an explicit but null or empty local datacenter. -- [bug] JAVA-511: Fix check for local contact points in DCAware policy's init. -- [improvement] JAVA-457: Make timeout on saturated pool customizable. -- [improvement] JAVA-521: Downgrade Guava to 14.0.1. -- [bug] JAVA-526: Fix token awareness for case-sensitive keyspaces and tables. -- [bug] JAVA-515: Check maximum number of values passed to SimpleStatement. -- [improvement] JAVA-532: Expose the driver version through the API. -- [improvement] JAVA-522: Optimize session initialization when some hosts are not - responsive. 
+- [bug] Ensure control connection does not trigger concurrent reconnects (JAVA-497) +- [improvement] Keep trying to reconnect on authentication errors (JAVA-472) +- [improvement] Expose close method on load balancing policy (JAVA-463) +- [improvement] Allow load balancing policy to trigger refresh for a single host (JAVA-459) +- [bug] Expose an API to cancel reconnection attempts (JAVA-493) +- [bug] Fix NPE when a connection fails during pool construction (JAVA-503) +- [improvement] Log datacenter name in DCAware policy's init when it is explicitly provided + (JAVA-423) +- [improvement] Shuffle the replicas in TokenAwarePolicy.newQueryPlan (JAVA-504) +- [improvement] Make schema agreement wait tuneable (JAVA-507) +- [improvement] Document how to inject the driver metrics into another registry (JAVA-494) +- [improvement] Add idle timeout to the connection pool (JAVA-419) +- [bug] LatencyAwarePolicy does not shutdown executor on invocation of close (JAVA-516) +- [improvement] Throw an exception when DCAwareRoundRobinPolicy is built with + an explicit but null or empty local datacenter (JAVA-451). +- [bug] Fix check for local contact points in DCAware policy's init (JAVA-511) +- [improvement] Make timeout on saturated pool customizable (JAVA-457) +- [improvement] Downgrade Guava to 14.0.1 (JAVA-521) +- [bug] Fix token awareness for case-sensitive keyspaces and tables (JAVA-526) +- [bug] Check maximum number of values passed to SimpleStatement (JAVA-515) +- [improvement] Expose the driver version through the API (JAVA-532) +- [improvement] Optimize session initialization when some hosts are not + responsive (JAVA-522) ### 2.1.2 -- [improvement] JAVA-361, JAVA-364, JAVA-467: Support for native protocol v3. -- [bug] JAVA-454: Fix UDT fields of type inet in QueryBuilder. -- [bug] JAVA-455: Exclude transient fields from Frozen checks. -- [bug] JAVA-453: Fix handling of null collections in mapper. 
-- [improvement] JAVA-452: Make implicit column names case-insensitive in mapper. -- [bug] JAVA-433: Fix named bind markers in QueryBuilder. -- [bug] JAVA-458: Fix handling of BigInteger in object mapper. -- [bug] JAVA-465: Ignore synthetic fields in mapper. -- [improvement] JAVA-451: Throw an exception when DCAwareRoundRobinPolicy is built with - an explicit but null or empty local datacenter. -- [improvement] JAVA-469: Add backwards-compatible DataType.serialize methods. -- [bug] JAVA-487: Handle null enum fields in object mapper. -- [bug] JAVA-499: Handle null UDT fields in object mapper. +- [improvement] Support for native protocol v3 (JAVA-361, JAVA-364, JAVA-467) +- [bug] Fix UDT fields of type inet in QueryBuilder (JAVA-454) +- [bug] Exclude transient fields from Frozen checks (JAVA-455) +- [bug] Fix handling of null collections in mapper (JAVA-453) +- [improvement] Make implicit column names case-insensitive in mapper (JAVA-452) +- [bug] Fix named bind markers in QueryBuilder (JAVA-433) +- [bug] Fix handling of BigInteger in object mapper (JAVA-458) +- [bug] Ignore synthetic fields in mapper (JAVA-465) +- [improvement] Throw an exception when DCAwareRoundRobinPolicy is built with + an explicit but null or empty local datacenter (JAVA-451) +- [improvement] Add backwards-compatible DataType.serialize methods (JAVA-469) +- [bug] Handle null enum fields in object mapper (JAVA-487) +- [bug] Handle null UDT fields in object mapper (JAVA-499) Merged from 2.0 branch: -- [bug] JAVA-449: Handle null pool in PooledConnection.release. -- [improvement] JAVA-425: Defunct connection on request timeout. -- [improvement] JAVA-426: Try next host when we get a SERVER_ERROR. -- [bug] JAVA-449, JAVA-460, JAVA-471: Handle race between query timeout and completion. -- [bug] JAVA-496: Fix DCAwareRoundRobinPolicy datacenter auto-discovery. 
+- [bug] Handle null pool in PooledConnection.release (JAVA-449) +- [improvement] Defunct connection on request timeout (JAVA-425) +- [improvement] Try next host when we get a SERVER_ERROR (JAVA-426) +- [bug] Handle race between query timeout and completion (JAVA-449, JAVA-460, JAVA-471) +- [bug] Fix DCAwareRoundRobinPolicy datacenter auto-discovery (JAVA-496) ### 2.1.1 -- [new] JAVA-441: Support for new "frozen" keyword. +- [new] Support for new "frozen" keyword (JAVA-441) Merged from 2.0 branch: -- [bug] JAVA-397: Check cluster name when connecting to a new node. -- [bug] JAVA-326: Add missing CAS delete support in QueryBuilder. -- [bug] JAVA-363: Add collection and data length checks during serialization. -- [improvement] JAVA-329: Surface number of retries in metrics. -- [bug] JAVA-428: Do not use a host when no rpc_address found for it. -- [improvement] JAVA-358: Add ResultSet.wasApplied() for conditional queries. -- [bug] JAVA-349: Fix negative HostConnectionPool open count. -- [improvement] JAVA-436: Log more connection details at trace and debug levels. -- [bug] JAVA-445: Fix cluster shutdown. +- [bug] Check cluster name when connecting to a new node (JAVA-397) +- [bug] Add missing CAS delete support in QueryBuilder (JAVA-326) +- [bug] Add collection and data length checks during serialization (JAVA-363) +- [improvement] Surface number of retries in metrics (JAVA-329) +- [bug] Do not use a host when no rpc_address found for it (JAVA-428) +- [improvement] Add ResultSet.wasApplied() for conditional queries (JAVA-358) +- [bug] Fix negative HostConnectionPool open count (JAVA-349) +- [improvement] Log more connection details at trace and debug levels (JAVA-436) +- [bug] Fix cluster shutdown (JAVA-445) ### 2.1.0 -- [bug] JAVA-408: ClusteringColumn annotation not working with specified ordering. -- [improvement] JAVA-410: Fail BoundStatement if null values are not set explicitly. -- [bug] JAVA-416: Handle UDT and tuples in BuiltStatement.toString. 
+- [bug] ClusteringColumn annotation not working with specified ordering (JAVA-408) +- [improvement] Fail BoundStatement if null values are not set explicitly (JAVA-410) +- [bug] Handle UDT and tuples in BuiltStatement.toString (JAVA-416) Merged from 2.0 branch: -- [bug] JAVA-407: Release connections on ResultSetFuture#cancel. -- [bug] JAVA-393: Fix handling of SimpleStatement with values in query builder - batches. -- [bug] JAVA-417: Ensure pool is properly closed in onDown. -- [bug] JAVA-415: Fix tokenMap initialization at startup. -- [bug] JAVA-418: Avoid deadlock on close. +- [bug] Release connections on ResultSetFuture#cancel (JAVA-407) +- [bug] Fix handling of SimpleStatement with values in query builder + batches (JAVA-393) +- [bug] Ensure pool is properly closed in onDown (JAVA-417) +- [bug] Fix tokenMap initialization at startup (JAVA-415) +- [bug] Avoid deadlock on close (JAVA-418) ### 2.1.0-rc1 Merged from 2.0 branch: -- [bug] JAVA-394: Ensure defunct connections are completely closed. -- [bug] JAVA-342, JAVA-390: Fix memory and resource leak on closed Sessions. +- [bug] Ensure defunct connections are completely closed (JAVA-394) +- [bug] Fix memory and resource leak on closed Sessions (JAVA-342, JAVA-390) ### 2.1.0-beta1 @@ -1870,280 +246,245 @@ Merged from 2.0 branch: Merged from 2.0 branch: everything up to 2.0.3 (included), and the following. -- [improvement] JAVA-204: Better handling of dead connections. -- [bug] JAVA-373: Fix potential NPE in ControlConnection. -- [bug] JAVA-291: Throws NPE when passed null for a contact point. -- [bug] JAVA-315: Avoid LoadBalancingPolicy onDown+onUp at startup. -- [bug] JAVA-343: Avoid classloader leak in Tomcat. -- [bug] JAVA-387: Avoid deadlock in onAdd/onUp. -- [bug] JAVA-377, JAVA-391: Make metadata parsing more lenient. - - -### 2.0.12.2 - -- [bug] JAVA-1179: Request objects should be copied when executed. -- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. 
-- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. - - -### 2.0.12.1 - -- [bug] JAVA-994: Don't call on(Up|Down|Add|Remove) methods if Cluster is closed/closing. -- [improvement] JAVA-805: Document that metrics are null until Cluster is initialized. -- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. - - -### 2.0.12 - -- [bug] JAVA-950: Fix Cluster.connect with a case-sensitive keyspace. -- [improvement] JAVA-920: Downgrade "error creating pool" message to WARN. -- [bug] JAVA-954: Don't trigger reconnection before initialization complete. -- [improvement] JAVA-914: Avoid rejected tasks at shutdown. -- [improvement] JAVA-921: Add SimpleStatement.getValuesCount(). -- [bug] JAVA-901: Move call to connection.release() out of cancelHandler. -- [bug] JAVA-960: Avoid race in control connection shutdown. -- [bug] JAVA-656: Fix NPE in ControlConnection.updateLocationInfo. -- [bug] JAVA-966: Count uninitialized connections in conviction policy. -- [improvement] JAVA-917: Document SSL configuration. -- [improvement] JAVA-652: Add DCAwareRoundRobinPolicy builder. -- [improvement] JAVA-808: Add generic filtering policy that can be used to exclude specific DCs. - - -### 2.0.11 - -- [improvement] JAVA-718: Log streamid at the trace level on sending request and receiving response. -- [bug] JAVA-796: Fix SpeculativeExecutionPolicy.init() and close() are never called. -- [improvement] JAVA-710: Suppress unnecessary warning at shutdown. -- [improvement] #340: Allow DNS name with multiple A-records as contact point. -- [bug] JAVA-794: Allow tracing across multiple result pages. -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. 
-- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. -- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. -- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). -- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. -- [improvement] JAVA-765: Provide API to retrieve values of a Parameterized SimpleStatement. -- [improvement] JAVA-827: implement UPDATE .. IF EXISTS in QueryBuilder. -- [improvement] JAVA-618: Randomize contact points list to prevent hotspots. -- [improvement] JAVA-720: Surface the coordinator used on query failure. -- [bug] JAVA-792: Handle contact points removed during init. -- [improvement] JAVA-719: Allow PlainTextAuthProvider to change its credentials at runtime. -- [new feature] JAVA-151: Make it possible to register for SchemaChange Events. -- [improvement] JAVA-861: Downgrade "Asked to rebuild table" log from ERROR to INFO level. -- [improvement] JAVA-797: Provide an option to prepare statements only on one node. -- [improvement] JAVA-658: Provide an option to not re-prepare all statements in onUp. -- [improvement] JAVA-853: Customizable creation of netty timer. -- [bug] JAVA-859: Avoid quadratic ring processing with invalid replication factors. -- [improvement] JAVA-657: Debounce control connection queries. -- [bug] JAVA-784: LoadBalancingPolicy.distance() called before init(). -- [new feature] JAVA-828: Make driver-side metadata optional. 
-- [improvement] JAVA-544: Allow hosts to remain partially up. -- [improvement] JAVA-821, JAVA-822: Remove internal blocking calls and expose async session - creation. -- [improvement] JAVA-725: Use parallel calls when re-preparing statement on other - hosts. -- [bug] JAVA-629: Don't use connection timeout for unrelated internal queries. -- [bug] JAVA-892: Fix NPE in speculative executions when metrics disabled. +- [improvement] Better handling of dead connections (JAVA-204) +- [bug] Fix potential NPE in ControlConnection (JAVA-373) +- [bug] Throws NPE when passed null for a contact point (JAVA-291) +- [bug] Avoid LoadBalancingPolicy onDown+onUp at startup (JAVA-315) +- [bug] Avoid classloader leak in Tomcat (JAVA-343) +- [bug] Avoid deadlock in onAdd/onUp (JAVA-387) +- [bug] Make metadata parsing more lenient (JAVA-377, JAVA-391) + + +### 2.0.11 (in progress) + +- [bug] Fix SpeculativeExecutionPolicy.init() and close() are never called (JAVA-796) +- [improvement] Suppress unnecessary warning at shutdown (JAVA-710) +- [improvement] Allow DNS name with multiple A-records as contact point (#340) +- [bug] Allow tracing across multiple result pages (JAVA-794) +- [bug] DowngradingConsistencyRetryPolicy ignores write timeouts (JAVA-737) +- [bug] Forbid bind marker in QueryBuilder add/append/prepend (JAVA-736) +- [bug] Prevent QueryBuilder.quote() from applying duplicate double quotes (JAVA-712) +- [bug] Prevent QueryBuilder from trying to serialize raw string (JAVA-688) +- [bug] Support bind marker in QueryBuilder DELETE's list index (JAVA-679) +- [improvement] Improve QueryBuilder API for SELECT DISTINCT (JAVA-475) +- [improvement] Create values() function for Insert builder using List (JAVA-225) +- [improvement] Warn when ReplicationStrategy encounters invalid + replication factors (JAVA-702) +- [improvement] Add PoolingOptions method to set both core and max + connections (JAVA-662). 
+- [improvement] Do not include epoll JAR in binary distribution (JAVA-766) +- [improvement] Optimize internal copies of Request objects (JAVA-726) +- [bug] Preserve tracing across retries (JAVA-815) +- [improvement] New RetryDecision.tryNextHost() (JAVA-709) +- [bug] Handle function calls and raw strings as non-idempotent in QueryBuilder (JAVA-733) Merged from 2.0.10_fixes branch: -- [improvement] JAVA-756: Use Netty's pooled ByteBufAllocator by default. -- [improvement] JAVA-759: Expose "unsafe" paging state API. -- [bug] JAVA-767: Fix getObject by name. -- [bug] JAVA-768: Prevent race during pool initialization. +- [improvement] Use Netty's pooled ByteBufAllocator by default (JAVA-756) +- [improvement] Expose "unsafe" paging state API (JAVA-759) +- [bug] Fix getObject by name (JAVA-767) +- [bug] Prevent race during pool initialization (JAVA-768) ### 2.0.10.1 -- [improvement] JAVA-756: Use Netty's pooled ByteBufAllocator by default. -- [improvement] JAVA-759: Expose "unsafe" paging state API. -- [bug] JAVA-767: Fix getObject by name. -- [bug] JAVA-768: Prevent race during pool initialization. +- [improvement] Use Netty's pooled ByteBufAllocator by default (JAVA-756) +- [improvement] Expose "unsafe" paging state API (JAVA-759) +- [bug] Fix getObject by name (JAVA-767) +- [bug] Prevent race during pool initialization (JAVA-768) ### 2.0.10 -- [new feature] JAVA-518: Add AddressTranslater for EC2 multi-region deployment. -- [improvement] JAVA-533: Add connection heartbeat. -- [improvement] JAVA-568: Reduce level of logs on missing rpc_address. -- [improvement] JAVA-312, JAVA-681: Expose node token and range information. -- [bug] JAVA-595: Fix cluster name mismatch check at startup. -- [bug] JAVA-620: Fix guava dependency when using OSGI. -- [bug] JAVA-678: Fix handling of DROP events when ks name is case-sensitive. -- [improvement] JAVA-631: Use List instead of List in QueryBuilder API. -- [improvement] JAVA-654: Exclude Netty POM from META-INF in shaded JAR. 
-- [bug] JAVA-655: Quote single quotes contained in table comments in asCQLQuery method. -- [bug] JAVA-684: Empty TokenRange returned in a one token cluster. -- [improvement] JAVA-687: Expose TokenRange#contains. -- [new feature] JAVA-547: Expose values of BoundStatement. -- [new feature] JAVA-584: Add getObject to BoundStatement and Row. -- [improvement] JAVA-419: Improve connection pool resizing algorithm. -- [bug] JAVA-599: Fix race condition between pool expansion and shutdown. -- [improvement] JAVA-622: Upgrade Netty to 4.0.27. -- [improvement] JAVA-562: Coalesce frames before flushing them to the connection. -- [improvement] JAVA-583: Rename threads to indicate that they are for the driver. -- [new feature] JAVA-550: Expose paging state. -- [new feature] JAVA-646: Slow Query Logger. -- [improvement] JAVA-698: Exclude some errors from measurements in LatencyAwarePolicy. -- [bug] JAVA-641: Fix issue when executing a PreparedStatement from another cluster. -- [improvement] JAVA-534: Log keyspace xxx does not exist at WARN level. -- [improvement] JAVA-619: Allow Cluster subclasses to delegate to another instance. -- [new feature] JAVA-669: Expose an API to check for schema agreement after a - schema-altering statement. -- [improvement] JAVA-692: Make connection and pool creation fully async. -- [improvement] JAVA-505: Optimize connection use after reconnection. -- [improvement] JAVA-617: Remove "suspected" mechanism. -- [improvement] reverts JAVA-425: Don't mark connection defunct on client timeout. -- [new feature] JAVA-561: Speculative query executions. -- [bug] JAVA-666: Release connection before completing the ResultSetFuture. -- [new feature BETA] JAVA-723: Percentile-based variant of query logger and speculative - executions. -- [bug] JAVA-734: Fix buffer leaks when compression is enabled. 
+- [new feature] Add AddressTranslater for EC2 multi-region deployment (JAVA-518) +- [improvement] Add connection heartbeat (JAVA-533) +- [improvement] Reduce level of logs on missing rpc_address (JAVA-568) +- [improvement] Expose node token and range information (JAVA-312, JAVA-681) +- [bug] Fix cluster name mismatch check at startup (JAVA-595) +- [bug] Fix guava dependency when using OSGI (JAVA-620) +- [bug] Fix handling of DROP events when ks name is case-sensitive (JAVA-678) +- [improvement] Use List instead of List in QueryBuilder API + (JAVA-631) +- [improvement] Exclude Netty POM from META-INF in shaded JAR (JAVA-654) +- [bug] Quote single quotes contained in table comments in asCQLQuery method + (JAVA-655) +- [bug] Empty TokenRange returned in a one token cluster (JAVA-684) +- [improvement] Expose TokenRange#contains (JAVA-687) +- [new feature] Expose values of BoundStatement (JAVA-547) +- [new feature] Add getObject to BoundStatement and Row (JAVA-584) +- [improvement] Improve connection pool resizing algorithm (JAVA-419) +- [bug] Fix race condition between pool expansion and shutdown (JAVA-599) +- [improvement] Upgrade Netty to 4.0.27 (JAVA-622) +- [improvement] Coalesce frames before flushing them to the connection + (JAVA-562) +- [improvement] Rename threads to indicate that they are for the driver + (JAVA-583) +- [new feature] Expose paging state (JAVA-550) +- [new feature] Slow Query Logger (JAVA-646) +- [improvement] Exclude some errors from measurements in LatencyAwarePolicy + (JAVA-698) +- [bug] Fix issue when executing a PreparedStatement from another cluster + (JAVA-641) +- [improvement] Log keyspace xxx does not exist at WARN level (JAVA-534) +- [improvement] Allow Cluster subclasses to delegate to another instance + (JAVA-619) +- [new feature] Expose an API to check for schema agreement after a + schema-altering statement (JAVA-669) +- [improvement] Make connection and pool creation fully async (JAVA-692) +- [improvement] Optimize connection 
use after reconnection (JAVA-505) +- [improvement] Remove "suspected" mechanism (JAVA-617) +- [improvement] Don't mark connection defunct on client timeout (reverts + JAVA-425) +- [new feature] Speculative query executions (JAVA-561) +- [bug] Release connection before completing the ResultSetFuture (JAVA-666) +- [new feature BETA] Percentile-based variant of query logger and speculative + executions (JAVA-723) +- [bug] Fix buffer leaks when compression is enabled (JAVA-734). Merged from 2.0.9_fixes branch: -- [bug] JAVA-614: Prevent race between cancellation and query completion. -- [bug] JAVA-632: Prevent cancel and timeout from cancelling unrelated ResponseHandler if - streamId was already released and reused. -- [bug] JAVA-642: Fix issue when newly opened pool fails before we could mark the node UP. -- [bug] JAVA-613: Fix unwanted LBP notifications when a contact host is down. -- [bug] JAVA-651: Fix edge cases where a connection was released twice. -- [bug] JAVA-653: Fix edge cases in query cancellation. +- [bug] Prevent race between cancellation and query completion (JAVA-614) +- [bug] Prevent cancel and timeout from cancelling unrelated ResponseHandler if + streamId was already released and reused (JAVA-632). +- [bug] Fix issue when newly opened pool fails before we could mark the node UP + (JAVA-642) +- [bug] Fix unwanted LBP notifications when a contact host is down (JAVA-613) +- [bug] Fix edge cases where a connection was released twice (JAVA-651). +- [bug] Fix edge cases in query cancellation (JAVA-653). ### 2.0.9.2 -- [bug] JAVA-651: Fix edge cases where a connection was released twice. -- [bug] JAVA-653: Fix edge cases in query cancellation. +- [bug] Fix edge cases where a connection was released twice (JAVA-651). +- [bug] Fix edge cases in query cancellation (JAVA-653). ### 2.0.9.1 -- [bug] JAVA-614: Prevent race between cancellation and query completion. 
-- [bug] JAVA-632: Prevent cancel and timeout from cancelling unrelated ResponseHandler if - streamId was already released and reused. -- [bug] JAVA-642: Fix issue when newly opened pool fails before we could mark the node UP. -- [bug] JAVA-613: Fix unwanted LBP notifications when a contact host is down. +- [bug] Prevent race between cancellation and query completion (JAVA-614) +- [bug] Prevent cancel and timeout from cancelling unrelated ResponseHandler if + streamId was already released and reused (JAVA-632). +- [bug] Fix issue when newly opened pool fails before we could mark the node UP + (JAVA-642) +- [bug] Fix unwanted LBP notifications when a contact host is down (JAVA-613) ### 2.0.9 -- [improvement] JAVA-538: Shade Netty dependency. -- [improvement] JAVA-543: Target schema refreshes more precisely. -- [bug] JAVA-546: Don't check rpc_address for control host. -- [improvement] JAVA-409: Improve message of NoHostAvailableException. -- [bug] JAVA-556: Rework connection reaper to avoid deadlock. -- [bug] JAVA-557: Avoid deadlock when multiple connections to the same host get write - errors. -- [improvement] JAVA-504: Make shuffle=true the default for TokenAwarePolicy. -- [bug] JAVA-577: Fix bug when SUSPECT reconnection succeeds, but one of the pooled - connections fails while bringing the node back up. -- [bug] JAVA-419: JAVA-587: Prevent faulty control connection from ignoring reconnecting hosts. -- temporarily revert "Add idle timeout to the connection pool". -- [bug] JAVA-593: Ensure updateCreatedPools does not add pools for suspected hosts. -- [bug] JAVA-594: Ensure state change notifications for a given host are handled serially. -- [bug] JAVA-597: Ensure control connection reconnects when control host is removed. 
+- [improvement] Shade Netty dependency (JAVA-538) +- [improvement] Target schema refreshes more precisely (JAVA-543) +- [bug] Don't check rpc_address for control host (JAVA-546) +- [improvement] Improve message of NoHostAvailableException (JAVA-409) +- [bug] Rework connection reaper to avoid deadlock (JAVA-556) +- [bug] Avoid deadlock when multiple connections to the same host get write + errors (JAVA-557) +- [improvement] Make shuffle=true the default for TokenAwarePolicy (JAVA-504) +- [bug] Fix bug when SUSPECT reconnection succeeds, but one of the pooled + connections fails while bringing the node back up (JAVA-577) +- [bug] Prevent faulty control connection from ignoring reconnecting hosts + (JAVA-587) +- temporarily revert "Add idle timeout to the connection pool" (JAVA-419) +- [bug] Ensure updateCreatedPools does not add pools for suspected hosts + (JAVA-593) +- [bug] Ensure state change notifications for a given host are handled serially + (JAVA-594) +- [bug] Ensure control connection reconnects when control host is removed + (JAVA-597) ### 2.0.8 -- [bug] JAVA-526: Fix token awareness for case-sensitive keyspaces and tables. -- [bug] JAVA-515: Check maximum number of values passed to SimpleStatement. -- [improvement] JAVA-532: Expose the driver version through the API. -- [improvement] JAVA-522: Optimize session initialization when some hosts are not - responsive. +- [bug] Fix token awareness for case-sensitive keyspaces and tables (JAVA-526) +- [bug] Check maximum number of values passed to SimpleStatement (JAVA-515) +- [improvement] Expose the driver version through the API (JAVA-532) +- [improvement] Optimize session initialization when some hosts are not + responsive (JAVA-522) ### 2.0.7 -- [bug] JAVA-449: Handle null pool in PooledConnection.release. -- [improvement] JAVA-425: Defunct connection on request timeout. -- [improvement] JAVA-426: Try next host when we get a SERVER_ERROR. 
-- [bug] JAVA-449, JAVA-460, JAVA-471: Handle race between query timeout and completion. -- [bug] JAVA-496: Fix DCAwareRoundRobinPolicy datacenter auto-discovery. -- [bug] JAVA-497: Ensure control connection does not trigger concurrent reconnects. -- [improvement] JAVA-472: Keep trying to reconnect on authentication errors. -- [improvement] JAVA-463: Expose close method on load balancing policy. -- [improvement] JAVA-459: Allow load balancing policy to trigger refresh for a single host. -- [bug] JAVA-493: Expose an API to cancel reconnection attempts. -- [bug] JAVA-503: Fix NPE when a connection fails during pool construction. -- [improvement] JAVA-423: Log datacenter name in DCAware policy's init when it is explicitly provided. -- [improvement] JAVA-504: Shuffle the replicas in TokenAwarePolicy.newQueryPlan. -- [improvement] JAVA-507: Make schema agreement wait tuneable. -- [improvement] JAVA-494: Document how to inject the driver metrics into another registry. -- [improvement] JAVA-419: Add idle timeout to the connection pool. -- [bug] JAVA-516: LatencyAwarePolicy does not shutdown executor on invocation of close. -- [improvement] JAVA-451: Throw an exception when DCAwareRoundRobinPolicy is built with - an explicit but null or empty local datacenter. -- [bug] JAVA-511: Fix check for local contact points in DCAware policy's init. -- [improvement] JAVA-457: Make timeout on saturated pool customizable. -- [improvement] JAVA-521: Downgrade Guava to 14.0.1. 
+- [bug] Handle null pool in PooledConnection.release (JAVA-449) +- [improvement] Defunct connection on request timeout (JAVA-425) +- [improvement] Try next host when we get a SERVER_ERROR (JAVA-426) +- [bug] Handle race between query timeout and completion (JAVA-449, JAVA-460, JAVA-471) +- [bug] Fix DCAwareRoundRobinPolicy datacenter auto-discovery (JAVA-496) +- [bug] Ensure control connection does not trigger concurrent reconnects (JAVA-497) +- [improvement] Keep trying to reconnect on authentication errors (JAVA-472) +- [improvement] Expose close method on load balancing policy (JAVA-463) +- [improvement] Allow load balancing policy to trigger refresh for a single host (JAVA-459) +- [bug] Expose an API to cancel reconnection attempts (JAVA-493) +- [bug] Fix NPE when a connection fails during pool construction (JAVA-503) +- [improvement] Log datacenter name in DCAware policy's init when it is explicitly provided + (JAVA-423) +- [improvement] Shuffle the replicas in TokenAwarePolicy.newQueryPlan (JAVA-504) +- [improvement] Make schema agreement wait tuneable (JAVA-507) +- [improvement] Document how to inject the driver metrics into another registry (JAVA-494) +- [improvement] Add idle timeout to the connection pool (JAVA-419) +- [bug] LatencyAwarePolicy does not shutdown executor on invocation of close (JAVA-516) +- [improvement] Throw an exception when DCAwareRoundRobinPolicy is built with + an explicit but null or empty local datacenter (JAVA-451). +- [bug] Fix check for local contact points in DCAware policy's init (JAVA-511) +- [improvement] Make timeout on saturated pool customizable (JAVA-457) +- [improvement] Downgrade Guava to 14.0.1 (JAVA-521) ### 2.0.6 -- [bug] JAVA-397: Check cluster name when connecting to a new node. -- [bug] JAVA-326: Add missing CAS delete support in QueryBuilder. -- [bug] JAVA-363: Add collection and data length checks during serialization. -- [improvement] JAVA-329: Surface number of retries in metrics. 
-- [bug] JAVA-428: Do not use a host when no rpc_address found for it. -- [improvement] JAVA-358: Add ResultSet.wasApplied() for conditional queries. -- [bug] JAVA-349: Fix negative HostConnectionPool open count. -- [improvement] JAVA-436: Log more connection details at trace and debug levels. -- [bug] JAVA-445: Fix cluster shutdown. -- [improvement] JAVA-439: Expose child policy in chainable load balancing policies. +- [bug] Check cluster name when connecting to a new node (JAVA-397) +- [bug] Add missing CAS delete support in QueryBuilder (JAVA-326) +- [bug] Add collection and data length checks during serialization (JAVA-363) +- [improvement] Surface number of retries in metrics (JAVA-329) +- [bug] Do not use a host when no rpc_address found for it (JAVA-428) +- [improvement] Add ResultSet.wasApplied() for conditional queries (JAVA-358) +- [bug] Fix negative HostConnectionPool open count (JAVA-349) +- [improvement] Log more connection details at trace and debug levels (JAVA-436) +- [bug] Fix cluster shutdown (JAVA-445) +- [improvement] Expose child policy in chainable load balancing policies (JAVA-439) ### 2.0.5 -- [bug] JAVA-407: Release connections on ResultSetFuture#cancel. -- [bug] JAVA-393: Fix handling of SimpleStatement with values in query builder - batches. -- [bug] JAVA-417: Ensure pool is properly closed in onDown. -- [bug] JAVA-415: Fix tokenMap initialization at startup. -- [bug] JAVA-418: Avoid deadlock on close. +- [bug] Release connections on ResultSetFuture#cancel (JAVA-407) +- [bug] Fix handling of SimpleStatement with values in query builder + batches (JAVA-393) +- [bug] Ensure pool is properly closed in onDown (JAVA-417) +- [bug] Fix tokenMap initialization at startup (JAVA-415) +- [bug] Avoid deadlock on close (JAVA-418) ### 2.0.4 -- [improvement] JAVA-204: Better handling of dead connections. -- [bug] JAVA-373: Fix potential NPE in ControlConnection. -- [bug] JAVA-291: Throws NPE when passed null for a contact point. 
-- [bug] JAVA-315: Avoid LoadBalancingPolicy onDown+onUp at startup. -- [bug] JAVA-343: Avoid classloader leak in Tomcat. -- [bug] JAVA-387: Avoid deadlock in onAdd/onUp. -- [bug] JAVA-377, JAVA-391: Make metadata parsing more lenient. -- [bug] JAVA-394: Ensure defunct connections are completely closed. -- [bug] JAVA-342, JAVA-390: Fix memory and resource leak on closed Sessions. +- [improvement] Better handling of dead connections (JAVA-204) +- [bug] Fix potential NPE in ControlConnection (JAVA-373) +- [bug] Throws NPE when passed null for a contact point (JAVA-291) +- [bug] Avoid LoadBalancingPolicy onDown+onUp at startup (JAVA-315) +- [bug] Avoid classloader leak in Tomcat (JAVA-343) +- [bug] Avoid deadlock in onAdd/onUp (JAVA-387) +- [bug] Make metadata parsing more lenient (JAVA-377, JAVA-391) +- [bug] Ensure defunct connections are completely closed (JAVA-394) +- [bug] Fix memory and resource leak on closed Sessions (JAVA-342, JAVA-390) ### 2.0.3 - [new] The new AbsractSession makes mocking of Session easier. -- [new] JAVA-309: Allow to trigger a refresh of connected hosts. -- [new] JAVA-265: New Session#getState method allows to grab information on - which nodes a session is connected to. -- [new] JAVA-327: Add QueryBuilder syntax for tuples in where clauses (syntax - introduced in Cassandra 2.0.6). -- [improvement] JAVA-359: Properly validate arguments of PoolingOptions methods. -- [bug] JAVA-368: Fix bogus rejection of BigInteger in 'execute with values'. -- [bug] JAVA-367: Signal connection failure sooner to avoid missing them. -- [bug] JAVA-337: Throw UnsupportedOperationException for protocol batch - setSerialCL. 
+- [new] Allow to trigger a refresh of connected hosts (JAVA-309) +- [new] New Session#getState method allows to grab information on + which nodes a session is connected to (JAVA-265) +- [new] Add QueryBuilder syntax for tuples in where clauses (syntax + introduced in Cassandra 2.0.6) (JAVA-327) +- [improvement] Properly validate arguments of PoolingOptions methods + (JAVA-359) +- [bug] Fix bogus rejection of BigInteger in 'execute with values' + (JAVA-368) +- [bug] Signal connection failure sooner to avoid missing them + (JAVA-367) +- [bug] Throw UnsupportedOperationException for protocol batch + setSerialCL (JAVA-337) Merged from 1.0 branch: -- [bug] JAVA-325: Fix periodic reconnection to down hosts. +- [bug] Fix periodic reconnection to down hosts (JAVA-325) ### 2.0.2 @@ -2151,101 +492,101 @@ Merged from 1.0 branch: - [api] The type of the map key returned by NoHostAvailable#getErrors has changed from InetAddress to InetSocketAddress. Same for Initializer#getContactPoints return and for AuthProvider#newAuthenticator. -- [api] JAVA-296: The default load balacing policy is now DCAwareRoundRobinPolicy, and the local +- [api] The default load balacing policy is now DCAwareRoundRobinPolicy, and the local datacenter is automatically picked based on the first connected node. Furthermore, - the TokenAwarePolicy is also used by default. -- [new] JAVA-145: New optional AddressTranslater. -- [bug] JAVA-321: Don't remove quotes on keyspace in the query builder. -- [bug] JAVA-320: Fix potential NPE while cluster undergo schema changes. -- [bug] JAVA-319: Fix thread-safety of page fetching. -- [bug] JAVA-318: Fix potential NPE using fetchMoreResults. 
+ the TokenAwarePolicy is also used by default (JAVA-296) +- [new] New optional AddressTranslater (JAVA-145) +- [bug] Don't remove quotes on keyspace in the query builder (JAVA-321) +- [bug] Fix potential NPE while cluster undergo schema changes (JAVA-320) +- [bug] Fix thread-safety of page fetching (JAVA-319) +- [bug] Fix potential NPE using fetchMoreResults (JAVA-318) Merged from 1.0 branch: -- [new] JAVA-179: Expose the name of the partitioner in use in the cluster metadata. -- [new] Add new WhiteListPolicy to limit the nodes connected to a particular list. -- [improvement] JAVA-289: Do not hop DC for LOCAL_* CL in DCAwareRoundRobinPolicy. -- [bug] JAVA-313: Revert back to longs for dates in the query builder. -- [bug] JAVA-314: Don't reconnect to nodes ignored by the load balancing policy. +- [new] Expose the name of the partitioner in use in the cluster metadata (JAVA-179) +- [new] Add new WhiteListPolicy to limit the nodes connected to a particular list +- [improvement] Do not hop DC for LOCAL_* CL in DCAwareRoundRobinPolicy (JAVA-289) +- [bug] Revert back to longs for dates in the query builder (JAVA-313) +- [bug] Don't reconnect to nodes ignored by the load balancing policy (JAVA-314) ### 2.0.1 -- [improvement] JAVA-278: Handle the static columns introduced in Cassandra 2.0.6. -- [improvement] JAVA-208: Add Cluster#newSession method to create Session without connecting - right away. -- [bug] JAVA-279: Add missing iso8601 patterns for parsing dates. -- [bug] Properly parse BytesType as the blob type. -- [bug] JAVA-280: Potential NPE when parsing schema of pre-CQL tables of C* 1.2 nodes. 
+- [improvement] Handle the static columns introduced in Cassandra 2.0.6 (JAVA-278) +- [improvement] Add Cluster#newSession method to create Session without connecting + right away (JAVA-208) +- [bug] Add missing iso8601 patterns for parsing dates (JAVA-279) +- [bug] Properly parse BytesType as the blob type +- [bug] Potential NPE when parsing schema of pre-CQL tables of C* 1.2 nodes (JAVA-280) Merged from 1.0 branch: -- [bug] JAVA-275: LatencyAwarePolicy.Builder#withScale doesn't set the scale. -- [new] JAVA-114: Add methods to check if a Cluster/Session instance has been closed already. +- [bug] LatencyAwarePolicy.Builder#withScale doesn't set the scale (JAVA-275) +- [new] Add methods to check if a Cluster/Session instance has been closed already (JAVA-114) ### 2.0.0 -- [api] JAVA-269: Case sensitive identifier by default in Metadata. -- [bug] JAVA-274: Fix potential NPE in Cluster#connect. +- [api] Case sensitive identifier by default in Metadata (JAVA-269) +- [bug] Fix potential NPE in Cluster#connect (JAVA-274) Merged from 1.0 branch: -- [bug] JAVA-263: Always return the PreparedStatement object that is cache internally. -- [bug] JAVA-261: Fix race when multiple connect are done in parallel. -- [bug] JAVA-270: Don't connect at all to nodes that are ignored by the load balancing - policy. +- [bug] Always return the PreparedStatement object that is cache internally (JAVA-263) +- [bug] Fix race when multiple connect are done in parallel (JAVA-261) +- [bug] Don't connect at all to nodes that are ignored by the load balancing + policy (JAVA-270) ### 2.0.0-rc3 - [improvement] The protocol version 1 is now supported (features only supported by the version 2 of the protocol throw UnsupportedFeatureException). -- [improvement] JAVA-195: Make most main objects interface to facilitate testing/mocking. +- [improvement] Make most main objects interface to facilitate testing/mocking (JAVA-195) - [improvement] Adds new getStatements and clear methods to BatchStatement. 
-- [api] JAVA-247: Renamed shutdown to closeAsync and ShutdownFuture to CloseFuture. Clustering - and Session also now implement Closeable. -- [bug] JAVA-232: Fix potential thread leaks when shutting down Metrics. -- [bug] JAVA-231: Fix potential NPE in HostConnectionPool. -- [bug] JAVA-244: Avoid NPE when node is in an unconfigured DC. -- [bug] JAVA-258: Don't block for scheduled reconnections on Cluster#close. +- [api] Renamed shutdown to closeAsync and ShutdownFuture to CloseFuture. Clustering + and Session also now implement Closeable (JAVA-247). +- [bug] Fix potential thread leaks when shutting down Metrics (JAVA-232) +- [bug] Fix potential NPE in HostConnectionPool (JAVA-231) +- [bug] Avoid NPE when node is in an unconfigured DC (JAVA-244) +- [bug] Don't block for scheduled reconnections on Cluster#close (JAVA-258) Merged from 1.0 branch: -- [new] JAVA-224: Added Session#prepareAsync calls. -- [new] JAVA-249: Added Cluster#getLoggedKeyspace. -- [improvement] Avoid preparing a statement multiple time per host with multiple sessions. -- [bug] JAVA-255: Make sure connections are returned to the right pools. -- [bug] JAVA-264: Use date string in query build to work-around CASSANDRA-6718. +- [new] Added Session#prepareAsync calls (JAVA-224) +- [new] Added Cluster#getLoggedKeyspace (JAVA-249) +- [improvement] Avoid preparing a statement multiple time per host with multiple sessions +- [bug] Make sure connections are returned to the right pools (JAVA-255) +- [bug] Use date string in query build to work-around CASSANDRA-6718 (JAVA-264) ### 2.0.0-rc2 -- [new] JAVA-207: Add LOCAL_ONE consistency level support (requires using C* 2.0.2+). -- [bug] JAVA-219: Fix parsing of counter types. -- [bug] JAVA-218: Fix missing whitespace for IN clause in the query builder. -- [bug] JAVA-221: Fix replicas computation for token aware balancing. 
+- [new] Add LOCAL_ONE consistency level support (requires using C* 2.0.2+) (JAVA-207) +- [bug] Fix parsing of counter types (JAVA-219) +- [bug] Fix missing whitespace for IN clause in the query builder (JAVA-218) +- [bug] Fix replicas computation for token aware balancing (JAVA-221) Merged from 1.0 branch: -- [bug] JAVA-213: Fix regression from JAVA-201. +- [bug] Fix regression from JAVA-201 (JAVA-213) - [improvement] New getter to obtain a snapshot of the scores maintained by LatencyAwarePolicy. ### 2.0.0-rc1 -- [new] JAVA-199: Mark compression dependencies optional in maven. +- [new] Mark compression dependencies optional in maven (JAVA-199). - [api] Renamed TableMetadata#getClusteringKey to TableMetadata#getClusteringColumns. Merged from 1.0 branch: -- [new] JAVA-142: OSGi bundle. -- [improvement] JAVA-205: Make collections returned by Row immutable. -- [improvement] JAVA-203: Limit internal thread pool size. -- [bug] JAVA-201: Don't retain unused PreparedStatement in memory. +- [new] OSGi bundle (JAVA-142) +- [improvement] Make collections returned by Row immutable (JAVA-205) +- [improvement] Limit internal thread pool size (JAVA-203) +- [bug] Don't retain unused PreparedStatement in memory (JAVA-201) - [bug] Add missing clustering order info in TableMetadata -- [bug] JAVA-196: Allow bind markers for collections in the query builder. +- [bug] Allow bind markers for collections in the query builder (JAVA-196) ### 2.0.0-beta2 @@ -2256,10 +597,11 @@ Merged from 1.0 branch: allows to handle the potential last token sent by the server. - [new] The query builder don't serialize large values to strings anymore by default by making use the new ability to send values alongside the query string. -- [new] JAVA-140: The query builder has been updated for new CQL features. +- [new] The query builder has been updated for new CQL features (JAVA-140). - [bug] Fix exception when a conditional write timeout C* side. 
-- [bug] JAVA-182: Ensure connection is created when Cluster metadata are asked for. -- [bug] JAVA-187: Fix potential NPE during authentication. +- [bug] Ensure connection is created when Cluster metadata are asked for + (JAVA-182). +- [bug] Fix potential NPE during authentication (JAVA-187) ### 2.0.0-beta1 @@ -2269,7 +611,7 @@ Merged from 1.0 branch: and you are encouraged to look at the Upgrade_guide_to_2.0 file that describe those changes in details. - [new] LZ4 compression is supported for the protocol. -- [new] JAVA-39: The driver does not depend on cassandra-all anymore. +- [new] The driver does not depend on cassandra-all anymore (JAVA-39) - [new] New BatchStatement class allows to execute batch other statements. - [new] Large ResultSet are now paged (incrementally fetched) by default. - [new] SimpleStatement support values for bind-variables, to allow @@ -2278,42 +620,44 @@ Merged from 1.0 branch: configured globally. - [new] New Cassandra 2.0 SERIAL and LOCAL_SERIAL consistency levels are supported. -- [new] JAVA-116: Cluster#shutdown now waits for ongoing queries to complete by default. +- [new] Cluster#shutdown now waits for ongoing queries to complete by default + (JAVA-116). - [new] Generic authentication through SASL is now exposed. -- [bug] JAVA-88: TokenAwarePolicy now takes all replica into account, instead of only the - first one. +- [bug] TokenAwarePolicy now takes all replica into account, instead of only the + first one (JAVA-88). ### 1.0.5 -- [new] JAVA-142: OSGi bundle. -- [new] JAVA-207: Add support for ConsistencyLevel.LOCAL_ONE; note that this - require Cassandra 1.2.12+. -- [improvement] JAVA-205: Make collections returned by Row immutable. -- [improvement] JAVA-203: Limit internal thread pool size. 
+- [new] OSGi bundle (JAVA-142) +- [new] Add support for ConsistencyLevel.LOCAL_ONE; note that this + require Cassandra 1.2.12+ (JAVA-207) +- [improvement] Make collections returned by Row immutable (JAVA-205) +- [improvement] Limit internal thread pool size (JAVA-203) - [improvement] New getter to obtain a snapshot of the scores maintained by LatencyAwarePolicy. -- [improvement] JAVA-222: Avoid synchronization when getting codec for collection - types. -- [bug] JAVA-201, JAVA-213: Don't retain unused PreparedStatement in memory. +- [improvement] Avoid synchronization when getting codec for collection + types (JAVA-222) +- [bug] Don't retain unused PreparedStatement in memory (JAVA-201, JAVA-213) - [bug] Add missing clustering order info in TableMetadata -- [bug] JAVA-196: Allow bind markers for collections in the query builder. +- [bug] Allow bind markers for collections in the query builder (JAVA-196) ### 1.0.4 -- [api] JAVA-163: The Cluster.Builder#poolingOptions and Cluster.Builder#socketOptions +- [api] The Cluster.Builder#poolingOptions and Cluster.Builder#socketOptions are now deprecated. They are replaced by the new withPoolingOptions and - withSocketOptions methods. -- [new] JAVA-129: A new LatencyAwarePolicy wrapping policy has been added, allowing to - add latency awareness to a wrapped load balancing policy. -- [new] JAVA-161: Cluster.Builder#deferInitialization: Allow defering cluster initialization. -- [new] JAVA-117: Add truncate statement in query builder. -- [new] JAVA-106: Support empty IN in the query builder. -- [bug] JAVA-166: Fix spurious "No current pool set; this should not happen" error - message. -- [bug] JAVA-184: Fix potential overflow in RoundRobinPolicy and correctly errors if - a balancing policy throws. + withSocketOptions methods (JAVA-163). +- [new] A new LatencyAwarePolicy wrapping policy has been added, allowing to + add latency awareness to a wrapped load balancing policy (JAVA-129). 
+- [new] Allow defering cluster initialization (Cluster.Builder#deferInitialization) + (JAVA-161) +- [new] Add truncate statement in query builder (JAVA-117). +- [new] Support empty IN in the query builder (JAVA-106). +- [bug] Fix spurious "No current pool set; this should not happen" error + message (JAVA-166) +- [bug] Fix potential overflow in RoundRobinPolicy and correctly errors if + a balancing policy throws (JAVA-184) - [bug] Don't release Stream ID for timeouted queries (unless we do get back the response) - [bug] Correctly escape identifiers and use fully qualified table names when @@ -2327,8 +671,8 @@ Merged from 1.0 branch: - [new] SocketOptions#setReadTimeout allows to set a timeout on how long we wait for the answer of one node. See the javadoc for more details. - [new] New Session#prepare method that takes a Statement. -- [bug] JAVA-143: Always take per-query CL, tracing, etc. into account for QueryBuilder - statements. +- [bug] Always take per-query CL, tracing, etc. into account for QueryBuilder + statements (JAVA-143). - [bug] Temporary fixup for TimestampType when talking to C* 2.0 nodes. @@ -2339,27 +683,29 @@ Merged from 1.0 branch: in the monitor and you should now register Host.StateListener against the Cluster object directly (registering against a host HealthMonitor was much more limited anyway). -- [new] JAVA-92: New serialize/deserialize methods in DataType to serialize/deserialize - values to/from bytes. -- [new] JAVA-128: New getIndexOf() method in ColumnDefinitions to find the index of - a given column name. -- [bug] JAVA-131: Fix a bug when thread could get blocked while setting the current - keyspace. -- [bug] JAVA-136: Quote inet addresses in the query builder since CQL3 requires it. +- [new] New serialize/deserialize methods in DataType to serialize/deserialize + values to/from bytes (JAVA-92). +- [new] New getIndexOf() method in ColumnDefinitions to find the index of + a given column name (JAVA-128). 
+- [bug] Fix a bug when thread could get blocked while setting the current + keyspace (JAVA-131). +- [bug] Quote inet addresses in the query builder since CQL3 requires it + (JAVA-136) ### 1.0.1 -- [api] JAVA-100: Function call handling in the query builder has been modified in a +- [api] Function call handling in the query builder has been modified in a backward incompatible way. Function calls are not parsed from string values - anymore as this wasn't safe. Instead the new 'fcall' method should be used. + anymore as this wasn't safe. Instead the new 'fcall' method should be used + (JAVA-100). - [api] Some typos in method names in PoolingOptions have been fixed in a backward incompatible way before the API get widespread. -- [bug] JAVA-123: Don't destroy composite partition key with BoundStatement and - TokenAwarePolicy. +- [bug] Don't destroy composite partition key with BoundStatement and + TokenAwarePolicy (JAVA-123). - [new] null values support in the query builder. -- [new] JAVA-5: SSL support (requires C* >= 1.2.1). -- [new] JAVA-113: Allow generating unlogged batch in the query builder. +- [new] SSL support (requires C* >= 1.2.1) (JAVA-5). +- [new] Allow generating unlogged batch in the query builder (JAVA-113). - [improvement] Better error message when no host are available. - [improvement] Improves performance of the stress example application been. @@ -2374,38 +720,39 @@ Merged from 1.0 branch: - [api] The isMetricsEnabled() method in Configuration has been replaced by getMetricsOptions(). An option to disabled JMX reporting (on by default) has been added. -- [bug] JAVA-91: Don't make default load balancing policy a static singleton since it - is stateful. +- [bug] Don't make default load balancing policy a static singleton since it + is stateful (JAVA-91). 
### 1.0.0-RC1 -- [new] JAVA-79: Null values are now supported in BoundStatement (but you will need at +- [new] Null values are now supported in BoundStatement (but you will need at least Cassandra 1.2.3 for it to work). The API of BoundStatement has been slightly changed so that not binding a variable is not an error anymore, the variable is simply considered null by default. The isReady() method has - been removed. -- [improvement] JAVA-75: The Cluster/Session shutdown methods now properly block until - the shutdown is complete. A version with at timeout has been added. -- [bug] JAVA-44: Fix use of CQL3 functions in the query builder. -- [bug] JAVA-77: Fix case where multiple schema changes too quickly wouldn't work - (only triggered when 0.0.0.0 was used for the rpc_address on the Cassandra - nodes). -- [bug] JAVA-72: Fix IllegalStateException thrown due to a reconnection made on an I/O - thread. -- [bug] JAVA-82: Correctly reports errors during authentication phase. + been removed (JAVA-79). +- [improvement] The Cluster/Session shutdown methods now properly block until + the shutdown is complete. A version with at timeout has been added (JAVA-75). +- [bug] Fix use of CQL3 functions in the query builder (JAVA-44). +- [bug] Fix case where multiple schema changes too quickly wouldn't work + (only triggered when 0.0.0.0 was use for the rpc_address on the Cassandra + nodes) (JAVA-77). +- [bug] Fix IllegalStateException thrown due to a reconnection made on an I/O + thread (JAVA-72). +- [bug] Correctly reports errors during authentication phase (JAVA-82). ### 1.0.0-beta2 -- [new] JAVA-51, JAVA-60, JAVA-58: Support blob constants, BigInteger, BigDecimal and counter batches in - the query builder. -- [new] JAVA-61: Basic support for custom CQL3 types. 
-- [new] JAVA-65: Add "execution infos" for a result set (this also move the query +- [new] Support blob constants, BigInteger, BigDecimal and counter batches in + the query builder (JAVA-51, JAVA-60, JAVA-58) +- [new] Basic support for custom CQL3 types (JAVA-61) +- [new] Add "execution infos" for a result set (this also move the query trace in the new ExecutionInfos object, so users of beta1 will have to - update). -- [bug] JAVA-62: Fix failover bug in DCAwareRoundRobinPolicy. -- [bug] JAVA-66: Fix use of bind markers for routing keys in the query builder. + update) (JAVA-65) +- [bug] Fix failover bug in DCAwareRoundRobinPolicy (JAVA-62) +- [bug] Fix use of bind markers for routing keys in the query builder + (JAVA-66) ### 1.0.0-beta1 diff --git a/ci/create-user.sh b/ci/create-user.sh deleted file mode 100644 index fb193df9a00..00000000000 --- a/ci/create-user.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -################################ -# -# Prep -# -################################ - -if [ "$1" == "-h" ]; then - echo "$0 [-h] " - echo " this script is used internally by other scripts in the same directory to create a user with the running host user's same uid and gid" - exit 1 -fi - -# arguments -username=$1 -uid=$2 -gid=$3 -BUILD_HOME=$4 - -################################ -# -# Main -# -################################ - -# disable git directory ownership checks -su ${username} -c "git config --global safe.directory '*'" - -if grep "^ID=" /etc/os-release | grep -q 'debian\|ubuntu' ; then - deluser docker - adduser --quiet --disabled-login --no-create-home --uid $uid --gecos ${username} ${username} - groupmod --non-unique -g $gid $username - gpasswd -a ${username} sudo >/dev/null -else - adduser --no-create-home --uid $uid ${username} -fi - -# sudo priviledges -echo "${username} ALL=(root) NOPASSWD:ALL" > /etc/sudoers.d/${username} -chmod 0440 /etc/sudoers.d/${username} - -# proper permissions -chown -R ${username}:${username} /home/docker -chmod og+wx ${BUILD_HOME} \ No newline at end of file diff --git a/ci/install-jdk.sh b/ci/install-jdk.sh deleted file mode 100644 index 674961c2daf..00000000000 --- a/ci/install-jdk.sh +++ /dev/null @@ -1,318 +0,0 @@ -#!/usr/bin/env bash - -# -# Install JDK for Linux and Mac OS -# -# This script determines the most recent early-access build number, -# downloads the JDK archive to the user home directory and extracts -# it there. 
-# -# Exported environment variables (when sourcing this script) -# -# JAVA_HOME is set to the extracted JDK directory -# PATH is prepended with ${JAVA_HOME}/bin -# -# (C) 2018 Christian Stein -# -# https://github.com/sormuras/bach/blob/master/install-jdk.sh -# - -set -o errexit -#set -o nounset # https://github.com/travis-ci/travis-ci/issues/5434 -#set -o xtrace - -function initialize() { - readonly script_name="$(basename "${BASH_SOURCE[0]}")" - readonly script_version='2018-10-17' - - dry=false - silent=false - verbose=false - emit_java_home=false - - feature='ea' - license='GPL' - os='?' - url='?' - workspace="${HOME}" - target='?' - cacerts=false -} - -function usage() { -cat << EOF -Usage: ${script_name} [OPTION]... -Download and extract the latest-and-greatest JDK from java.net or Oracle. - -Version: ${script_version} -Options: - -h|--help Displays this help - -d|--dry-run Activates dry-run mode - -s|--silent Displays no output - -e|--emit-java-home Print value of "JAVA_HOME" to stdout (ignores silent mode) - -v|--verbose Displays verbose output - - -f|--feature 9|10|...|ea JDK feature release number, defaults to "ea" - -l|--license GPL|BCL License defaults to "GPL", BCL also indicates OTN-LA for Oracle Java SE - -o|--os linux-x64|osx-x64 Operating system identifier (works best with GPL license) - -u|--url "https://..." Use custom JDK archive (provided as .tar.gz file) - -w|--workspace PATH Working directory defaults to \${HOME} [${HOME}] - -t|--target PATH Target directory, defaults to first component of the tarball - -c|--cacerts Link system CA certificates (currently only Debian/Ubuntu is supported) -EOF -} - -function script_exit() { - if [[ $# -eq 1 ]]; then - printf '%s\n' "$1" - exit 0 - fi - - if [[ $# -eq 2 && $2 =~ ^[0-9]+$ ]]; then - printf '%b\n' "$1" - exit "$2" - fi - - script_exit 'Invalid arguments passed to script_exit()!' 
2 -} - -function say() { - if [[ ${silent} != true ]]; then - echo "$@" - fi -} - -function verbose() { - if [[ ${verbose} == true ]]; then - echo "$@" - fi -} - -function parse_options() { - local option - while [[ $# -gt 0 ]]; do - option="$1" - shift - case ${option} in - -h|-H|--help) - usage - exit 0 - ;; - -v|-V|--verbose) - verbose=true - ;; - -s|-S|--silent) - silent=true - verbose "Silent mode activated" - ;; - -d|-D|--dry-run) - dry=true - verbose "Dry-run mode activated" - ;; - -e|-E|--emit-java-home) - emit_java_home=true - verbose "Emitting JAVA_HOME" - ;; - -f|-F|--feature) - feature="$1" - verbose "feature=${feature}" - shift - ;; - -l|-L|--license) - license="$1" - verbose "license=${license}" - shift - ;; - -o|-O|--os) - os="$1" - verbose "os=${os}" - shift - ;; - -u|-U|--url) - url="$1" - verbose "url=${url}" - shift - ;; - -w|-W|--workspace) - workspace="$1" - verbose "workspace=${workspace}" - shift - ;; - -t|-T|--target) - target="$1" - verbose "target=${target}" - shift - ;; - -c|-C|--cacerts) - cacerts=true - verbose "Linking system CA certificates" - ;; - *) - script_exit "Invalid argument was provided: ${option}" 2 - ;; - esac - done -} - -function determine_latest_jdk() { - local number - local curl_result - local url - - verbose "Determine latest JDK feature release number" - number=9 - while [[ ${number} != 99 ]] - do - url=http://jdk.java.net/${number} - curl_result=$(curl -o /dev/null --silent --head --write-out %{http_code} ${url}) - if [[ ${curl_result} -ge 400 ]]; then - break - fi - verbose " Found ${url} [${curl_result}]" - latest_jdk=${number} - number=$[$number +1] - done - - verbose "Latest JDK feature release number is: ${latest_jdk}" -} - -function perform_sanity_checks() { - if [[ ${feature} == '?' 
]] || [[ ${feature} == 'ea' ]]; then - feature=${latest_jdk} - fi - if [[ ${feature} -lt 9 ]] || [[ ${feature} -gt ${latest_jdk} ]]; then - script_exit "Expected feature release number in range of 9 to ${latest_jdk}, but got: ${feature}" 3 - fi - if [[ -d "$target" ]]; then - script_exit "Target directory must not exist, but it does: $(du -hs '${target}')" 3 - fi -} - -function determine_url() { - local DOWNLOAD='https://download.java.net/java' - local ORACLE='http://download.oracle.com/otn-pub/java/jdk' - - # Archived feature or official GA build? - case "${feature}-${license}" in - 9-GPL) url="${DOWNLOAD}/GA/jdk9/9.0.4/binaries/openjdk-9.0.4_${os}_bin.tar.gz"; return;; - 9-BCL) url="${ORACLE}/9.0.4+11/c2514751926b4512b076cc82f959763f/jdk-9.0.4_${os}_bin.tar.gz"; return;; - 10-GPL) url="${DOWNLOAD}/GA/jdk10/10.0.2/19aef61b38124481863b1413dce1855f/13/openjdk-10.0.2_${os}_bin.tar.gz"; return;; - 10-BCL) url="${ORACLE}/10.0.2+13/19aef61b38124481863b1413dce1855f/jdk-10.0.2_${os}_bin.tar.gz"; return;; - 11-GPL) url="${DOWNLOAD}/GA/jdk11/13/GPL/openjdk-11.0.1_${os}_bin.tar.gz"; return;; - 11-BCL) url="${ORACLE}/11.0.1+13/90cf5d8f270a4347a95050320eef3fb7/jdk-11.0.1_${os}_bin.tar.gz"; return;; - esac - - # EA or RC build? - local JAVA_NET="http://jdk.java.net/${feature}" - local candidates=$(wget --quiet --output-document - ${JAVA_NET} | grep -Eo 'href[[:space:]]*=[[:space:]]*"[^\"]+"' | grep -Eo '(http|https)://[^"]+') - url=$(echo "${candidates}" | grep -Eo "${DOWNLOAD}/.+/jdk${feature}/.+/${license}/.*jdk-${feature}.+${os}_bin.tar.gz$" || true) - - if [[ -z ${url} ]]; then - script_exit "Couldn't determine a download url for ${feature}-${license} on ${os}" 1 - fi -} - -function prepare_variables() { - if [[ ${os} == '?' ]]; then - if [[ "$OSTYPE" == "darwin"* ]]; then - os='osx-x64' - else - os='linux-x64' - fi - fi - if [[ ${url} == '?' 
]]; then - determine_latest_jdk - perform_sanity_checks - determine_url - else - feature='' - license='' - os='' - fi - archive="${workspace}/$(basename ${url})" - status=$(curl -o /dev/null --silent --head --write-out %{http_code} ${url}) -} - -function print_variables() { -cat << EOF -Variables: - feature = ${feature} - license = ${license} - os = ${os} - url = ${url} - status = ${status} - archive = ${archive} -EOF -} - -function download_and_extract_and_set_target() { - local quiet='--quiet'; if [[ ${verbose} == true ]]; then quiet=''; fi - local local="--directory-prefix ${workspace}" - local remote='--timestamping --continue' - local wget_options="${quiet} ${local} ${remote}" - local tar_options="--file ${archive}" - - say "Downloading JDK from ${url}..." - verbose "Using wget options: ${wget_options}" - if [[ ${license} == 'GPL' ]]; then - wget ${wget_options} ${url} - else - wget ${wget_options} --header "Cookie: oraclelicense=accept-securebackup-cookie" ${url} - fi - - verbose "Using tar options: ${tar_options}" - if [[ ${target} == '?' ]]; then - tar --extract ${tar_options} -C "${workspace}" - if [[ "$OSTYPE" != "darwin"* ]]; then - target="${workspace}"/$(tar --list ${tar_options} | grep 'bin/javac' | tr '/' '\n' | tail -3 | head -1) - else - target="${workspace}"/$(tar --list ${tar_options} | head -2 | tail -1 | cut -f 2 -d '/' -)/Contents/Home - fi - else - if [[ "$OSTYPE" != "darwin"* ]]; then - mkdir --parents "${target}" - tar --extract ${tar_options} -C "${target}" --strip-components=1 - else - mkdir -p "${target}" - tar --extract ${tar_options} -C "${target}" --strip-components=4 # . / / Contents / Home - fi - fi - - if [[ ${verbose} == true ]]; then - echo "Set target to: ${target}" - echo "Content of target directory:" - ls "${target}" - echo "Content of release file:" - [[ ! 
-f "${target}/release" ]] || cat "${target}/release" - fi - - # Link to system certificates - # http://openjdk.java.net/jeps/319 - # https://bugs.openjdk.java.net/browse/JDK-8196141 - # TODO: Provide support for other distributions than Debian/Ubuntu - if [[ ${cacerts} == true ]]; then - mv "${target}/lib/security/cacerts" "${target}/lib/security/cacerts.jdk" - ln -s /etc/ssl/certs/java/cacerts "${target}/lib/security/cacerts" - fi -} - -function main() { - initialize - say "$script_name $script_version" - - parse_options "$@" - prepare_variables - - if [[ ${silent} == false ]]; then print_variables; fi - if [[ ${dry} == true ]]; then exit 0; fi - - download_and_extract_and_set_target - - export JAVA_HOME=$(cd "${target}"; pwd) - export PATH=${JAVA_HOME}/bin:$PATH - - if [[ ${silent} == false ]]; then java -version; fi - if [[ ${emit_java_home} == true ]]; then echo "${JAVA_HOME}"; fi -} - -main "$@" \ No newline at end of file diff --git a/ci/run-tests.sh b/ci/run-tests.sh deleted file mode 100755 index 5268bdd7113..00000000000 --- a/ci/run-tests.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -x - -. ~/.jabba/jabba.sh -. ~/env.txt -cd $(dirname "$(readlink -f "$0")")/.. 
-printenv | sort -mvn -B -V install -DskipTests -Dmaven.javadoc.skip=true -jabba use ${TEST_JAVA_VERSION} -# Find out the latest patch version of Cassandra -PATCH_SERVER_VERSION=$(curl -s https://downloads.apache.org/cassandra/ | grep -oP '(?<=href=\")[0-9]+\.[0-9]+\.[0-9]+(?=)' | sort -rV | uniq -w 3 | grep $SERVER_VERSION) -printenv | sort -mvn -B -V verify -T 1 -Ptest-jdk-${TEST_JAVA_MAJOR_VERSION} -DtestJavaHome=$(jabba which ${TEST_JAVA_VERSION}) -Dccm.version=${PATCH_SERVER_VERSION} -Dccm.dse=false -Dmaven.test.failure.ignore=true -Dmaven.javadoc.skip=true diff --git a/clirr-ignores.xml b/clirr-ignores.xml new file mode 100644 index 00000000000..82cbd435f17 --- /dev/null +++ b/clirr-ignores.xml @@ -0,0 +1,32 @@ + + + + + 6006 + com/datastax/driver/core/ProtocolVersion + NEWEST_SUPPORTED + This was an oversight, this field was never intended to be non final + + + + 8001 + com/datastax/driver/core/schemabuilder/ColumnType$NativeColumnType + This class was accidentally exposed, it was meant to be package-private + + + + 1001 + com/datastax/driver/core/HostConnectionPool$Phase + False positive: HostConnectionPool is not exposed to clients + + + diff --git a/core-shaded/pom.xml b/core-shaded/pom.xml deleted file mode 100644 index 84cb4b15398..00000000000 --- a/core-shaded/pom.xml +++ /dev/null @@ -1,366 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-core-shaded - Apache Cassandra Java Driver - core with shaded deps - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - - org.apache.cassandra - java-driver-core - - - - com.datastax.oss - native-protocol - - - org.apache.cassandra - java-driver-guava-shaded - - - com.typesafe - config - - - com.github.jnr - jnr-posix - - - org.xerial.snappy - snappy-java - true - - - at.yawk.lz4 - lz4-java - true - - - org.slf4j - slf4j-api - - - io.dropwizard.metrics - metrics-core - - - org.hdrhistogram - HdrHistogram - - - 
com.esri.geometry - esri-geometry-api - true - - - org.apache.tinkerpop - gremlin-core - true - - - org.apache.tinkerpop - tinkergraph-gremlin - true - - - org.reactivestreams - reactive-streams - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - - - - - src/main/resources - - - ${project.basedir}/.. - - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - maven-shade-plugin - - - shade-core-dependencies - package - - shade - - - true - true - - - - org.apache.cassandra:java-driver-core - io.netty:* - com.fasterxml.jackson.core:* - - - - - - io.netty - com.datastax.oss.driver.shaded.netty - - - com.fasterxml.jackson - com.datastax.oss.driver.shaded.fasterxml.jackson - - - - - - org.apache.cassandra:* - - - META-INF/MANIFEST.MF - META-INF/maven/** - - - - io.netty:* - - META-INF/** - - - - com.fasterxml.jackson.core:* - - META-INF/** - - - - - - - - - maven-dependency-plugin - - - unpack-shaded-classes - package - - unpack - - - - - org.apache.cassandra - java-driver-core-shaded - jar - ${project.build.outputDirectory} - - - - - - - unpack-shaded-sources - package - - unpack - - - - - org.apache.cassandra - java-driver-core-shaded - jar - sources - ${project.build.directory}/shaded-sources - - - - - - - - - com.google.code.maven-replacer-plugin - replacer - 1.5.3 - - - shade-graalvm-files - package - - replace - - - - - false - ${project.build.directory}/classes/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json,${project.build.directory}/shaded-sources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json - - - io.netty - com.datastax.oss.driver.shaded.netty - - - - - - org.apache.felix - maven-bundle-plugin - true - - - generate-shaded-manifest - package - - manifest - - - - com.datastax.oss.driver.core - com.datastax.oss.driver.core - - * - - !com.datastax.oss.driver.shaded.netty.*, !com.datastax.oss.driver.shaded.fasterxml.jackson.*, - 
!net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, - !org.graalvm.*, !com.oracle.svm.*, - jnr.*;resolution:=optional, com.esri.core.geometry.*;resolution:=optional,org.reactivestreams.*;resolution:=optional, org.apache.tinkerpop.*;resolution:=optional, org.javatuples.*;resolution:=optional, reactor.blockhound.*;resolution:=optional, - !com.google.protobuf.*, !com.jcraft.jzlib.*, !com.ning.compress.*, !lzma.sdk.*, !net.jpountz.xxhash.*, !org.bouncycastle.*, !org.conscrypt.*, !org.apache.commons.logging.*, !org.apache.log4j.*, !org.apache.logging.log4j.*, !org.eclipse.jetty.*, !org.jboss.marshalling.*, !sun.misc.*, !sun.security.*, !com.barchart.udt.*, !com.fasterxml.aalto.*, !com.sun.nio.sctp.*, !gnu.io.*, !org.xml.sax.*, !org.w3c.dom.*, !com.aayushatharva.brotli4j.*, !com.github.luben.zstd.*, * - - - com.datastax.oss.driver.api.core.*, com.datastax.oss.driver.internal.core.*, com.datastax.dse.driver.api.core.*, com.datastax.dse.driver.internal.core.*, com.datastax.oss.driver.shaded.netty.*, com.datastax.oss.driver.shaded.fasterxml.jackson.*, - - true - - - - - - maven-assembly-plugin - - - generate-final-shaded-jar - package - - single - - - - - ${project.build.outputDirectory}/META-INF/MANIFEST.MF - - - src/assembly/shaded-jar.xml - - - false - - - - - - org.revapi - revapi-maven-plugin - - true - - - - - diff --git a/core-shaded/src/assembly/shaded-jar.xml b/core-shaded/src/assembly/shaded-jar.xml deleted file mode 100644 index 449eb77bd1a..00000000000 --- a/core-shaded/src/assembly/shaded-jar.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - shaded-jar - - jar - - false - - - - ${project.build.outputDirectory} - - - - - - - ${project.basedir}/dependency-reduced-pom.xml - META-INF/maven/com.datastax.oss/java-driver-core-shaded - pom.xml - - - diff --git a/core/console.scala b/core/console.scala deleted file mode 100644 index 491add7edea..00000000000 --- a/core/console.scala +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Allows quick manual tests from the Scala console: - * - * cd core/ - * mvn scala:console - * - * The script below is run at init, then you can do `val cluster = builder.build()` and play with - * it. - * - * Note: on MacOS, the Scala plugin seems to break the terminal if you exit the console with `:q`. - * Use Ctrl+C instead. 
- */ -import com.datastax.oss.driver.api.core._ -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent -import com.datastax.oss.driver.internal.core.context.InternalDriverContext -import java.net.InetSocketAddress - -import CqlSession - -// Heartbeat logs every 30 seconds are annoying in the console, raise the interval -System.setProperty("datastax-java-driver.advanced.heartbeat.interval", "1 hour") - -val address1 = new InetSocketAddress("127.0.0.1", 9042) -val address2 = new InetSocketAddress("127.0.0.2", 9042) -val address3 = new InetSocketAddress("127.0.0.3", 9042) -val address4 = new InetSocketAddress("127.0.0.4", 9042) -val address5 = new InetSocketAddress("127.0.0.5", 9042) -val address6 = new InetSocketAddress("127.0.0.6", 9042) - -val builder = CqlSession.builder().addContactPoint(address1) - -println("********************************************") -println("* To start a driver instance, run: *") -println("* implicit val session = builder.build *") -println("********************************************") - -def fire(event: AnyRef)(implicit session: CqlSession): Unit = { - session.getContext.asInstanceOf[InternalDriverContext].getEventBus().fire(event) -} diff --git a/core/pom.xml b/core/pom.xml deleted file mode 100644 index 8758d20d78a..00000000000 --- a/core/pom.xml +++ /dev/null @@ -1,356 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-core - bundle - Apache Cassandra Java Driver - core - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - com.datastax.oss - native-protocol - - - io.netty - netty-handler - - - org.apache.cassandra - java-driver-guava-shaded - - - com.typesafe - config - - - - com.github.jnr - jnr-posix - - - org.xerial.snappy - snappy-java - true - - - at.yawk.lz4 - lz4-java - true - - - org.slf4j - slf4j-api - - - io.dropwizard.metrics - metrics-core - - - org.hdrhistogram - HdrHistogram - - - com.esri.geometry - esri-geometry-api 
- true - - - org.apache.tinkerpop - gremlin-core - true - - - org.apache.tinkerpop - tinkergraph-gremlin - true - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - org.reactivestreams - reactive-streams - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - org.graalvm.sdk - graal-sdk - provided - - - org.graalvm.nativeimage - svm - provided - - - io.projectreactor.tools - blockhound - provided - - - ch.qos.logback - logback-classic - test - - - junit - junit - test - - - com.tngtech.java - junit-dataprovider - test - - - org.assertj - assertj-core - test - - - org.mockito - mockito-core - test - - - io.reactivex.rxjava2 - rxjava - test - - - org.reactivestreams - reactive-streams-tck - test - - - org.awaitility - awaitility - test - - - org.testng - testng - test - - - com.github.tomakehurst - wiremock - test - - - - - - src/main/resources - - com/datastax/oss/driver/Driver.properties - - true - - - src/main/resources - - com/datastax/oss/driver/Driver.properties - - false - - - ${project.basedir}/.. 
- - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - src/test/resources - - project.properties - - true - - - src/test/resources - - project.properties - - false - - - - - maven-jar-plugin - - - - com.datastax.oss.driver.core - - - - - - test-jar - - test-jar - - - - logback-test.xml - - - - - - - maven-surefire-plugin - - ${testing.jvm}/bin/java - ${mockitoopens.argline} - 1 - - - listener - com.datastax.oss.driver.DriverRunListener - - - - junit - false - - - suitename - Reactive Streams TCK - - - - - - org.apache.maven.surefire - surefire-junit47 - ${surefire.version} - - - org.apache.maven.surefire - surefire-testng - ${surefire.version} - - - - - org.apache.felix - maven-bundle-plugin - true - - - - bundle - - - - com.datastax.oss.driver.core - - * - - !net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, - !org.graalvm.*, !com.oracle.svm.*, - jnr.*;resolution:=optional, com.esri.core.geometry.*;resolution:=optional, org.reactivestreams.*;resolution:=optional, org.apache.tinkerpop.*;resolution:=optional, org.javatuples.*;resolution:=optional, reactor.blockhound.*;resolution:=optional, * - - com.datastax.oss.driver.*.core.*, com.datastax.dse.driver.*.core.* - - - - - - - maven-dependency-plugin - - - generate-dependency-list - - list - - generate-resources - - runtime - true - com.datastax.cassandra,com.datastax.dse - ${project.build.outputDirectory}/com/datastax/dse/driver/internal/deps.txt - - - - - - - diff --git a/core/revapi.json b/core/revapi.json deleted file mode 100644 index 8c707659c13..00000000000 --- a/core/revapi.json +++ /dev/null @@ -1,7418 +0,0 @@ -{ - "revapi": { - "java": { - "filter": { - "packages": { - "regex": true, - "exclude": [ - "com\\.datastax\\.(oss|dse)\\.protocol\\.internal(\\..+)?", - "com\\.datastax\\.(oss|dse)\\.driver\\.internal(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.shaded(\\..+)?", - "org\\.assertj(\\..+)?" 
- ] - } - } - }, - "ignore": [ - { - "code": "java.method.removed", - "old": "method com.datastax.oss.driver.api.core.cql.BatchStatementBuilder com.datastax.oss.driver.api.core.cql.BatchStatementBuilder::withKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method com.datastax.oss.driver.api.core.cql.BatchStatementBuilder com.datastax.oss.driver.api.core.cql.BatchStatementBuilder::withKeyspace(java.lang.String)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder::withKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder::withKeyspace(java.lang.String)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder::withQuery(java.lang.String)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, 
StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withExecutionProfileName(java.lang.String)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withIdempotence(java.lang.Boolean)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withNode(com.datastax.oss.driver.api.core.metadata.Node)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withPageSize(int)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withPagingState(java.nio.ByteBuffer)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends 
com.datastax.oss.driver.api.core.cql.Statement>>::withRoutingKey(java.nio.ByteBuffer)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withRoutingKeyspace(java.lang.String)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withTimeout(java.time.Duration)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT 
extends com.datastax.oss.driver.api.core.cql.Statement>>::withTimestamp(long)", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.StatementBuilder>, StatementT>, StatementT extends com.datastax.oss.driver.api.core.cql.Statement>>::withTracing()", - "justification": "JAVA-2164: Rename statement builder methods to setXxx" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter void com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException::(===java.net.SocketAddress===, java.lang.String, java.util.List)", - "new": "parameter void com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException::(===com.datastax.oss.driver.api.core.metadata.EndPoint===, java.lang.String, java.util.List)", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException::forNegotiation(===java.net.SocketAddress===, java.util.List)", - "new": "parameter com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException::forNegotiation(===com.datastax.oss.driver.api.core.metadata.EndPoint===, java.util.List)", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException::forSingleAttempt(===java.net.SocketAddress===, com.datastax.oss.driver.api.core.ProtocolVersion)", - "new": "parameter com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException 
com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException::forSingleAttempt(===com.datastax.oss.driver.api.core.metadata.EndPoint===, com.datastax.oss.driver.api.core.ProtocolVersion)", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.removed", - "old": "method java.net.SocketAddress com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException::getAddress()", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter com.datastax.oss.driver.api.core.auth.Authenticator com.datastax.oss.driver.api.core.auth.AuthProvider::newAuthenticator(===java.net.SocketAddress===, java.lang.String) throws com.datastax.oss.driver.api.core.auth.AuthenticationException", - "new": "parameter com.datastax.oss.driver.api.core.auth.Authenticator com.datastax.oss.driver.api.core.auth.AuthProvider::newAuthenticator(===com.datastax.oss.driver.api.core.metadata.EndPoint===, java.lang.String) throws com.datastax.oss.driver.api.core.auth.AuthenticationException", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter void com.datastax.oss.driver.api.core.auth.AuthProvider::onMissingChallenge(===java.net.SocketAddress===) throws com.datastax.oss.driver.api.core.auth.AuthenticationException", - "new": "parameter void com.datastax.oss.driver.api.core.auth.AuthProvider::onMissingChallenge(===com.datastax.oss.driver.api.core.metadata.EndPoint===) throws com.datastax.oss.driver.api.core.auth.AuthenticationException", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter void com.datastax.oss.driver.api.core.auth.AuthenticationException::(===java.net.SocketAddress===, java.lang.String)", - "new": "parameter void 
com.datastax.oss.driver.api.core.auth.AuthenticationException::(===com.datastax.oss.driver.api.core.metadata.EndPoint===, java.lang.String)", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter void com.datastax.oss.driver.api.core.auth.AuthenticationException::(===java.net.SocketAddress===, java.lang.String, java.lang.Throwable)", - "new": "parameter void com.datastax.oss.driver.api.core.auth.AuthenticationException::(===com.datastax.oss.driver.api.core.metadata.EndPoint===, java.lang.String, java.lang.Throwable)", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.removed", - "old": "method java.net.SocketAddress com.datastax.oss.driver.api.core.auth.AuthenticationException::getAddress()", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.numberOfParametersChanged", - "old": "method void com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy::init(java.util.Map, com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy.DistanceReporter, java.util.Set)", - "new": "method void com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy::init(java.util.Map, com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy.DistanceReporter)", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.Metadata::getNodes()", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.Metadata::getNodes()", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.addedToInterface", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.Node::getBroadcastRpcAddress()", - "justification": "JAVA-2165: Abstract node connection 
information" - }, - { - "code": "java.method.removed", - "old": "method java.net.InetSocketAddress com.datastax.oss.driver.api.core.metadata.Node::getConnectAddress()", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.addedToInterface", - "new": "method com.datastax.oss.driver.api.core.metadata.EndPoint com.datastax.oss.driver.api.core.metadata.Node::getEndPoint()", - "package": "com.datastax.oss.driver.api.core.metadata", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter javax.net.ssl.SSLEngine com.datastax.oss.driver.api.core.ssl.SslEngineFactory::newSslEngine(===java.net.SocketAddress===)", - "new": "parameter javax.net.ssl.SSLEngine com.datastax.oss.driver.api.core.ssl.SslEngineFactory::newSslEngine(===com.datastax.oss.driver.api.core.metadata.EndPoint===)", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.addedToInterface", - "new": "method long com.datastax.oss.driver.api.core.cql.Statement>>::getQueryTimestamp()", - "justification": "JAVA-2143: Rename Statement.setTimestamp() to setQueryTimestamp()" - }, - { - "code": "java.method.removed", - "old": "method long com.datastax.oss.driver.api.core.cql.Statement>>::getTimestamp()", - "justification": "JAVA-2143: Rename Statement.setTimestamp() to setQueryTimestamp()" - }, - { - "code": "java.method.addedToInterface", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setQueryTimestamp(long)", - "justification": "JAVA-2143: Rename Statement.setTimestamp() to setQueryTimestamp()" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimestamp(long)", - "justification": "JAVA-2143: Rename Statement.setTimestamp() to setQueryTimestamp()" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile) @ 
com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int) @ 
com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[]) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[]) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[]) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[]) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - 
"annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ 
com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - 
"code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate 
mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - 
"old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, 
java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - 
"justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double) @ 
com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": 
"java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - 
"code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ 
com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, 
java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, 
java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate 
mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ 
com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, java.lang.String) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, java.lang.String) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String) @ 
com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, 
com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", 
- "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with 
@CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.Bindable>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(com.datastax.oss.driver.api.core.CqlIdentifier)", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(int)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(int)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(java.lang.String)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": 
"JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, 
ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": 
"JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": 
"JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { 
- "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - 
"annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: 
Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[]) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[]) @ 
com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": 
"method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration) @ 
com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, 
com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue) @ 
com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(com.datastax.oss.driver.api.core.CqlIdentifier) @ 
com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": 
"JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating 
methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ 
com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ 
com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, java.math.BigInteger) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method 
SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte) @ 
com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, 
- { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ 
com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": 
"JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress) @ 
com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant) @ 
com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate) @ 
com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime) @ 
com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, java.time.LocalTime) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - 
"old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ 
com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, 
java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT 
com.datastax.oss.driver.api.core.cql.Bindable>>::unset(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(int) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Bindable>>::unset(java.lang.String) @ com.datastax.oss.driver.api.core.cql.BoundStatementBuilder", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String) 
@ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": 
"method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[]) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[]) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": 
"method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration) @ 
com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::copy(java.nio.ByteBuffer)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setCustomPayload(java.util.Map)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - 
"code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfile(com.datastax.oss.driver.api.core.config.DriverExecutionProfile)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setExecutionProfileName(java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setIdempotent(java.lang.Boolean)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setNode(com.datastax.oss.driver.api.core.metadata.Node)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int)", - "new": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setPageSize(int)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setPagingState(java.nio.ByteBuffer)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[])", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKey(java.nio.ByteBuffer[])", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": 
"method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingKeyspace(java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setRoutingToken(com.datastax.oss.driver.api.core.metadata.token.Token)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setSerialConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTimeout(java.time.Duration)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setTracing(boolean)", - 
"annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, 
com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal)", - 
"annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean)", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer)", - "new": "method 
SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ 
com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with 
@CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Map, java.lang.Class, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Map, java.lang.Class, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class)", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method 
SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.metadata.token.Token)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.metadata.token.Token)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, 
- { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.data.SettableById>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": 
"java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer)", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", 
- "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short)", - "new": 
"method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: 
Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ 
com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class)", - "new": "method 
SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, 
java.math.BigInteger)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, java.math.BigInteger)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte)", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer)", - "new": 
"method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method 
SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, java.time.LocalTime)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, java.time.LocalTime)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with 
@CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": 
"java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, java.lang.String)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - 
"old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, com.datastax.oss.driver.api.core.metadata.token.Token)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, com.datastax.oss.driver.api.core.metadata.token.Token)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ 
com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.data.SettableByName>>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with 
@CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method 
SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, 
java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - 
"code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.data.TupleValue", - 
"annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, 
com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.data.TupleValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.data.TupleValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::set(com.datastax.oss.driver.api.core.CqlIdentifier, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ 
com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::set(int, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.codec.TypeCodec) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, com.datastax.oss.driver.api.core.type.reflect.GenericType) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": 
"java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::set(java.lang.String, ValueT, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigDecimal(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigDecimal(int, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setBigDecimal(java.lang.String, java.math.BigDecimal) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBigInteger(com.datastax.oss.driver.api.core.CqlIdentifier, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBigInteger(int, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBigInteger(java.lang.String, java.math.BigInteger) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating 
methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBoolean(com.datastax.oss.driver.api.core.CqlIdentifier, boolean) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBoolean(int, boolean) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBoolean(java.lang.String, boolean) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setByte(com.datastax.oss.driver.api.core.CqlIdentifier, byte) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByte(int, byte) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByte(java.lang.String, byte) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setByteBuffer(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": 
"method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setByteBuffer(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setByteBuffer(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setBytesUnsafe(com.datastax.oss.driver.api.core.CqlIdentifier, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setBytesUnsafe(int, java.nio.ByteBuffer) @ 
com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setBytesUnsafe(java.lang.String, java.nio.ByteBuffer) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setCqlDuration(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setCqlDuration(int, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate 
mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setCqlDuration(java.lang.String, com.datastax.oss.driver.api.core.data.CqlDuration) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setDouble(com.datastax.oss.driver.api.core.CqlIdentifier, double) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setDouble(int, double) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setDouble(java.lang.String, double) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setFloat(com.datastax.oss.driver.api.core.CqlIdentifier, float) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setFloat(int, float) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setFloat(java.lang.String, float) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInetAddress(com.datastax.oss.driver.api.core.CqlIdentifier, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInetAddress(int, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInetAddress(java.lang.String, java.net.InetAddress) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setInstant(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.Instant) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInstant(int, java.time.Instant) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInstant(java.lang.String, java.time.Instant) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setInt(com.datastax.oss.driver.api.core.CqlIdentifier, int) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": 
"java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setInt(int, int) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setInt(java.lang.String, int) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setList(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setList(int, java.util.List, java.lang.Class) @ 
com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setList(java.lang.String, java.util.List, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalDate(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalDate(int, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalDate(java.lang.String, java.time.LocalDate) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLocalTime(com.datastax.oss.driver.api.core.CqlIdentifier, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLocalTime(int, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, java.time.LocalTime) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLocalTime(java.lang.String, java.time.LocalTime) @ 
com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setLong(com.datastax.oss.driver.api.core.CqlIdentifier, long) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setLong(int, long) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setLong(java.lang.String, long) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Map, java.lang.Class, 
java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setMap(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setMap(int, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setMap(java.lang.String, java.util.Map, java.lang.Class, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT 
com.datastax.oss.driver.api.core.data.SettableById>>::setSet(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setSet(int, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setSet(java.lang.String, java.util.Set, java.lang.Class) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setShort(com.datastax.oss.driver.api.core.CqlIdentifier, short) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating 
methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setShort(int, short) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setShort(java.lang.String, short) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setString(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.String) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setString(int, java.lang.String) @ 
com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, java.lang.String) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setString(java.lang.String, java.lang.String) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToNull(com.datastax.oss.driver.api.core.CqlIdentifier) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToNull(int) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String) @ com.datastax.oss.driver.api.core.data.UdtValue", 
- "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToNull(java.lang.String) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setToken(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setToken(int, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, com.datastax.oss.driver.api.core.metadata.token.Token) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setToken(java.lang.String, com.datastax.oss.driver.api.core.metadata.token.Token) @ 
com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setTupleValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setTupleValue(int, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setTupleValue(java.lang.String, com.datastax.oss.driver.api.core.data.TupleValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": 
"@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUdtValue(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUdtValue(int, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUdtValue(java.lang.String, com.datastax.oss.driver.api.core.data.UdtValue) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - 
}, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>>::setUuid(com.datastax.oss.driver.api.core.CqlIdentifier, java.util.UUID) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>>::setUuid(int, java.util.UUID) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID) @ com.datastax.oss.driver.api.core.data.UdtValue", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>>::setUuid(java.lang.String, java.util.UUID) @ com.datastax.oss.driver.api.core.data.UdtValue", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Add missing `@NonNull` annotation to Statement.setConsistencyLevel" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>>", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Add missing `@NonNull` annotation to Statement.setConsistencyLevel" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Add missing `@NonNull` annotation to Statement.setConsistencyLevel" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": 
"@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Add missing `@NonNull` annotation to Statement.setConsistencyLevel" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>>::setConsistencyLevel(com.datastax.oss.driver.api.core.ConsistencyLevel)", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Add missing `@NonNull` annotation to Statement.setConsistencyLevel" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.concurrent.CompletionStage> com.datastax.oss.driver.api.core.AsyncPagingIterable::fetchNextPage() throws java.lang.IllegalStateException", - "new": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.AsyncPagingIterable>>::fetchNextPage() throws java.lang.IllegalStateException", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeChangedCovariantly", - "old": "method com.datastax.oss.driver.api.core.AsyncPagingIterable com.datastax.oss.driver.api.core.AsyncPagingIterable::map(java.util.function.Function)", - "new": "method com.datastax.oss.driver.api.core.MappedAsyncPagingIterable com.datastax.oss.driver.api.core.AsyncPagingIterable>>::map(java.util.function.Function)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.generics.formalTypeParameterAdded", - "old": "interface com.datastax.oss.driver.api.core.AsyncPagingIterable", - "new": "interface com.datastax.oss.driver.api.core.AsyncPagingIterable>", - "typeParameter": "SelfT extends com.datastax.oss.driver.api.core.AsyncPagingIterable>", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - 
"old": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::executeAsync(com.datastax.oss.driver.api.core.cql.Statement)", - "new": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::executeAsync(com.datastax.oss.driver.api.core.cql.Statement)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::executeAsync(java.lang.String)", - "new": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::executeAsync(java.lang.String)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::prepareAsync(com.datastax.oss.driver.api.core.cql.PrepareRequest)", - "new": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::prepareAsync(com.datastax.oss.driver.api.core.cql.PrepareRequest)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::prepareAsync(com.datastax.oss.driver.api.core.cql.SimpleStatement)", - "new": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::prepareAsync(com.datastax.oss.driver.api.core.cql.SimpleStatement)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::prepareAsync(java.lang.String)", - "new": "method 
java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.CqlSession::prepareAsync(java.lang.String)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.cql.AsyncResultSet::fetchNextPage() throws java.lang.IllegalStateException", - "new": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.AsyncPagingIterable>>::fetchNextPage() throws java.lang.IllegalStateException @ com.datastax.oss.driver.api.core.cql.AsyncResultSet", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.class.noLongerImplementsInterface", - "old": "interface com.datastax.oss.driver.api.core.cql.AsyncResultSet", - "new": "interface com.datastax.oss.driver.api.core.cql.AsyncResultSet", - "interface": "com.datastax.oss.driver.api.core.AsyncPagingIterable", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.class.superTypeTypeParametersChanged", - "old": "interface com.datastax.oss.driver.api.core.cql.AsyncResultSet", - "new": "interface com.datastax.oss.driver.api.core.cql.AsyncResultSet", - "oldSuperType": "com.datastax.oss.driver.api.core.AsyncPagingIterable", - "newSuperType": "com.datastax.oss.driver.api.core.AsyncPagingIterable", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.session.Session::refreshSchemaAsync()", - "new": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.session.Session::refreshSchemaAsync()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method 
java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.session.Session::setSchemaMetadataEnabled(java.lang.Boolean)", - "new": "method java.util.concurrent.CompletionStage com.datastax.oss.driver.api.core.session.Session::setSchemaMetadataEnabled(java.lang.Boolean)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.session.Session::getMetrics()", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.session.Session::getMetrics()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.Metadata::getKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.Metadata::getKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.Metadata::getKeyspace(java.lang.String)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.Metadata::getKeyspace(java.lang.String)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.Metadata::getKeyspaces()", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.Metadata::getKeyspaces()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional 
com.datastax.oss.driver.api.core.metadata.Metadata::getTokenMap()", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.Metadata::getTokenMap()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.type.DataType[])", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.type.DataType[])", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Iterable)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Iterable)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional 
com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(java.lang.String, com.datastax.oss.driver.api.core.type.DataType[])", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(java.lang.String, com.datastax.oss.driver.api.core.type.DataType[])", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(java.lang.String, java.lang.Iterable)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregate(java.lang.String, java.lang.Iterable)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregates()", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getAggregates()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.type.DataType[])", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.type.DataType[])", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional 
com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Iterable)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Iterable)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(java.lang.String, com.datastax.oss.driver.api.core.type.DataType[])", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(java.lang.String, com.datastax.oss.driver.api.core.type.DataType[])", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(java.lang.String, java.lang.Iterable)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunction(java.lang.String, java.lang.Iterable)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map 
com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunctions()", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getFunctions()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getTable(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getTable(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getTable(java.lang.String)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getTable(java.lang.String)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getTables()", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getTables()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getUserDefinedType(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getUserDefinedType(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - 
"code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getUserDefinedType(java.lang.String)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getUserDefinedType(java.lang.String)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getUserDefinedTypes()", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getUserDefinedTypes()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getView(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getView(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getView(java.lang.String)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getView(java.lang.String)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getViews()", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getViews()", - "justification": "JAVA-2192: Don't return 
generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getViewsOnTable(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata::getViewsOnTable(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getClusteringColumns()", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getClusteringColumns()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getColumn(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getColumn(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getColumn(java.lang.String)", - "new": "method java.util.Optional com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getColumn(java.lang.String)", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getColumns()", - "new": "method java.util.Map 
com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getColumns()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.List com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getPartitionKey()", - "new": "method java.util.List com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getPartitionKey()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.List com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getPrimaryKey()", - "new": "method java.util.List com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata::getPrimaryKey()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.TableMetadata::getIndexes()", - "new": "method java.util.Map com.datastax.oss.driver.api.core.metadata.schema.TableMetadata::getIndexes()", - "justification": "JAVA-2192: Don't return generic types with wildcards" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.config.DriverExecutionProfile com.datastax.oss.driver.api.core.config.DriverExecutionProfile::withLong(com.datastax.oss.driver.api.core.config.DriverOption, long)", - "new": "method SelfT com.datastax.oss.driver.api.core.config.OngoingConfigOptions>>::withLong(com.datastax.oss.driver.api.core.config.DriverOption, long) @ com.datastax.oss.driver.api.core.config.DriverExecutionProfile", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Bugfix, the annotation should have been present from the beginning" - }, - { - "code": "java.annotation.added", - "old": "method java.lang.String 
com.datastax.oss.driver.api.core.metadata.EndPoint::asMetricPrefix()", - "new": "method java.lang.String com.datastax.oss.driver.api.core.metadata.EndPoint::asMetricPrefix()", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "EndPoint.asMetricPrefix() was missing @NonNull" - }, - { - "code": "java.annotation.added", - "old": "method java.net.SocketAddress com.datastax.oss.driver.api.core.metadata.EndPoint::resolve()", - "new": "method java.net.SocketAddress com.datastax.oss.driver.api.core.metadata.EndPoint::resolve()", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "EndPoint.resolve() was missing @NonNull" - }, - { - "code": "java.annotation.added", - "old": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", - "new": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Node.getHostId() should have been annotated with @NonNull" - }, - { - "code": "java.annotation.removed", - "old": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", - "new": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", - "annotation": "@edu.umd.cs.findbugs.annotations.Nullable", - "justification": "Node.getHostId() should have been annotated with @NonNull" - }, - { - "code": "java.field.removed", - "old": "field com.datastax.oss.driver.api.core.session.SessionBuilder.requestTracker", - "justification": "JAVA-2315: Improve extensibility of session builder" - }, - { - "code": "java.field.removed", - "old": "field com.datastax.oss.driver.api.core.session.SessionBuilder.typeCodecs", - "justification": "JAVA-2315: Improve extensibility of session builder" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.PagingIterable 
com.datastax.oss.driver.api.core.PagingIterable::map(java.util.function.Function)", - "new": "method com.datastax.oss.driver.api.core.PagingIterable com.datastax.oss.driver.api.core.PagingIterable::map(java.util.function.Function)", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "PagingIterable.map() should have been annotated with @NonNull" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.PagingIterable com.datastax.oss.driver.api.core.PagingIterable::map(java.util.function.Function) @ com.datastax.oss.driver.api.core.cql.ResultSet", - "new": "method com.datastax.oss.driver.api.core.PagingIterable com.datastax.oss.driver.api.core.PagingIterable::map(java.util.function.Function) @ com.datastax.oss.driver.api.core.cql.ResultSet", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "PagingIterable.map() should have been annotated with @NonNull" - }, - { - "code": "java.annotation.added", - "old": "method java.util.Spliterator java.lang.Iterable::spliterator() @ com.datastax.oss.driver.api.core.PagingIterable", - "new": "method java.util.Spliterator com.datastax.oss.driver.api.core.PagingIterable::spliterator()", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "JAVA-2247: PagingIterable implementations should implement spliterator()" - }, - { - "code": "java.annotation.added", - "old": "method java.util.Spliterator java.lang.Iterable::spliterator() @ com.datastax.oss.driver.api.core.cql.ResultSet", - "new": "method java.util.Spliterator com.datastax.oss.driver.api.core.PagingIterable::spliterator() @ com.datastax.oss.driver.api.core.cql.ResultSet", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "JAVA-2247: PagingIterable implementations should implement spliterator()" - }, - { - "code": "java.method.addedToInterface", - "new": "method java.lang.String 
com.datastax.oss.driver.api.core.cql.Row::toString()", - "justification": "False positive -- all objects implicitly have toString()" - }, - { - "code": "java.method.addedToInterface", - "new": "method java.lang.String com.datastax.oss.driver.api.core.data.TupleValue::toString()", - "justification": "False positive -- all objects implicitly have toString()" - }, - { - "code": "java.method.addedToInterface", - "new": "method java.lang.String com.datastax.oss.driver.api.core.data.UdtValue::toString()", - "justification": "False positive -- all objects implicitly have toString()" - }, - { - "regex": true, - "code": "java.annotation.added", - "old": "field com\\.datastax\\.oss\\.driver\\.api\\.core\\.Version.V.*", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Marking constants as non-null doesn't break existing code" - }, - { - "code": "java.annotation.removed", - "old": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", - "new": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "JAVA-2505: Annotate Node.getHostId() as nullable" - }, - { - "code": "java.annotation.added", - "old": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", - "new": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", - "annotation": "@edu.umd.cs.findbugs.annotations.Nullable", - "justification": "JAVA-2505: Annotate Node.getHostId() as nullable" - }, - { - "code": "java.annotation.added", - "old": "parameter void com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory::(===javax.net.ssl.SSLContext===)", - "new": "parameter void com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory::(===javax.net.ssl.SSLContext===)", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "JAVA-2434: added @NonNull to 
ProgrammaticSslEngineFactory(SSLContext) constructor" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase.PlainTextAuthenticator", - "new": "class com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase.PlainTextAuthenticator", - "superClass": "com.datastax.dse.driver.api.core.auth.BaseDseAuthenticator", - "justification": "New parent doesn't add constraints for implementors" - }, - { - "code": "java.method.exception.runtimeAdded", - "old": "method com.datastax.oss.driver.api.core.auth.Authenticator com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase::newAuthenticator(com.datastax.oss.driver.api.core.metadata.EndPoint, java.lang.String)", - "new": "method com.datastax.oss.driver.api.core.auth.Authenticator com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase::newAuthenticator(com.datastax.oss.driver.api.core.metadata.EndPoint, java.lang.String) throws com.datastax.oss.driver.api.core.auth.AuthenticationException", - "exception": "com.datastax.oss.driver.api.core.auth.AuthenticationException", - "justification": "New exception is unchecked" - }, - { - "code": "java.class.superTypeTypeParametersChanged", - "old": "class com.datastax.dse.driver.api.core.DseSessionBuilderBase", - "new": "class com.datastax.dse.driver.api.core.DseSessionBuilderBase, SessionT>", - "oldSuperType": "com.datastax.oss.driver.api.core.session.SessionBuilder", - "newSuperType": "com.datastax.oss.driver.api.core.session.SessionBuilder, SessionT>", - "justification": "JAVA-2411: Type parameters were wrong but it is unlikely that implementors would notice that in subclasses" - }, - { - "code": "java.method.removed", - "old": "method org.apache.tinkerpop.gremlin.process.remote.traversal.RemoteTraversal org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submit(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws 
org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.removed", - "old": "method java.util.Iterator> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submit(org.apache.tinkerpop.gremlin.process.traversal.Traversal) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.noLongerDefault", - "old": "method java.util.concurrent.CompletableFuture> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submitAsync(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", - "new": "method java.util.concurrent.CompletableFuture> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submitAsync(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.nowAbstract", - "old": "method java.util.concurrent.CompletableFuture> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submitAsync(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", - "new": "method java.util.concurrent.CompletableFuture> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submitAsync(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.field.removedWithConstant", - "old": "field 
org.apache.tinkerpop.gremlin.process.traversal.TraversalSource.GREMLIN_REMOTE", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.field.removedWithConstant", - "old": "field org.apache.tinkerpop.gremlin.process.traversal.TraversalSource.GREMLIN_REMOTE_CONNECTION_CLASS", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter java.util.List> org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies::sortStrategies(===java.util.List>===)", - "new": "parameter java.util.Set> org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies::sortStrategies(===java.util.Set>===)", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.returnTypeChanged", - "old": "method java.util.List> org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies::sortStrategies(java.util.List>)", - "new": "method java.util.Set> org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies::sortStrategies(java.util.Set>)", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.numberOfParametersChanged", - "old": "method void org.apache.tinkerpop.gremlin.process.traversal.Traverser.Admin::incrLoops(java.lang.String)", - "new": "method void org.apache.tinkerpop.gremlin.process.traversal.Traverser.Admin::incrLoops()", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.addedToInterface", - "new": "method void org.apache.tinkerpop.gremlin.process.traversal.Traverser.Admin::initialiseLoops(java.lang.String, java.lang.String)", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.addedToInterface", - "new": "method int 
org.apache.tinkerpop.gremlin.process.traversal.Traverser::loops(java.lang.String)", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max()", - "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max()", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max()", - "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max()", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max(org.apache.tinkerpop.gremlin.process.traversal.Scope)", - "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max(org.apache.tinkerpop.gremlin.process.traversal.Scope)", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal 
org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max(org.apache.tinkerpop.gremlin.process.traversal.Scope)", - "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max(org.apache.tinkerpop.gremlin.process.traversal.Scope)", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min()", - "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min()", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min()", - "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min()", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min(org.apache.tinkerpop.gremlin.process.traversal.Scope)", - "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min(org.apache.tinkerpop.gremlin.process.traversal.Scope)", - "justification": "JAVA-2235: GraphBinary support - 
TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min(org.apache.tinkerpop.gremlin.process.traversal.Scope)", - "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min(org.apache.tinkerpop.gremlin.process.traversal.Scope)", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal> org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::valueMap(java.lang.String[])", - "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal> org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::valueMap(java.lang.String[])", - "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" - }, - { - "code": "java.class.externalClassExposedInAPI", - "new": "class org.apache.tinkerpop.gremlin.process.traversal.util.ImmutableExplanation", - "justification": "Upgrade to Tinkerpop 3.4.4" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class org.apache.tinkerpop.gremlin.process.traversal.util.TraversalExplanation", - "new": "class org.apache.tinkerpop.gremlin.process.traversal.util.TraversalExplanation", - "superClass": "org.apache.tinkerpop.gremlin.process.traversal.util.AbstractExplanation", - "justification": "Upgrade to Tinkerpop 3.4.4" - }, - { - "code": "java.class.defaultSerializationChanged", - "old": "class org.apache.tinkerpop.gremlin.process.traversal.util.TraversalExplanation", - "new": "class 
org.apache.tinkerpop.gremlin.process.traversal.util.TraversalExplanation", - "justification": "Upgrade to Tinkerpop 3.4.4" - }, - { - "code": "java.annotation.added", - "old": "parameter int com.datastax.oss.driver.api.core.type.UserDefinedType::firstIndexOf(===com.datastax.oss.driver.api.core.CqlIdentifier===)", - "new": "parameter int com.datastax.oss.driver.api.core.type.UserDefinedType::firstIndexOf(===com.datastax.oss.driver.api.core.CqlIdentifier===)", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Add missing `@NonNull` annotation to UserDefinedType.firstIndexOf" - }, - { - "code": "java.annotation.added", - "old": "parameter int com.datastax.oss.driver.api.core.type.UserDefinedType::firstIndexOf(===java.lang.String===)", - "new": "parameter int com.datastax.oss.driver.api.core.type.UserDefinedType::firstIndexOf(===java.lang.String===)", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Add missing `@NonNull` annotation to UserDefinedType.firstIndexOf" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "new": "class com.fasterxml.jackson.databind.introspect.AnnotatedConstructor.Serialization", - "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "new": "class com.fasterxml.jackson.databind.introspect.AnnotatedField.Serialization", - "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "new": "class com.fasterxml.jackson.databind.introspect.AnnotatedMethod.Serialization", - "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "new": "class com.fasterxml.jackson.databind.type.TypeParser.MyTokenizer", - "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" 
- }, - { - "code": "java.class.nonPublicPartOfAPI", - "new": "class com.fasterxml.jackson.databind.util.PrimitiveArrayBuilder.Node", - "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonTypeInfo.Id.CUSTOM", - "new": "field com.fasterxml.jackson.annotation.JsonTypeInfo.Id.CUSTOM", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.core.Base64Variant.serialVersionUID", - "new": "field com.fasterxml.jackson.core.Base64Variant.serialVersionUID", - "serialVersionUID": "1", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.core.JsonGenerationException", - "new": "class com.fasterxml.jackson.core.JsonGenerationException", - "superClass": "com.fasterxml.jackson.core.JacksonException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.core.JsonParseException", - "new": "class com.fasterxml.jackson.core.JsonParseException", - "superClass": "com.fasterxml.jackson.core.JacksonException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.core.JsonProcessingException", - "new": "class com.fasterxml.jackson.core.JsonProcessingException", - "superClass": "com.fasterxml.jackson.core.JacksonException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "old": "class com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer.TableInfo", - "new": "class com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer.TableInfo", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "old": "class com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer.Bucket", - "new": "class com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer.Bucket", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "old": "class com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer.TableInfo", - "new": "class com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer.TableInfo", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.removed", - "old": "method java.lang.String[] com.fasterxml.jackson.databind.AnnotationIntrospector::findPropertiesToIgnore(com.fasterxml.jackson.databind.introspect.Annotated)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.defaultSerializationChanged", - "old": "class com.fasterxml.jackson.databind.AnnotationIntrospector", - "new": "class com.fasterxml.jackson.databind.AnnotationIntrospector", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.DeserializationConfig.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.DeserializationConfig.serialVersionUID", - "serialVersionUID": "2", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.returnTypeChanged", - "old": "method void com.fasterxml.jackson.databind.DeserializationConfig::initialize(com.fasterxml.jackson.core.JsonParser)", - "new": "method com.fasterxml.jackson.core.JsonParser com.fasterxml.jackson.databind.DeserializationConfig::initialize(com.fasterxml.jackson.core.JsonParser)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", - "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.generics.formalTypeParameterRemoved", - "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", - "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", - "typeParameter": "T extends com.fasterxml.jackson.databind.BeanDescription", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspectForBuilder(com.fasterxml.jackson.databind.JavaType)", - "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspectForBuilder(com.fasterxml.jackson.databind.JavaType)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.generics.formalTypeParameterRemoved", - "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspectForBuilder(com.fasterxml.jackson.databind.JavaType)", - "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspectForBuilder(com.fasterxml.jackson.databind.JavaType)", - "typeParameter": "T extends com.fasterxml.jackson.databind.BeanDescription", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspectForCreation(com.fasterxml.jackson.databind.JavaType)", - "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspectForCreation(com.fasterxml.jackson.databind.JavaType)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.generics.formalTypeParameterRemoved", - "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspectForCreation(com.fasterxml.jackson.databind.JavaType)", - "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspectForCreation(com.fasterxml.jackson.databind.JavaType)", - "typeParameter": "T extends com.fasterxml.jackson.databind.BeanDescription", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.DeserializationContext.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.DeserializationContext.serialVersionUID", - "serialVersionUID": "1", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method boolean com.fasterxml.jackson.databind.DeserializationContext::isEnabled(com.fasterxml.jackson.core.StreamReadCapability)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method boolean com.fasterxml.jackson.databind.JavaType::isRecordType()", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.databind.JsonMappingException", - "new": "class com.fasterxml.jackson.databind.JsonMappingException", - "superClass": "com.fasterxml.jackson.core.JacksonException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_ENUMS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_ENUMS", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_VALUES", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_VALUES", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_COERCION_OF_SCALARS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_COERCION_OF_SCALARS", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_EXPLICIT_PROPERTY_RENAMING", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_EXPLICIT_PROPERTY_RENAMING", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.BLOCK_UNSAFE_POLYMORPHIC_BASE_TYPES", - "new": "field com.fasterxml.jackson.databind.MapperFeature.BLOCK_UNSAFE_POLYMORPHIC_BASE_TYPES", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.DEFAULT_VIEW_INCLUSION", - "new": "field com.fasterxml.jackson.databind.MapperFeature.DEFAULT_VIEW_INCLUSION", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_DUPLICATE_MODULE_REGISTRATIONS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_DUPLICATE_MODULE_REGISTRATIONS", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_MERGE_FOR_UNMERGEABLE", - "new": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_MERGE_FOR_UNMERGEABLE", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.SORT_PROPERTIES_ALPHABETICALLY", - "new": "field com.fasterxml.jackson.databind.MapperFeature.SORT_PROPERTIES_ALPHABETICALLY", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_BASE_TYPE_AS_DEFAULT_IMPL", - "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_BASE_TYPE_AS_DEFAULT_IMPL", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_STATIC_TYPING", - "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_STATIC_TYPING", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_STD_BEAN_NAMING", - "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_STD_BEAN_NAMING", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_WRAPPER_NAME_AS_PROPERTY_NAME", - "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_WRAPPER_NAME_AS_PROPERTY_NAME", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.ObjectMapper.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.ObjectMapper.serialVersionUID", - "serialVersionUID": "2", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.removed", - "old": "method java.lang.Object com.fasterxml.jackson.databind.ObjectMapper::_unwrapAndDeserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.DeserializationConfig, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.JsonDeserializer) throws java.io.IOException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::_writeValueAndClose(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.exception.runtimeAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::treeToValue(com.fasterxml.jackson.core.TreeNode, java.lang.Class) throws com.fasterxml.jackson.core.JsonProcessingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::treeToValue(com.fasterxml.jackson.core.TreeNode, java.lang.Class) throws java.lang.IllegalArgumentException, com.fasterxml.jackson.core.JsonProcessingException", - "exception": "java.lang.IllegalArgumentException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.removed", - "old": "method java.lang.Object com.fasterxml.jackson.databind.ObjectReader::_unwrapAndDeserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.JsonDeserializer) throws java.io.IOException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::_configAndWriteValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.returnTypeChanged", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::_configureGenerator(com.fasterxml.jackson.core.JsonGenerator)", - "new": "method com.fasterxml.jackson.core.JsonGenerator com.fasterxml.jackson.databind.ObjectWriter::_configureGenerator(com.fasterxml.jackson.core.JsonGenerator)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::_writeValueAndClose(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.serialVersionUIDChanged", - "new": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.serialVersionUID", - "oldSerialVersionUID": "-5237220944964015475", - "newSerialVersionUID": "2", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method T com.fasterxml.jackson.databind.SerializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", - "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.SerializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.generics.formalTypeParameterRemoved", - "old": "method T com.fasterxml.jackson.databind.SerializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", - "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.SerializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", - "typeParameter": "T extends com.fasterxml.jackson.databind.BeanDescription", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.cfg.BaseSettings.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.cfg.BaseSettings.serialVersionUID", - "serialVersionUID": "1", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.databind.cfg.BaseSettings::(com.fasterxml.jackson.databind.introspect.ClassIntrospector, com.fasterxml.jackson.databind.AnnotationIntrospector, com.fasterxml.jackson.databind.PropertyNamingStrategy, com.fasterxml.jackson.databind.type.TypeFactory, com.fasterxml.jackson.databind.jsontype.TypeResolverBuilder, java.text.DateFormat, com.fasterxml.jackson.databind.cfg.HandlerInstantiator, java.util.Locale, java.util.TimeZone, com.fasterxml.jackson.core.Base64Variant)", - "new": "method void com.fasterxml.jackson.databind.cfg.BaseSettings::(com.fasterxml.jackson.databind.introspect.ClassIntrospector, com.fasterxml.jackson.databind.AnnotationIntrospector, com.fasterxml.jackson.databind.PropertyNamingStrategy, com.fasterxml.jackson.databind.type.TypeFactory, com.fasterxml.jackson.databind.jsontype.TypeResolverBuilder, java.text.DateFormat, com.fasterxml.jackson.databind.cfg.HandlerInstantiator, java.util.Locale, java.util.TimeZone, com.fasterxml.jackson.core.Base64Variant, com.fasterxml.jackson.databind.jsontype.PolymorphicTypeValidator, com.fasterxml.jackson.databind.introspect.AccessorNamingStrategy.Provider)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.databind.introspect.AccessorNamingStrategy.Provider com.fasterxml.jackson.databind.cfg.MapperConfig>::getAccessorNaming()", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.annotation.JsonIncludeProperties.Value com.fasterxml.jackson.databind.cfg.MapperConfig>::getDefaultPropertyInclusions(java.lang.Class, com.fasterxml.jackson.databind.introspect.AnnotatedClass)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.serialVersionUID", - "serialVersionUID": "1", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.deser.DefaultDeserializationContext com.fasterxml.jackson.databind.deser.DefaultDeserializationContext::createDummyInstance(com.fasterxml.jackson.databind.DeserializationConfig)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", - "new": "class com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", - "superClass": "com.fasterxml.jackson.core.JacksonException", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.introspect.AnnotatedParameter com.fasterxml.jackson.databind.deser.ValueInstantiator::getIncompleteParameter()", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap::(boolean, java.util.Collection)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.numberOfParametersChanged", - "old": "method com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap::construct(java.util.Collection, boolean)", - "new": "method com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap::construct(com.fasterxml.jackson.databind.cfg.MapperConfig, java.util.Collection, java.util.Map>, boolean)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap::replace(com.fasterxml.jackson.databind.deser.SettableBeanProperty)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.introspect.ClassIntrospector::forDeserializationWithBuilder(com.fasterxml.jackson.databind.DeserializationConfig, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.introspect.ClassIntrospector.MixInResolver, com.fasterxml.jackson.databind.BeanDescription)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.databind.ser.BeanSerializer::(com.fasterxml.jackson.databind.ser.std.BeanSerializerBase, java.util.Set)", - "new": "method void com.fasterxml.jackson.databind.ser.BeanSerializer::(com.fasterxml.jackson.databind.ser.std.BeanSerializerBase, java.util.Set, java.util.Set)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.ser.impl.PropertySerializerMap com.fasterxml.jackson.databind.ser.impl.PropertySerializerMap::emptyMap()", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.visibilityReduced", - "old": "method void com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::(com.fasterxml.jackson.databind.ser.std.BeanSerializerBase, com.fasterxml.jackson.databind.ser.BeanPropertyWriter[], com.fasterxml.jackson.databind.ser.BeanPropertyWriter[])", - "new": "method void com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::(com.fasterxml.jackson.databind.ser.std.BeanSerializerBase, com.fasterxml.jackson.databind.ser.BeanPropertyWriter[], com.fasterxml.jackson.databind.ser.BeanPropertyWriter[])", - "oldVisibility": "public", - "newVisibility": "protected", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.ser.std.BeanSerializerBase com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::withByNameInclusion(java.util.Set, java.util.Set)", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.ser.std.BeanSerializerBase com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::withProperties(com.fasterxml.jackson.databind.ser.BeanPropertyWriter[], com.fasterxml.jackson.databind.ser.BeanPropertyWriter[])", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.typeChanged", - "old": "field com.fasterxml.jackson.databind.type.TypeFactory._typeCache", - "new": "field com.fasterxml.jackson.databind.type.TypeFactory._typeCache", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.type.TypeFactory.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.type.TypeFactory.serialVersionUID", - "serialVersionUID": "1", - "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" - }, - { - "code": "java.missing.newClass", - "new": "missing-class org.apache.tinkerpop.gremlin.process.remote.RemoteConnection", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.P", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.Path", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class org.apache.tinkerpop.gremlin.structure.Edge", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class org.apache.tinkerpop.gremlin.structure.Property", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class 
org.apache.tinkerpop.gremlin.structure.Vertex", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class org.apache.tinkerpop.gremlin.structure.VertexProperty", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class org.apache.tinkerpop.gremlin.process.remote.RemoteConnection", - "new": "missing-class org.apache.tinkerpop.gremlin.process.remote.RemoteConnection", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class org.apache.tinkerpop.gremlin.process.traversal.P", - "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.P", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class org.apache.tinkerpop.gremlin.process.traversal.Path", - "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.Path", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal", - "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource", - "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class org.apache.tinkerpop.gremlin.structure.Edge", - "new": "missing-class org.apache.tinkerpop.gremlin.structure.Edge", - "justification": 
"JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class org.apache.tinkerpop.gremlin.structure.Property", - "new": "missing-class org.apache.tinkerpop.gremlin.structure.Property", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class org.apache.tinkerpop.gremlin.structure.Vertex", - "new": "missing-class org.apache.tinkerpop.gremlin.structure.Vertex", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class org.apache.tinkerpop.gremlin.structure.VertexProperty", - "new": "missing-class org.apache.tinkerpop.gremlin.structure.VertexProperty", - "justification": "JAVA-2907: switched Tinkerpop dependency to optional" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.core.JsonGenerationException", - "new": "class com.fasterxml.jackson.core.JsonGenerationException", - "superClass": "com.fasterxml.jackson.core.exc.StreamWriteException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.serialVersionUIDChanged", - "old": "field com.fasterxml.jackson.core.JsonLocation.serialVersionUID", - "new": "field com.fasterxml.jackson.core.JsonLocation.serialVersionUID", - "oldSerialVersionUID": "1", - "newSerialVersionUID": "2", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.removed", - "old": "method java.lang.StringBuilder com.fasterxml.jackson.core.JsonLocation::_appendSourceDesc(java.lang.StringBuilder)", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.visibilityReduced", - "old": "method void com.fasterxml.jackson.core.exc.StreamReadException::<init>(com.fasterxml.jackson.core.JsonParser, java.lang.String)", - 
"new": "method void com.fasterxml.jackson.core.exc.StreamReadException::<init>(com.fasterxml.jackson.core.JsonParser, java.lang.String)", - "oldVisibility": "public", - "newVisibility": "protected", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.visibilityReduced", - "old": "method void com.fasterxml.jackson.core.exc.StreamReadException::<init>(com.fasterxml.jackson.core.JsonParser, java.lang.String, com.fasterxml.jackson.core.JsonLocation)", - "new": "method void com.fasterxml.jackson.core.exc.StreamReadException::<init>(com.fasterxml.jackson.core.JsonParser, java.lang.String, com.fasterxml.jackson.core.JsonLocation)", - "oldVisibility": "public", - "newVisibility": "protected", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.visibilityReduced", - "old": "method void com.fasterxml.jackson.core.exc.StreamReadException::<init>(com.fasterxml.jackson.core.JsonParser, java.lang.String, java.lang.Throwable)", - "new": "method void com.fasterxml.jackson.core.exc.StreamReadException::<init>(com.fasterxml.jackson.core.JsonParser, java.lang.String, java.lang.Throwable)", - "oldVisibility": "public", - "newVisibility": "protected", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.nowFinal", - "old": "field com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer._intern", - "new": "field com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer._intern", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer::reportTooManyCollisions(int)", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.abstractMethodAdded", - "new": "method java.util.List> com.fasterxml.jackson.databind.BeanDescription::getConstructorsWithMode()", - 
"justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.abstractMethodAdded", - "new": "method java.util.List> com.fasterxml.jackson.databind.BeanDescription::getFactoryMethodsWithMode()", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter com.fasterxml.jackson.databind.DeserializationConfig com.fasterxml.jackson.databind.DeserializationConfig::_withMapperFeatures(===int===)", - "new": "parameter com.fasterxml.jackson.databind.DeserializationConfig com.fasterxml.jackson.databind.DeserializationConfig::_withMapperFeatures(===long===)", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.databind.util.TokenBuffer com.fasterxml.jackson.databind.DeserializationContext::bufferForInputBuffering()", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", - "exception": "com.fasterxml.jackson.core.JacksonException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, 
com.fasterxml.jackson.databind.DeserializationContext) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, T) throws java.io.IOException", - "new": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, T) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", - "exception": "com.fasterxml.jackson.core.JacksonException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.Object com.fasterxml.jackson.databind.JsonDeserializer::deserializeWithType(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.jsontype.TypeDeserializer) throws java.io.IOException", - "new": "method java.lang.Object com.fasterxml.jackson.databind.JsonDeserializer::deserializeWithType(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.jsontype.TypeDeserializer) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", - "exception": "com.fasterxml.jackson.core.JacksonException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - 
}, - { - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.Object com.fasterxml.jackson.databind.JsonDeserializer::deserializeWithType(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.jsontype.TypeDeserializer, T) throws java.io.IOException", - "new": "method java.lang.Object com.fasterxml.jackson.databind.JsonDeserializer::deserializeWithType(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.jsontype.TypeDeserializer, T) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", - "exception": "com.fasterxml.jackson.core.JacksonException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.databind.JsonMappingException", - "new": "class com.fasterxml.jackson.databind.JsonMappingException", - "superClass": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectMapper::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType) throws com.fasterxml.jackson.databind.JsonMappingException", - "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectMapper::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType) throws com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": 
"java.method.exception.checkedRemoved", - "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectMapper::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType) throws com.fasterxml.jackson.databind.JsonMappingException", - "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectMapper::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType) throws com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.core.JsonToken com.fasterxml.jackson.databind.ObjectMapper::_initForReading(com.fasterxml.jackson.core.JsonParser) throws java.io.IOException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readTree(com.fasterxml.jackson.core.JsonParser) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readTree(com.fasterxml.jackson.core.JsonParser) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method com.fasterxml.jackson.databind.JsonNode com.fasterxml.jackson.databind.ObjectMapper::readTree(java.io.File) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method com.fasterxml.jackson.databind.JsonNode com.fasterxml.jackson.databind.ObjectMapper::readTree(java.io.File) 
throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T 
com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], 
com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - 
{ - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, 
com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, 
com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method 
T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": 
"java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { 
- "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": 
"com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, 
com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - 
"justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": 
"method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": 
"java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, 
com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, 
com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - 
"old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": 
"com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class) throws 
java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, 
java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) 
throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around 
JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, 
com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T 
com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws 
java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address 
outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, 
com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, 
java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T 
com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T 
com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": 
"com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws 
java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, 
java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, 
com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeTree(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.core.TreeNode) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeTree(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.core.TreeNode) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeTree(com.fasterxml.jackson.core.JsonGenerator, 
com.fasterxml.jackson.databind.JsonNode) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeTree(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.JsonNode) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": 
"java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonGenerationException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, 
com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonGenerationException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) 
throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, 
com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonGenerationException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void 
com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonGenerationException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address 
outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.JsonMappingException", - "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.JsonMappingException", - "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findTreeDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.JsonMappingException", - "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findTreeDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.DatabindException", - "exception": 
"com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findTreeDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.JsonMappingException", - "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findTreeDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUndetectableSource(java.lang.Object) throws com.fasterxml.jackson.core.JsonParseException", - "new": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUndetectableSource(java.lang.Object) throws com.fasterxml.jackson.core.exc.StreamReadException", - "exception": "com.fasterxml.jackson.core.exc.StreamReadException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUndetectableSource(java.lang.Object) throws com.fasterxml.jackson.core.JsonParseException", - "new": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUndetectableSource(java.lang.Object) throws com.fasterxml.jackson.core.exc.StreamReadException", - "exception": "com.fasterxml.jackson.core.JsonParseException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void 
com.fasterxml.jackson.databind.ObjectReader::_reportUnkownFormat(com.fasterxml.jackson.databind.deser.DataFormatReaders, com.fasterxml.jackson.databind.deser.DataFormatReaders.Match) throws com.fasterxml.jackson.core.JsonProcessingException", - "new": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUnkownFormat(com.fasterxml.jackson.databind.deser.DataFormatReaders, com.fasterxml.jackson.databind.deser.DataFormatReaders.Match) throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUnkownFormat(com.fasterxml.jackson.databind.deser.DataFormatReaders, com.fasterxml.jackson.databind.deser.DataFormatReaders.Match) throws com.fasterxml.jackson.core.JsonProcessingException", - "new": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUnkownFormat(com.fasterxml.jackson.databind.deser.DataFormatReaders, com.fasterxml.jackson.databind.deser.DataFormatReaders.Match) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.DataOutput, java.lang.Object) throws java.io.IOException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.DataOutput, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void 
com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.DataOutput, java.lang.Object) throws java.io.IOException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.DataOutput, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void 
com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonGenerationException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", - "justification": "Upgrade deps around JAVA-2977 to address 
outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonGenerationException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - 
"exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, 
com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.core.JsonGenerationException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", - "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.SerializationConfig.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.SerializationConfig.serialVersionUID", - "serialVersionUID": "1", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter com.fasterxml.jackson.databind.SerializationConfig com.fasterxml.jackson.databind.SerializationConfig::_withMapperFeatures(===int===)", - "new": "parameter com.fasterxml.jackson.databind.SerializationConfig com.fasterxml.jackson.databind.SerializationConfig::_withMapperFeatures(===long===)", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.EAGER_SERIALIZER_FETCH", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.EAGER_SERIALIZER_FETCH", - "justification": 
"Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.USE_EQUALITY_FOR_OBJECT_ID", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.USE_EQUALITY_FOR_OBJECT_ID", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_BIGDECIMAL_AS_PLAIN", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_BIGDECIMAL_AS_PLAIN", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_CHAR_ARRAYS_AS_JSON_ARRAYS", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_CHAR_ARRAYS_AS_JSON_ARRAYS", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_DATE_TIMESTAMPS_AS_NANOSECONDS", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_DATE_TIMESTAMPS_AS_NANOSECONDS", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_DURATIONS_AS_TIMESTAMPS", - "new": "field 
com.fasterxml.jackson.databind.SerializationFeature.WRITE_DURATIONS_AS_TIMESTAMPS", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_EMPTY_JSON_ARRAYS", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_EMPTY_JSON_ARRAYS", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUMS_USING_INDEX", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUMS_USING_INDEX", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUMS_USING_TO_STRING", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUMS_USING_TO_STRING", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUM_KEYS_USING_INDEX", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUM_KEYS_USING_INDEX", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_NULL_MAP_VALUES", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_NULL_MAP_VALUES", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED", - "new": "field 
com.fasterxml.jackson.databind.SerializationFeature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.databind.util.TokenBuffer com.fasterxml.jackson.databind.SerializerProvider::bufferForValueConversion()", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.removed", - "old": "method java.lang.Class com.fasterxml.jackson.databind.SerializerProvider::getSerializationView()", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.typeChanged", - "old": "field com.fasterxml.jackson.databind.cfg.MapperConfig>._mapperFeatures", - "new": "field com.fasterxml.jackson.databind.cfg.MapperConfig>._mapperFeatures", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.cfg.MapperConfig>.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.cfg.MapperConfig>.serialVersionUID", - "serialVersionUID": "2", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter void com.fasterxml.jackson.databind.cfg.MapperConfig>::(com.fasterxml.jackson.databind.cfg.BaseSettings, ===int===)", - "new": "parameter void com.fasterxml.jackson.databind.cfg.MapperConfig>::(com.fasterxml.jackson.databind.cfg.BaseSettings, ===long===)", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter void com.fasterxml.jackson.databind.cfg.MapperConfig>::(com.fasterxml.jackson.databind.cfg.MapperConfig, ===int===)", - "new": "parameter void 
com.fasterxml.jackson.databind.cfg.MapperConfig>::(com.fasterxml.jackson.databind.cfg.MapperConfig, ===long===)", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::_fixAccess(java.util.Collection)", - "new": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::_fixAccess(java.util.Collection) throws com.fasterxml.jackson.databind.JsonMappingException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::addBackReferenceProperty(java.lang.String, com.fasterxml.jackson.databind.deser.SettableBeanProperty)", - "new": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::addBackReferenceProperty(java.lang.String, com.fasterxml.jackson.databind.deser.SettableBeanProperty) throws com.fasterxml.jackson.databind.JsonMappingException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::addInjectable(com.fasterxml.jackson.databind.PropertyName, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.util.Annotations, com.fasterxml.jackson.databind.introspect.AnnotatedMember, java.lang.Object)", - "new": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::addInjectable(com.fasterxml.jackson.databind.PropertyName, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.util.Annotations, com.fasterxml.jackson.databind.introspect.AnnotatedMember, 
java.lang.Object) throws com.fasterxml.jackson.databind.JsonMappingException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedAdded", - "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::build()", - "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::build() throws com.fasterxml.jackson.databind.JsonMappingException", - "exception": "com.fasterxml.jackson.databind.JsonMappingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", - "new": "class com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", - "superClass": "com.fasterxml.jackson.databind.DatabindException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.deser.impl.PropertyValue::assign(java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method void com.fasterxml.jackson.databind.deser.impl.PropertyValue::assign(java.lang.Object) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.node.BinaryNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method 
void com.fasterxml.jackson.databind.node.BinaryNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.node.BaseJsonNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException @ com.fasterxml.jackson.databind.node.NumericNode", - "new": "method void com.fasterxml.jackson.databind.node.BaseJsonNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException @ com.fasterxml.jackson.databind.node.NumericNode", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.node.BaseJsonNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException @ com.fasterxml.jackson.databind.node.ValueNode", - "new": "method void com.fasterxml.jackson.databind.node.BaseJsonNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException @ com.fasterxml.jackson.databind.node.ValueNode", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void 
com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::serializeFieldsFiltered(java.lang.Object, com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException", - "new": "method void com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::serializeFieldsFiltered(java.lang.Object, com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonGenerationException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.method.exception.checkedRemoved", - "old": "method void com.fasterxml.jackson.databind.type.TypeBase::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", - "new": "method void com.fasterxml.jackson.databind.type.TypeBase::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException", - "exception": "com.fasterxml.jackson.core.JsonProcessingException", - "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" - }, - { - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException", - "new": "class com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException", - "superClass": "com.datastax.oss.driver.api.core.DriverException", - "justification": "Make CodecNotFoundException to extend DriverException as all other driver exceptions do" - }, - { - "code": "java.class.removed", - "old": "class com.datastax.oss.driver.api.core.data.CqlVector", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.removed", - "old": "method 
com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getCqlVector(com.datastax.oss.driver.api.core.CqlIdentifier)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.removed", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getCqlVector(int)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.removed", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getCqlVector(java.lang.String)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>::setCqlVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setCqlVector(int, com.datastax.oss.driver.api.core.data.CqlVector)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.removed", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setCqlVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.class.removed", - "old": "class com.datastax.oss.driver.api.core.type.CqlVectorType", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.returnTypeChanged", - "old": "method com.datastax.oss.driver.api.core.type.CqlVectorType com.datastax.oss.driver.api.core.type.DataTypes::vectorOf(com.datastax.oss.driver.api.core.type.DataType, int)", - "new": "method com.datastax.oss.driver.api.core.type.VectorType com.datastax.oss.driver.api.core.type.DataTypes::vectorOf(com.datastax.oss.driver.api.core.type.DataType, 
int)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(===com.datastax.oss.driver.api.core.type.CqlVectorType===, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(===com.datastax.oss.driver.api.core.type.VectorType===, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.CqlVectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.method.removed", - "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "justification": "Refactoring in JAVA-3061" - }, - { - "code": "java.class.removed", - "old": "class com.datastax.oss.driver.api.core.data.CqlVector.Builder", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.method.removed", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector.Builder com.datastax.oss.driver.api.core.data.CqlVector::builder()", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.method.removed", - "old": "method 
java.lang.Iterable com.datastax.oss.driver.api.core.data.CqlVector::getValues()", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "class com.datastax.oss.driver.api.core.data.CqlVector", - "new": "class com.datastax.oss.driver.api.core.data.CqlVector", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(===com.datastax.oss.driver.api.core.type.CqlVectorType===, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(===com.datastax.oss.driver.api.core.type.VectorType===, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.CqlVectorType, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", - "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.CqlVectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> 
com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.CqlVectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===com.datastax.oss.driver.api.core.type.reflect.GenericType===)", - "new": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===com.datastax.oss.driver.api.core.type.reflect.GenericType===)", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method 
com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "justification": "Refactorings in PR 1666" - }, - { - "code": "java.method.returnTypeChangedCovariantly", - "old": "method java.lang.Throwable java.lang.Throwable::fillInStackTrace() @ com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", - "new": "method com.fasterxml.jackson.databind.deser.UnresolvedForwardReference com.fasterxml.jackson.databind.deser.UnresolvedForwardReference::fillInStackTrace()", - "justification": "Upgrade jackson-databind to 2.13.4.1 to address CVEs, API change cause: https://github.com/FasterXML/jackson-databind/issues/3419" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT 
com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int)", - "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::add(com.datastax.oss.driver.api.core.cql.BatchableStatement)", - "new": 
"method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::add(com.datastax.oss.driver.api.core.cql.BatchableStatement)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::addAll(com.datastax.oss.driver.api.core.cql.BatchableStatement[])", - "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::addAll(com.datastax.oss.driver.api.core.cql.BatchableStatement[])", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::addAll(java.lang.Iterable>)", - "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::addAll(java.lang.Iterable>)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::clear()", - "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::clear()", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement 
com.datastax.oss.driver.api.core.cql.BatchStatement::setBatchType(com.datastax.oss.driver.api.core.cql.BatchType)", - "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setBatchType(com.datastax.oss.driver.api.core.cql.BatchType)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setKeyspace(java.lang.String)", - "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setKeyspace(java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setQuery(java.lang.String)", - "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setQuery(java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - 
{ - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setKeyspace(java.lang.String)", - "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setKeyspace(java.lang.String)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setPositionalValues(java.util.List)", - "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setPositionalValues(java.util.List)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setNamedValuesWithIds(java.util.Map)", - "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setNamedValuesWithIds(java.util.Map)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - 
"justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.annotation.added", - "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setNamedValues(java.util.Map)", - "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setNamedValues(java.util.Map)", - "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", - "justification": "Annotate mutating methods with @CheckReturnValue" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", - "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, 
com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeChanged", - "old": "method T com.datastax.oss.driver.api.core.data.CqlVector::get(int)", - "new": "method T com.datastax.oss.driver.api.core.data.CqlVector::get(int)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Iterator com.datastax.oss.driver.api.core.data.CqlVector::iterator()", - "new": "method java.util.Iterator com.datastax.oss.driver.api.core.data.CqlVector::iterator()", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(===V[]===)", - "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(===V[]===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(V[])", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(V[])", - "justification": "JAVA-3143: Extend driver vector 
support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(V[])", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(V[])", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(===java.util.List===)", - "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(===java.util.List===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(java.util.List)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(java.util.List)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(java.util.List)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(java.util.List)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix 
handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeChanged", - "old": "parameter T com.datastax.oss.driver.api.core.data.CqlVector::set(int, ===T===)", - "new": "parameter T com.datastax.oss.driver.api.core.data.CqlVector::set(int, ===T===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeChanged", - "old": "method T com.datastax.oss.driver.api.core.data.CqlVector::set(int, T)", - "new": "method T com.datastax.oss.driver.api.core.data.CqlVector::set(int, T)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Spliterator java.lang.Iterable::spliterator() @ com.datastax.oss.driver.api.core.data.CqlVector", - "new": "method java.util.Spliterator java.lang.Iterable::spliterator() @ com.datastax.oss.driver.api.core.data.CqlVector", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.stream.Stream com.datastax.oss.driver.api.core.data.CqlVector::stream()", - "new": "method java.util.stream.Stream com.datastax.oss.driver.api.core.data.CqlVector::stream()", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::subVector(int, int)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::subVector(int, int)", - "justification": 
"JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.class.noLongerImplementsInterface", - "old": "class com.datastax.oss.driver.api.core.data.CqlVector", - "new": "class com.datastax.oss.driver.api.core.data.CqlVector", - "interface": "java.lang.Iterable", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "class com.datastax.oss.driver.api.core.data.CqlVector", - "new": "class com.datastax.oss.driver.api.core.data.CqlVector", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.class.superTypeTypeParametersChanged", - "old": "class com.datastax.oss.driver.api.core.data.CqlVector", - "new": "class com.datastax.oss.driver.api.core.data.CqlVector", - "oldSuperType": "java.lang.Iterable", - "newSuperType": "java.lang.Iterable", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, ===java.lang.Class===)", - "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, ===java.lang.Class===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector 
com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Class)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Class)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, ===java.lang.Class===)", - "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, ===java.lang.Class===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, java.lang.Class)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, java.lang.Class)", - "justification": "JAVA-3143: Extend driver 
vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, java.lang.Class)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, ===java.lang.Class===)", - "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, ===java.lang.Class===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, java.lang.Class)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, java.lang.Class)", - "new": "method com.datastax.oss.driver.api.core.data.CqlVector 
com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", - "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", - "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", - "justification": "JAVA-3143: Extend driver 
vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", - "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", - "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", - "new": "parameter SelfT 
com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", - "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", - "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", - "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", - 
"justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", - "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", - 
"justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===com.datastax.oss.driver.api.core.type.reflect.GenericType===)", - "new": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===com.datastax.oss.driver.api.core.type.reflect.GenericType===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": 
"java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===java.lang.Class===)", - "new": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===java.lang.Class===)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(java.lang.Class)", - "new": "method 
com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.generics.formalTypeParameterChanged", - "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(java.lang.Class)", - "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(java.lang.Class)", - "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "old": "class com.datastax.oss.driver.internal.core.config.typesafe.TypesafeDriverExecutionProfile.Base", - "justification": "CASSJAVA-102: Fix spurious complaints about optional dependencies" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "old": "class com.fasterxml.jackson.databind.type.TypeParser.MyTokenizer", - "justification": "CASSJAVA-102: Fix spurious complaints about optional dependencies" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "old": "class org.apache.tinkerpop.shaded.jackson.databind.type.TypeParser.MyTokenizer", - "justification": "CASSJAVA-102: Fix spurious complaints about optional dependencies" - }, - { - "code": "java.class.externalClassExposedInAPI", - "justification": "CASSJAVA-102: Migrate revapi config into dedicated config files, ported from pom.xml" - }, - { - "code": "java.method.varargOverloadsOnlyDifferInVarargParameter", - "justification": "CASSJAVA-102: Migrate revapi config into dedicated config files, ported from pom.xml" - }, - { - "code": "java.method.addedToInterface", - "new": "method java.util.Optional 
com.datastax.oss.driver.api.core.context.DriverContext::getRequestIdGenerator()", - "justification": "CASSJAVA-97: Let users inject an ID for each request and write to the custom payload" - } - ] - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/DseProtocolVersion.java b/core/src/main/java/com/datastax/dse/driver/api/core/DseProtocolVersion.java deleted file mode 100644 index dc420970427..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/DseProtocolVersion.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core; - -import com.datastax.dse.protocol.internal.DseProtocolConstants; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; - -/** - * A DSE-specific protocol version. - * - *

Legacy DSE versions did not have a specific version, but instead reused a Cassandra protocol - * version: DSE 5.0 is supported via {@link DefaultProtocolVersion#V4}, and DSE 4.7 and 4.8 via - * {@link DefaultProtocolVersion#V3}. - * - *

DSE 4.6 and earlier are not supported by this version of the driver, use the 1.x series. - */ -public enum DseProtocolVersion implements ProtocolVersion { - - /** Version 1, supported by DSE 5.1.0 and above. */ - DSE_V1(DseProtocolConstants.Version.DSE_V1, false), - - /** Version 2, supported by DSE 6 and above. */ - DSE_V2(DseProtocolConstants.Version.DSE_V2, false), - ; - - private final int code; - private final boolean beta; - - DseProtocolVersion(int code, boolean beta) { - this.code = code; - this.beta = beta; - } - - @Override - public int getCode() { - return code; - } - - @Override - public boolean isBeta() { - return beta; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/DseSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/DseSession.java deleted file mode 100644 index 8251aaf767c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/DseSession.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.MavenCoordinates; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * @deprecated All DSE functionality is now available directly on {@link CqlSession}. This type is - * preserved for backward compatibility, but you should now use {@link CqlSession} instead. - */ -@Deprecated -public interface DseSession extends CqlSession { - - /** - * @deprecated the DSE driver is no longer published as a separate artifact. This field is - * preserved for backward compatibility, but it returns the same value as {@link - * CqlSession#OSS_DRIVER_COORDINATES}. - */ - @Deprecated @NonNull MavenCoordinates DSE_DRIVER_COORDINATES = CqlSession.OSS_DRIVER_COORDINATES; - - /** - * Returns a builder to create a new instance. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static DseSessionBuilder builder() { - return new DseSessionBuilder(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/DseSessionBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/DseSessionBuilder.java deleted file mode 100644 index 01e5f9f9125..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/DseSessionBuilder.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.NotThreadSafe; - -/** - * @deprecated DSE functionality is now exposed directly on {@link CqlSession}. This class is - * preserved for backward compatibility, but {@link CqlSession#builder()} should be used - * instead. 
- */ -@NotThreadSafe -@Deprecated -public class DseSessionBuilder extends SessionBuilder { - - @NonNull - @Override - protected DseSession wrap(@NonNull CqlSession defaultSession) { - return new com.datastax.dse.driver.internal.core.session.DefaultDseSession(defaultSession); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/BaseDseAuthenticator.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/BaseDseAuthenticator.java deleted file mode 100644 index abd68b530b6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/BaseDseAuthenticator.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.auth.SyncAuthenticator; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * Base class for {@link SyncAuthenticator} implementations that want to make use of the - * authentication scheme negotiation in DseAuthenticator. 
- */ -@ThreadSafe -public abstract class BaseDseAuthenticator implements SyncAuthenticator { - - private static final String DSE_AUTHENTICATOR = - "com.datastax.bdp.cassandra.auth.DseAuthenticator"; - - private final String serverAuthenticator; - - protected BaseDseAuthenticator(@NonNull String serverAuthenticator) { - this.serverAuthenticator = serverAuthenticator; - } - - /** - * Return a byte buffer containing the required SASL mechanism. - * - *

This should be one of: - * - *

    - *
  • PLAIN - *
  • GSSAPI - *
- * - * This must be either a {@linkplain ByteBuffer#asReadOnlyBuffer() read-only} buffer, or a new - * instance every time. - */ - @NonNull - protected abstract ByteBuffer getMechanism(); - - /** - * Return a byte buffer containing the expected successful server challenge. - * - *

This should be one of: - * - *

    - *
  • PLAIN-START - *
  • GSSAPI-START - *
- * - * This must be either a {@linkplain ByteBuffer#asReadOnlyBuffer() read-only} buffer, or a new - * instance every time. - */ - @NonNull - protected abstract ByteBuffer getInitialServerChallenge(); - - @Nullable - @Override - public ByteBuffer initialResponseSync() { - // DseAuthenticator communicates back the mechanism in response to server authenticate message. - // older authenticators simply expect the auth response with credentials. - if (isDseAuthenticator()) { - return getMechanism(); - } else { - return evaluateChallengeSync(getInitialServerChallenge()); - } - } - - @Override - public void onAuthenticationSuccessSync(@Nullable ByteBuffer token) {} - - private boolean isDseAuthenticator() { - return serverAuthenticator.equals(DSE_AUTHENTICATOR); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.java deleted file mode 100644 index 48a0e5b0ef3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.java +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.auth.Authenticator; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import javax.security.auth.Subject; -import javax.security.auth.login.AppConfigurationEntry; -import javax.security.auth.login.Configuration; -import javax.security.auth.login.LoginContext; -import javax.security.auth.login.LoginException; -import javax.security.sasl.Sasl; -import javax.security.sasl.SaslClient; -import javax.security.sasl.SaslException; -import net.jcip.annotations.Immutable; -import net.jcip.annotations.NotThreadSafe; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public abstract class DseGssApiAuthProviderBase implements AuthProvider { - - /** The default SASL service name used by this auth provider. */ - public static final String DEFAULT_SASL_SERVICE_NAME = "dse"; - - /** The name of the system property to use to specify the SASL service name. */ - public static final String SASL_SERVICE_NAME_PROPERTY = "dse.sasl.service"; - - /** - * Legacy system property for SASL protocol name. Clients should migrate to - * SASL_SERVICE_NAME_PROPERTY above. 
- */ - private static final String LEGACY_SASL_PROTOCOL_PROPERTY = "dse.sasl.protocol"; - - private static final Logger LOG = LoggerFactory.getLogger(DseGssApiAuthProviderBase.class); - - private final String logPrefix; - - /** - * @param logPrefix a string that will get prepended to the logs (this is used for discrimination - * when you have multiple driver instances executing in the same JVM). Config-based - * implementations fill this with {@link Session#getName()}. - */ - protected DseGssApiAuthProviderBase(@NonNull String logPrefix) { - this.logPrefix = Objects.requireNonNull(logPrefix); - } - - @NonNull - protected abstract GssApiOptions getOptions( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator); - - @NonNull - @Override - public Authenticator newAuthenticator( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) - throws AuthenticationException { - return new GssApiAuthenticator( - getOptions(endPoint, serverAuthenticator), endPoint, serverAuthenticator); - } - - @Override - public void onMissingChallenge(@NonNull EndPoint endPoint) { - LOG.warn( - "[{}] {} did not send an authentication challenge; " - + "This is suspicious because the driver expects authentication", - logPrefix, - endPoint); - } - - @Override - public void close() { - // nothing to do - } - - /** - * The options to initialize a new authenticator. - * - *

Use {@link #builder()} to create an instance. - */ - @Immutable - public static class GssApiOptions { - - @NonNull - public static Builder builder() { - return new Builder(); - } - - private final Configuration loginConfiguration; - private final Subject subject; - private final String saslProtocol; - private final String authorizationId; - private final Map saslProperties; - - private GssApiOptions( - @Nullable Configuration loginConfiguration, - @Nullable Subject subject, - @Nullable String saslProtocol, - @Nullable String authorizationId, - @NonNull Map saslProperties) { - this.loginConfiguration = loginConfiguration; - this.subject = subject; - this.saslProtocol = saslProtocol; - this.authorizationId = authorizationId; - this.saslProperties = saslProperties; - } - - @Nullable - public Configuration getLoginConfiguration() { - return loginConfiguration; - } - - @Nullable - public Subject getSubject() { - return subject; - } - - @Nullable - public String getSaslProtocol() { - return saslProtocol; - } - - @Nullable - public String getAuthorizationId() { - return authorizationId; - } - - @NonNull - public Map getSaslProperties() { - return saslProperties; - } - - @NotThreadSafe - public static class Builder { - - private Configuration loginConfiguration; - private Subject subject; - private String saslProtocol; - private String authorizationId; - private final Map saslProperties = new HashMap<>(); - - public Builder() { - saslProperties.put(Sasl.SERVER_AUTH, "true"); - saslProperties.put(Sasl.QOP, "auth"); - } - - /** - * Sets a login configuration that will be used to create a {@link LoginContext}. - * - *

You MUST call either a withLoginConfiguration method or {@link #withSubject(Subject)}; - * if both are called, the subject takes precedence, and the login configuration will be - * ignored. - * - * @see #withLoginConfiguration(Map) - */ - @NonNull - public Builder withLoginConfiguration(@Nullable Configuration loginConfiguration) { - this.loginConfiguration = loginConfiguration; - return this; - } - /** - * Sets a login configuration that will be used to create a {@link LoginContext}. - * - *

This is an alternative to {@link #withLoginConfiguration(Configuration)}, that builds - * the configuration from {@code Krb5LoginModule} with the given options. - * - *

You MUST call either a withLoginConfiguration method or {@link #withSubject(Subject)}; - * if both are called, the subject takes precedence, and the login configuration will be - * ignored. - */ - @NonNull - public Builder withLoginConfiguration(@Nullable Map loginConfiguration) { - this.loginConfiguration = fetchLoginConfiguration(loginConfiguration); - return this; - } - - /** - * Sets a previously authenticated subject to reuse. - * - *

You MUST call either this method or {@link #withLoginConfiguration(Configuration)}; if - * both are called, the subject takes precedence, and the login configuration will be ignored. - */ - @NonNull - public Builder withSubject(@Nullable Subject subject) { - this.subject = subject; - return this; - } - - /** - * Sets the SASL protocol name to use; should match the username of the Kerberos service - * principal used by the DSE server. - */ - @NonNull - public Builder withSaslProtocol(@Nullable String saslProtocol) { - this.saslProtocol = saslProtocol; - return this; - } - - /** Sets the authorization ID (allows proxy authentication). */ - @NonNull - public Builder withAuthorizationId(@Nullable String authorizationId) { - this.authorizationId = authorizationId; - return this; - } - - /** - * Add a SASL property to use when creating the SASL client. - * - *

Note that this builder pre-initializes these two default properties: - * - *

-       * javax.security.sasl.server.authentication = true
-       * javax.security.sasl.qop = auth
-       * 
- */ - @NonNull - public Builder addSaslProperty(@NonNull String name, @NonNull String value) { - this.saslProperties.put(Objects.requireNonNull(name), Objects.requireNonNull(value)); - return this; - } - - @NonNull - public GssApiOptions build() { - return new GssApiOptions( - loginConfiguration, - subject, - saslProtocol, - authorizationId, - ImmutableMap.copyOf(saslProperties)); - } - - public static Configuration fetchLoginConfiguration(Map options) { - return new Configuration() { - - @Override - public AppConfigurationEntry[] getAppConfigurationEntry(String name) { - return new AppConfigurationEntry[] { - new AppConfigurationEntry( - "com.sun.security.auth.module.Krb5LoginModule", - AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, - options) - }; - } - }; - } - } - } - - protected static class GssApiAuthenticator extends BaseDseAuthenticator { - - private static final ByteBuffer MECHANISM = - ByteBuffer.wrap("GSSAPI".getBytes(Charsets.UTF_8)).asReadOnlyBuffer(); - private static final ByteBuffer SERVER_INITIAL_CHALLENGE = - ByteBuffer.wrap("GSSAPI-START".getBytes(Charsets.UTF_8)).asReadOnlyBuffer(); - private static final ByteBuffer EMPTY_BYTE_ARRAY = - ByteBuffer.wrap(new byte[0]).asReadOnlyBuffer(); - private static final String JAAS_CONFIG_ENTRY = "DseClient"; - private static final String[] SUPPORTED_MECHANISMS = new String[] {"GSSAPI"}; - - private Subject subject; - private SaslClient saslClient; - private EndPoint endPoint; - - protected GssApiAuthenticator( - GssApiOptions options, EndPoint endPoint, String serverAuthenticator) { - super(serverAuthenticator); - - try { - if (options.getSubject() != null) { - this.subject = options.getSubject(); - } else { - Configuration loginConfiguration = options.getLoginConfiguration(); - if (loginConfiguration == null) { - throw new IllegalArgumentException("Must provide one of subject or loginConfiguration"); - } - LoginContext login = new LoginContext(JAAS_CONFIG_ENTRY, null, null, loginConfiguration); - 
login.login(); - this.subject = login.getSubject(); - } - String protocol = options.getSaslProtocol(); - if (protocol == null) { - protocol = - System.getProperty( - SASL_SERVICE_NAME_PROPERTY, - System.getProperty(LEGACY_SASL_PROTOCOL_PROPERTY, DEFAULT_SASL_SERVICE_NAME)); - } - this.saslClient = - Sasl.createSaslClient( - SUPPORTED_MECHANISMS, - options.getAuthorizationId(), - protocol, - ((InetSocketAddress) endPoint.resolve()).getAddress().getCanonicalHostName(), - options.getSaslProperties(), - null); - } catch (LoginException | SaslException e) { - throw new AuthenticationException(endPoint, e.getMessage()); - } - this.endPoint = endPoint; - } - - @NonNull - @Override - protected ByteBuffer getMechanism() { - return MECHANISM; - } - - @NonNull - @Override - protected ByteBuffer getInitialServerChallenge() { - return SERVER_INITIAL_CHALLENGE; - } - - @Nullable - @Override - public ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer challenge) { - - byte[] challengeBytes; - if (SERVER_INITIAL_CHALLENGE.equals(challenge)) { - if (!saslClient.hasInitialResponse()) { - return EMPTY_BYTE_ARRAY; - } - challengeBytes = new byte[0]; - } else { - // The native protocol spec says the incoming challenge can be null depending on the - // implementation. But saslClient.evaluateChallenge clearly documents that the byte array - // can't be null, which probably means that a SASL authenticator never sends back null. 
- if (challenge == null) { - throw new AuthenticationException(this.endPoint, "Unexpected null challenge from server"); - } - challengeBytes = Bytes.getArray(challenge); - } - try { - - return ByteBuffer.wrap( - Subject.doAs( - subject, - new PrivilegedExceptionAction() { - @Override - public byte[] run() throws SaslException { - return saslClient.evaluateChallenge(challengeBytes); - } - })); - } catch (PrivilegedActionException e) { - throw new AuthenticationException(this.endPoint, e.getMessage(), e.getException()); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderBase.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderBase.java deleted file mode 100644 index 7c5ee23bd6c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderBase.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -/** - * @deprecated The driver's default plain text providers now support both Apache Cassandra and DSE. - * This type was preserved for backward compatibility, but implementors should now extend {@link - * PlainTextAuthProviderBase} instead. - */ -@ThreadSafe -@Deprecated -public abstract class DsePlainTextAuthProviderBase extends PlainTextAuthProviderBase { - - protected DsePlainTextAuthProviderBase(@NonNull String logPrefix) { - super(logPrefix); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.java deleted file mode 100644 index 64ee5265b5a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * {@link AuthProvider} that provides GSSAPI authenticator instances for clients to connect to DSE - * clusters secured with {@code DseAuthenticator}, in a programmatic way. - * - *

To use this provider the corresponding GssApiOptions must be passed into the provider - * directly, for example: - * - *

- *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
- *         DseGssApiAuthProviderBase.GssApiOptions.builder();
- *     Map<String, String> loginConfig =
- *         ImmutableMap.of(
- *             "principal",
- *             "user principal here ex cassandra@DATASTAX.COM",
- *             "useKeyTab",
- *             "true",
- *             "refreshKrb5Config",
- *             "true",
- *             "keyTab",
- *             "Path to keytab file here");
- *
- *     builder.withLoginConfiguration(loginConfig);
- *
- *     CqlSession session =
- *         CqlSession.builder()
- *             .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(builder.build()))
- *             .build();
- * 
- * - * or alternatively - * - *
- *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
- *         DseGssApiAuthProviderBase.GssApiOptions.builder().withSubject(subject);
- *     CqlSession session =
- *         CqlSession.builder()
- *             .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(builder.build()))
- *             .build();
- * 
- * - *

Kerberos Authentication

- * - * Keytab and ticket cache settings are specified using a standard JAAS configuration file. The - * location of the file can be set using the java.security.auth.login.config system - * property or by adding a login.config.url.n entry in the java.security - * properties file. Alternatively a login-configuration, or subject can be provided to the provider - * via the GssApiOptions (see above). - * - *

See the following documents for further details: - * - *

    - *
  1. JAAS - * Login Configuration File; - *
  2. Krb5LoginModule - * options; - *
  3. JAAS - * Authentication Tutorial for more on JAAS in general. - *
- * - *

Authentication using ticket cache

- * - * Run kinit to obtain a ticket and populate the cache before connecting. JAAS config: - * - *
- * DseClient {
- *   com.sun.security.auth.module.Krb5LoginModule required
- *     useTicketCache=true
- *     renewTGT=true;
- * };
- * 
- * - *

Authentication using a keytab file

- * - * To enable authentication using a keytab file, specify its location on disk. If your keytab - * contains more than one principal key, you should also specify which one to select. This - * information can also be specified in the driver config, under the login-configuration section. - * - *
- * DseClient {
- *     com.sun.security.auth.module.Krb5LoginModule required
- *       useKeyTab=true
- *       keyTab="/path/to/file.keytab"
- *       principal="user@MYDOMAIN.COM";
- * };
- * 
- * - *

Specifying SASL protocol name

- * - * The SASL protocol name used by this auth provider defaults to " - * {@value #DEFAULT_SASL_SERVICE_NAME}". - * - *

Important: the SASL protocol name should match the username of the Kerberos - * service principal used by the DSE server. This information is specified in the dse.yaml file by - * the {@code service_principal} option under the kerberos_options - * section, and may vary from one DSE installation to another – especially if you installed - * DSE with an automated package installer. - * - *

For example, if your dse.yaml file contains the following: - * - *

{@code
- * kerberos_options:
- *     ...
- *     service_principal: cassandra/my.host.com@MY.REALM.COM
- * }
- * - * The correct SASL protocol name to use when authenticating against this DSE server is "{@code - * cassandra}". - * - *

Should you need to change the SASL protocol name specify it in the GssApiOptions, use the - * method below: - * - *

- *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
- *         DseGssApiAuthProviderBase.GssApiOptions.builder();
- *     builder.withSaslProtocol("alternate");
- *     DseGssApiAuthProviderBase.GssApiOptions options = builder.build();
- * 
- * - *

Should internal sasl properties need to be set such as qop. This can also be accomplished by - * setting it in the GssApiOptions: - * - *

- *   DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
- *         DseGssApiAuthProviderBase.GssApiOptions.builder();
- *     builder.addSaslProperty("javax.security.sasl.qop", "auth-conf");
- *     DseGssApiAuthProviderBase.GssApiOptions options = builder.build();
- * 
- * - * @see Authenticating - * a DSE cluster with Kerberos - */ -public class ProgrammaticDseGssApiAuthProvider extends DseGssApiAuthProviderBase { - private final GssApiOptions options; - - public ProgrammaticDseGssApiAuthProvider(GssApiOptions options) { - super("Programmatic-Kerberos"); - this.options = options; - } - - @NonNull - @Override - protected GssApiOptions getOptions( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { - return options; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.java deleted file mode 100644 index a3624ba736d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Map; - -public class ProxyAuthentication { - private static final String PROXY_EXECUTE = "ProxyExecute"; - - /** - * Adds proxy authentication information to a CQL statement. - * - *

This allows executing a statement as another role than the one the session is currently - * authenticated as. - * - * @param userOrRole the role to use for execution. If the statement was already configured with - * another role, it will get replaced by this one. - * @param statement the statement to modify. - * @return a statement that will run the same CQL query as {@code statement}, but acting as the - * provided role. Note: with the driver's default implementations, this will always be a copy; - * but if you use a custom implementation, it might return the same instance (depending on the - * behavior of {@link Statement#setCustomPayload(Map) statement.setCustomPayload()}). - * @see Setting - * up roles for applications (DSE 6.0 admin guide) - */ - @NonNull - public static > StatementT executeAs( - @NonNull String userOrRole, @NonNull StatementT statement) { - return statement.setCustomPayload( - addProxyExecuteEntry(statement.getCustomPayload(), userOrRole)); - } - - /** - * Adds proxy authentication information to a graph statement. 
- * - * @see #executeAs(String, Statement) - */ - @NonNull - public static > StatementT executeAs( - @NonNull String userOrRole, @NonNull StatementT statement) { - return statement.setCustomPayload( - addProxyExecuteEntry(statement.getCustomPayload(), userOrRole)); - } - - private static Map addProxyExecuteEntry( - Map currentPayload, @NonNull String userOrRole) { - NullAllowingImmutableMap.Builder builder = - NullAllowingImmutableMap.builder(); - builder.put(PROXY_EXECUTE, ByteBuffer.wrap(userOrRole.getBytes(Charsets.UTF_8))); - if (!currentPayload.isEmpty()) { - for (Map.Entry entry : currentPayload.entrySet()) { - String key = entry.getKey(); - if (!key.equals(PROXY_EXECUTE)) { - builder.put(key, entry.getValue()); - } - } - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverConfigLoader.java b/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverConfigLoader.java deleted file mode 100644 index 2694b51ffca..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverConfigLoader.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.config; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.File; -import java.net.URL; - -/** - * @deprecated This class only exists for backward compatibility. All of its methods delegate to - * their counterparts on {@link DriverConfigLoader}, which you should call directly instead. - */ -@Deprecated -public class DseDriverConfigLoader { - - /** - * @deprecated This method only exists for backward compatibility. It delegates to {@link - * DriverConfigLoader#fromClasspath(String)}, which you should call directly instead. - */ - @Deprecated - @NonNull - public static DriverConfigLoader fromClasspath(@NonNull String resourceBaseName) { - return DriverConfigLoader.fromClasspath(resourceBaseName); - } - - /** - * @deprecated This method only exists for backward compatibility. It delegates to {@link - * DriverConfigLoader#fromFile(File)}, which you should call directly instead. - */ - @Deprecated - @NonNull - public static DriverConfigLoader fromFile(@NonNull File file) { - return DriverConfigLoader.fromFile(file); - } - - /** - * @deprecated This method only exists for backward compatibility. It delegates to {@link - * DriverConfigLoader#fromUrl(URL)}, which you should call directly instead. - */ - @Deprecated - @NonNull - public static DriverConfigLoader fromUrl(@NonNull URL url) { - return DriverConfigLoader.fromUrl(url); - } - - /** - * @deprecated This method only exists for backward compatibility. It delegates to {@link - * DriverConfigLoader#programmaticBuilder()}, which you should call directly instead. 
- */ - @Deprecated - @NonNull - public static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder() { - return DriverConfigLoader.programmaticBuilder(); - } - - private DseDriverConfigLoader() { - throw new AssertionError("Not meant to be instantiated"); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverOption.java b/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverOption.java deleted file mode 100644 index 4d10501f6d2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverOption.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.config; - -import com.datastax.oss.driver.api.core.config.DriverOption; -import edu.umd.cs.findbugs.annotations.NonNull; - -public enum DseDriverOption implements DriverOption { - /** - * The name of the application using the session. - * - *

Value type: {@link String} - */ - APPLICATION_NAME("basic.application.name"), - /** - * The version of the application using the session. - * - *

Value type: {@link String} - */ - APPLICATION_VERSION("basic.application.version"), - - /** - * Proxy authentication for GSSAPI authentication: allows to login as another user or role. - * - *

Value type: {@link String} - */ - AUTH_PROVIDER_AUTHORIZATION_ID("advanced.auth-provider.authorization-id"), - /** - * Service name for GSSAPI authentication. - * - *

Value type: {@link String} - */ - AUTH_PROVIDER_SERVICE("advanced.auth-provider.service"), - /** - * Login configuration for GSSAPI authentication. - * - *

Value type: {@link java.util.Map Map}<{@link String},{@link String}> - */ - AUTH_PROVIDER_LOGIN_CONFIGURATION("advanced.auth-provider.login-configuration"), - /** - * Internal SASL properties, if any, such as QOP, for GSSAPI authentication. - * - *

Value type: {@link java.util.Map Map}<{@link String},{@link String}> - */ - AUTH_PROVIDER_SASL_PROPERTIES("advanced.auth-provider.sasl-properties"), - - /** - * The page size for continuous paging. - * - *

Value type: int - */ - CONTINUOUS_PAGING_PAGE_SIZE("advanced.continuous-paging.page-size"), - /** - * Whether {@link #CONTINUOUS_PAGING_PAGE_SIZE} should be interpreted in number of rows or bytes. - * - *

Value type: boolean - */ - CONTINUOUS_PAGING_PAGE_SIZE_BYTES("advanced.continuous-paging.page-size-in-bytes"), - /** - * The maximum number of continuous pages to return. - * - *

Value type: int - */ - CONTINUOUS_PAGING_MAX_PAGES("advanced.continuous-paging.max-pages"), - /** - * The maximum number of continuous pages per second. - * - *

Value type: int - */ - CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND("advanced.continuous-paging.max-pages-per-second"), - /** - * The maximum number of continuous pages that can be stored in the local queue. - * - *

Value type: int - */ - CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES("advanced.continuous-paging.max-enqueued-pages"), - /** - * How long to wait for the coordinator to send the first continuous page. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE("advanced.continuous-paging.timeout.first-page"), - /** - * How long to wait for the coordinator to send subsequent continuous pages. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES("advanced.continuous-paging.timeout.other-pages"), - - /** - * The largest latency that we expect to record for continuous requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST( - "advanced.metrics.session.continuous-cql-requests.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for - * continuous requests. - * - *

Value-type: int - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS( - "advanced.metrics.session.continuous-cql-requests.significant-digits"), - /** - * The interval at which percentile data is refreshed for continuous requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL( - "advanced.metrics.session.continuous-cql-requests.refresh-interval"), - - /** - * The read consistency level to use for graph statements. - * - *

Value type: {@link String} - */ - GRAPH_READ_CONSISTENCY_LEVEL("basic.graph.read-consistency-level"), - /** - * The write consistency level to use for graph statements. - * - *

Value type: {@link String} - */ - GRAPH_WRITE_CONSISTENCY_LEVEL("basic.graph.write-consistency-level"), - /** - * The traversal source to use for graph statements. - * - *

Value type: {@link String} - */ - GRAPH_TRAVERSAL_SOURCE("basic.graph.traversal-source"), - /** - * The sub-protocol the driver will use to communicate with DSE Graph, on top of the Cassandra - * native protocol. - * - *

Value type: {@link String} - */ - GRAPH_SUB_PROTOCOL("advanced.graph.sub-protocol"), - /** - * Whether a script statement represents a system query. - * - *

Value type: boolean - */ - GRAPH_IS_SYSTEM_QUERY("basic.graph.is-system-query"), - /** - * The name of the graph targeted by graph statements. - * - *

Value type: {@link String} - */ - GRAPH_NAME("basic.graph.name"), - /** - * How long the driver waits for a graph request to complete. - * - *

Value-type: {@link java.time.Duration Duration} - */ - GRAPH_TIMEOUT("basic.graph.timeout"), - - /** - * Whether to send events for Insights monitoring. - * - *

Value type: boolean - */ - MONITOR_REPORTING_ENABLED("advanced.monitor-reporting.enabled"), - - /** - * Whether to enable paging for Graph queries. - * - *

Value type: {@link String} - */ - GRAPH_PAGING_ENABLED("advanced.graph.paging-enabled"), - - /** - * The page size for Graph continuous paging. - * - *

Value type: int - */ - GRAPH_CONTINUOUS_PAGING_PAGE_SIZE("advanced.graph.paging-options.page-size"), - - /** - * The maximum number of Graph continuous pages to return. - * - *

Value type: int - */ - GRAPH_CONTINUOUS_PAGING_MAX_PAGES("advanced.graph.paging-options.max-pages"), - /** - * The maximum number of Graph continuous pages per second. - * - *

Value type: int - */ - GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND( - "advanced.graph.paging-options.max-pages-per-second"), - /** - * The maximum number of Graph continuous pages that can be stored in the local queue. - * - *

Value type: int - */ - GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES("advanced.graph.paging-options.max-enqueued-pages"), - /** - * The largest latency that we expect to record for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_GRAPH_REQUESTS_HIGHEST("advanced.metrics.session.graph-requests.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for graph - * requests. - * - *

Value-type: int - */ - METRICS_SESSION_GRAPH_REQUESTS_DIGITS( - "advanced.metrics.session.graph-requests.significant-digits"), - /** - * The interval at which percentile data is refreshed for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_GRAPH_REQUESTS_INTERVAL( - "advanced.metrics.session.graph-requests.refresh-interval"), - /** - * The largest latency that we expect to record for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_GRAPH_MESSAGES_HIGHEST("advanced.metrics.node.graph-messages.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for graph - * requests. - * - *

Value-type: int - */ - METRICS_NODE_GRAPH_MESSAGES_DIGITS("advanced.metrics.node.graph-messages.significant-digits"), - /** - * The interval at which percentile data is refreshed for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_GRAPH_MESSAGES_INTERVAL("advanced.metrics.node.graph-messages.refresh-interval"), - - /** - * The shortest latency that we expect to record for continuous requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST( - "advanced.metrics.session.continuous-cql-requests.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO( - "advanced.metrics.session.continuous-cql-requests.slo"), - - /** - * The shortest latency that we expect to record for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_GRAPH_REQUESTS_LOWEST("advanced.metrics.session.graph-requests.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_GRAPH_REQUESTS_SLO("advanced.metrics.session.graph-requests.slo"), - - /** - * The shortest latency that we expect to record for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_GRAPH_MESSAGES_LOWEST("advanced.metrics.node.graph-messages.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_GRAPH_MESSAGES_SLO("advanced.metrics.node.graph-messages.slo"), - /** - * Optional list of percentiles to publish for graph-requests metric. Produces an additional time - * series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES( - "advanced.metrics.session.graph-requests.publish-percentiles"), - /** - * Optional list of percentiles to publish for node graph-messages metric. Produces an additional - * time series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES( - "advanced.metrics.node.graph-messages.publish-percentiles"), - /** - * Optional list of percentiles to publish for continuous paging requests metric. Produces an - * additional time series for each requested percentile. This percentile is computed locally, and - * so can't be aggregated with percentiles computed across other dimensions (e.g. in a different - * instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES( - "advanced.metrics.session.continuous-cql-requests.publish-percentiles"), - ; - - private final String path; - - DseDriverOption(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousAsyncResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousAsyncResultSet.java deleted file mode 100644 index a9491ec2414..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousAsyncResultSet.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.continuous; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.concurrent.CancellationException; - -/** - * The result of an {@linkplain ContinuousSession#executeContinuouslyAsync(Statement) asynchronous - * continuous paging query}. - * - *

DSE replies to a continuous query with a stream of response frames. There is one instance of - * this class for each frame. - */ -public interface ContinuousAsyncResultSet - extends AsyncPagingIterable { - - /** Returns the current page's number. Pages are numbered starting from 1. */ - int pageNumber(); - - /** - * Cancels the continuous query. - * - *

There might still be rows available in the {@linkplain #currentPage() current page} after - * the cancellation; these rows can be retrieved normally. - * - *

Also, there might be more pages available in the driver's local page cache after the - * cancellation; these extra pages will be discarded. - * - *

Therefore, if you plan to resume the iteration later, the correct procedure is as follows: - * - *

    - *
  1. Cancel the operation by invoking this method, or by cancelling the {@linkplain - * #fetchNextPage() next page's future}; - *
  2. Keep iterating on the current page until it doesn't return any more rows; - *
  3. Retrieve the paging state with {@link #getExecutionInfo() - * getExecutionInfo().getPagingState()}; - *
  4. {@linkplain Statement#setPagingState(ByteBuffer) Re-inject the paging state} in the - * statement; - *
  5. Resume the operation by invoking {@link - * ContinuousSession#executeContinuouslyAsync(Statement) executeContinuouslyAsync} again. - *
- * - * After a cancellation, futures returned by {@link #fetchNextPage()} that are not yet complete - * will always complete exceptionally by throwing a {@link CancellationException}, even if - * they were obtained before the cancellation. - */ - void cancel(); - - /** - * {@inheritDoc} - * - *

Note: because the driver does not support query traces for continuous queries, {@link - * ExecutionInfo#getTracingId()} will always be {@code null}. - */ - @NonNull - @Override - ExecutionInfo getExecutionInfo(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousResultSet.java deleted file mode 100644 index a333801a59a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousResultSet.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.cql.continuous; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * The result of a {@linkplain ContinuousSession#executeContinuously(Statement) synchronous - * continuous paging query}. - * - *

It uses {@linkplain ContinuousAsyncResultSet asynchronous calls} internally, but blocks on the - * results in order to provide a synchronous API to its clients. If the query is paged, only the - * first page will be fetched initially, and iteration will trigger background fetches of the next - * pages when necessary. - * - *

Note that this object can only be iterated once: rows are "consumed" as they are read, - * subsequent calls to {@code iterator()} will return the same iterator instance. - * - *

Implementations of this type are not thread-safe. They can only be iterated by the - * thread that invoked {@code session.executeContinuously}. - */ -public interface ContinuousResultSet extends ResultSet { - - /** - * Cancels the continuous query. - * - *

There might still be rows available in the current page after the cancellation; the - * iteration will only stop when such rows are fully iterated upon. - * - *

Also, there might be more pages available in the driver's local page cache after the - * cancellation; these extra pages will be discarded. - * - *

Therefore, if you plan to resume the iteration later, the correct procedure is as follows: - * - *

    - *
  1. Cancel the operation by invoking this method; - *
  2. Keep iterating on this object until it doesn't return any more rows; - *
  3. Retrieve the paging state with {@link #getExecutionInfo() - * getExecutionInfo().getPagingState()}; - *
  4. {@linkplain Statement#setPagingState(ByteBuffer) Re-inject the paging state} in the - * statement; - *
  5. Resume the operation by invoking {@link ContinuousSession#executeContinuously(Statement) - * executeContinuously} again. - *
- */ - void cancel(); - - /** - * {@inheritDoc} - * - *

Note: because the driver does not support query traces for continuous queries, {@link - * ExecutionInfo#getTracingId()} will always be {@code null}. - */ - @NonNull - @Override - default ExecutionInfo getExecutionInfo() { - List infos = getExecutionInfos(); - return infos.get(infos.size() - 1); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.java deleted file mode 100644 index 1c647b33b92..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.continuous; - -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestSyncProcessor; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import java.util.concurrent.CompletionStage; - -/** - * A session that has the ability to execute continuous paging queries. - * - *

Continuous paging is a new method of streaming bulk amounts of records from DataStax - * Enterprise (DSE) to the Java Driver, available since DSE 5.1. It is mainly intended to be - * leveraged by DSE - * Analytics and Apache Spark™, or by any similar analytics tool that needs to read large - * portions of a table in one single operation, as quick and reliably as possible. - * - *

Continuous paging provides the best performance improvement against regular paging when the - * following conditions are met: - * - *

    - *
  1. The statement must target a single partition or a token range owned by one single replica; - * in practice, this means that the statement must have either a {@linkplain - * Statement#setRoutingKey(ByteBuffer) routing key} or a {@linkplain - * Statement#setRoutingToken(Token) routing token} set; - *
  2. The coordinator must be a replica; in practice, this is usually achieved by using - * token-aware routing (if you are using the driver's default {@link LoadBalancingPolicy}, - * then this condition is met); - *
  3. The consistency level must be {@link DefaultConsistencyLevel#ONE ONE} (or {@link - * DefaultConsistencyLevel#LOCAL_ONE LOCAL_ONE}). - *
- * - *

If the above conditions are met, the coordinator will be able to optimize the read path and - * serve results from local data, thus significantly improving response times; if however these - * conditions cannot be met, continuous paging would still work, but response times wouldn't be - * significantly better than those of regular paging anymore. - * - * @see Continuous - * paging options in cassandra.yaml configuration file - * @see DSE - * Continuous Paging Tuning and Support Guide - */ -public interface ContinuousSession extends Session { - - /** - * Executes the provided query with continuous paging synchronously. - * - *

This method takes care of chaining the successive results into a convenient iterable, - * provided that you always access the result from the same thread. For more flexibility, consider - * using the {@linkplain #executeContinuouslyAsync(Statement) asynchronous variant} of this method - * instead. - * - *

See {@link ContinuousSession} for more explanations about continuous paging. - * - *

This feature is only available with DataStax Enterprise. Executing continuous queries - * against an Apache Cassandra© cluster will result in a runtime error. - * - * @param statement the query to execute. - * @return a synchronous iterable on the results. - */ - @NonNull - default ContinuousResultSet executeContinuously(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, ContinuousCqlRequestSyncProcessor.CONTINUOUS_RESULT_SYNC)); - } - - /** - * Executes the provided query with continuous paging asynchronously. - * - *

The server will push all requested pages asynchronously, according to the options defined in - * the current execution profile. The client should consume all pages as quickly as possible, to - * avoid blocking the server for too long. The server will adjust the rate according to the client - * speed, but it will give up if the client does not consume any pages in a period of time equal - * to the read request timeout. - * - *

See {@link ContinuousSession} for more explanations about continuous paging. - * - *

This feature is only available with DataStax Enterprise. Executing continuous queries - * against an Apache Cassandra© cluster will result in a runtime error. - * - * @param statement the query to execute. - * @return a future to the first asynchronous result. - */ - @NonNull - default CompletionStage executeContinuouslyAsync( - @NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, ContinuousCqlRequestAsyncProcessor.CONTINUOUS_RESULT_ASYNC)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveResultSet.java deleted file mode 100644 index 6b645ad05bf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveResultSet.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.continuous.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; - -/** - * A marker interface for publishers returned by {@link ContinuousReactiveSession}. - * - * @see ContinuousReactiveSession#executeContinuouslyReactive(String) - * @see ContinuousReactiveSession#executeContinuouslyReactive(Statement) - */ -public interface ContinuousReactiveResultSet extends ReactiveResultSet {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.java deleted file mode 100644 index d00013731cb..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.continuous.reactive; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousSession; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.internal.core.cql.continuous.reactive.ContinuousCqlRequestReactiveProcessor; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import org.reactivestreams.Publisher; - -/** - * A {@link Session} that offers utility methods to issue queries using reactive-style programming - * and continuous paging, combined together. - * - *

Methods in this interface all return {@link ContinuousReactiveResultSet} instances. All - * publishers support multiple subscriptions in a unicast fashion: each subscriber triggers an - * independent request execution and gets its own copy of the results. - * - *

Also, note that the publishers may emit items to their subscribers on an internal driver IO - * thread. Subscriber implementors are encouraged to abide by Reactive Streams - * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside - * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down - * the driver and impact performance. Instead, they should asynchronously dispatch received signals - * to their processing logic. - * - * @see ReactiveRow - */ -public interface ContinuousReactiveSession extends Session { - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query continuously and - * emits all the results. - * - *

See {@link ContinuousSession} for more explanations about continuous paging. - * - *

This feature is only available with DataStax Enterprise. Executing continuous queries - * against an Apache Cassandra® cluster will result in a runtime error. - * - * @param query the query to execute. - * @return The {@link Publisher} that will publish the returned results. - */ - @NonNull - default ContinuousReactiveResultSet executeContinuouslyReactive(@NonNull String query) { - return executeContinuouslyReactive(SimpleStatement.newInstance(query)); - } - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query continuously and - * emits all the results. - * - *

See {@link ContinuousSession} for more explanations about continuous paging. - * - *

This feature is only available with DataStax Enterprise. Executing continuous queries - * against an Apache Cassandra® cluster will result in a runtime error. - * - * @param statement the statement to execute. - * @return The {@link Publisher} that will publish the returned results. - */ - @NonNull - default ContinuousReactiveResultSet executeContinuouslyReactive(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, ContinuousCqlRequestReactiveProcessor.CONTINUOUS_REACTIVE_RESULT_SET)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveQueryMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveQueryMetadata.java deleted file mode 100644 index 55a898cd3ee..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveQueryMetadata.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.reactive; - -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.reactivestreams.Publisher; - -/** - * Interface implemented by all the reactive result set publishers provided by the driver, and - * notably by {@link ReactiveResultSet}. - */ -public interface ReactiveQueryMetadata { - - /** - * Returns metadata about the {@linkplain ColumnDefinitions columns} contained in this result set. - * - *

This publisher emits exactly one item as soon as the first response arrives, then completes. - * If the query execution fails within the first request-response cycle, then this - * publisher will fail with the same error; however if the error happens after the first - * response, then this publisher will be already completed and will not acknowledge that - * error in any way. - * - *

By default, publishers returned by this method do not support multiple subscriptions. - * - * @see ReactiveRow#getColumnDefinitions() - */ - @NonNull - Publisher getColumnDefinitions(); - - /** - * Returns {@linkplain ExecutionInfo information about the execution} of all requests that have - * been performed so far to assemble this result set. - * - *

If the query is not paged, this publisher will emit exactly one item as soon as the response - * arrives, then complete. If the query is paged, it will emit multiple items, one per page; then - * it will complete when the last page arrives. If the query execution fails, then this publisher - * will fail with the same error. - * - *

By default, publishers returned by this method do not support multiple subscriptions. - * - * @see ReactiveRow#getExecutionInfo() - */ - @NonNull - Publisher getExecutionInfos(); - - /** - * If the query that produced this result was a conditional update, indicates whether it was - * successfully applied. - * - *

This publisher emits exactly one item as soon as the first response arrives, then completes. - * If the query execution fails within the first request-response cycle, then this - * publisher will fail with the same error; however if the error happens after the first - * response, then this publisher will be already completed and will not acknowledge that - * error in any way. - * - *

By default, publishers returned by this method do not support multiple subscriptions. - * - *

For consistency, this method always returns {@code true} for non-conditional queries - * (although there is no reason to call the method in that case). This is also the case for - * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF - * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column. - * - *

Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (CASSANDRA-7337) causes this - * method to always return {@code true} for batches containing conditional queries. - * - * @see ReactiveRow#wasApplied() - */ - @NonNull - Publisher wasApplied(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.java deleted file mode 100644 index 0e44dab8cab..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.cql.reactive; - -import com.datastax.oss.driver.api.core.cql.Statement; -import org.reactivestreams.Publisher; - -/** - * A {@link Publisher} of {@link ReactiveRow}s returned by a {@link ReactiveSession}. - * - *

By default, all implementations returned by the driver are cold, unicast, single-subscriber - * only publishers. In other words, they do not support multiple subscriptions; consider - * caching the results produced by such publishers if you need to consume them by more than one - * downstream subscriber. - * - *

Also, note that reactive result sets may emit items to their subscribers on an internal driver - * IO thread. Subscriber implementors are encouraged to abide by Reactive Streams - * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside - * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down - * the driver and impact performance. Instead, they should asynchronously dispatch received signals - * to their processing logic. - * - *

This interface exists mainly to expose useful information about {@linkplain - * #getExecutionInfos() request execution} and {@linkplain #getColumnDefinitions() query metadata}. - * This is particularly convenient for queries that do not return rows; for queries that do return - * rows, it is also possible, and oftentimes easier, to access that same information {@linkplain - * ReactiveRow at row level}. - * - * @see ReactiveSession#executeReactive(String) - * @see ReactiveSession#executeReactive(Statement) - * @see ReactiveRow - */ -public interface ReactiveResultSet extends Publisher, ReactiveQueryMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.java deleted file mode 100644 index c3b94689580..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.reactive; - -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.type.DataTypes; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A row produced by a {@linkplain ReactiveResultSet reactive result set}. - * - *

This is essentially an extension of the driver's {@link Row} object that also exposes useful - * information about {@linkplain #getExecutionInfo() request execution} and {@linkplain - * #getColumnDefinitions() query metadata} (note however that this information is also exposed at - * result set level for convenience). - * - * @see ReactiveSession - * @see ReactiveResultSet - */ -public interface ReactiveRow extends Row { - - /** - * Returns the column definitions contained in this row. - * - *

This object is the same for all rows pertaining to the same result set. - * - * @return the column definitions contained in this row. - * @see ReactiveResultSet#getColumnDefinitions() - */ - @NonNull - @Override - ColumnDefinitions getColumnDefinitions(); - - /** - * The execution information for the paged request that produced this result. - * - *

This object is the same for two rows pertaining to the same page, but differs for rows - * pertaining to different pages. - * - * @return the execution information for the paged request that produced this result. - * @see ReactiveResultSet#getExecutionInfos() - */ - @NonNull - ExecutionInfo getExecutionInfo(); - - /** - * If the query that produced this result was a conditional update, indicates whether it was - * successfully applied. - * - *

This is equivalent to calling: - * - *

{@code
-   * ReactiveRow row = ...
-   * boolean wasApplied = row.getBoolean("[applied]");
-   * }
- * - *

For consistency, this method always returns {@code true} for non-conditional queries - * (although there is no reason to call the method in that case). This is also the case for - * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF - * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column. - * - *

Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (CASSANDRA-7337) causes this - * method to always return {@code true} for batches containing conditional queries. - * - *

This method always return the same value for all results in the result set. - * - * @return {@code true} for non-conditional queries and for conditional queries that were - * successfully applied, {@code false} otherwise. - */ - default boolean wasApplied() { - return !getColumnDefinitions().contains("[applied]") - || !getColumnDefinitions().get("[applied]").getType().equals(DataTypes.BOOLEAN) - || getBoolean("[applied]"); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.java deleted file mode 100644 index 2fd8ffe41c2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.reactive; - -import com.datastax.dse.driver.internal.core.cql.reactive.CqlRequestReactiveProcessor; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; -import org.reactivestreams.Publisher; - -/** - * A {@link Session} that offers utility methods to issue queries using reactive-style programming. - * - *

Methods in this interface all return {@link ReactiveResultSet} instances. See the javadocs of - * this interface for important remarks anc caveats regarding the subscription to and consumption of - * reactive result sets. - * - * @see ReactiveResultSet - * @see ReactiveRow - */ -public interface ReactiveSession extends Session { - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all - * the results. - * - *

This is an alias for {@link #executeReactive(Statement)} - * executeReactive(SimpleStatement.newInstance(query))}. - * - * @param query the query to execute. - * @return The {@link Publisher} that will publish the returned results. - * @see SimpleStatement#newInstance(String) - */ - @NonNull - default ReactiveResultSet executeReactive(@NonNull String query) { - return executeReactive(SimpleStatement.newInstance(query)); - } - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all - * the results. - * - *

This is an alias for {@link #executeReactive(Statement)} - * executeReactive(SimpleStatement.newInstance(query, values))}. - * - * @param query the query to execute. - * @param values the values for placeholders in the query string. Individual values can be {@code - * null}, but the vararg array itself can't. - * @return The {@link Publisher} that will publish the returned results. - * @see SimpleStatement#newInstance(String,Object...) - */ - @NonNull - default ReactiveResultSet executeReactive(@NonNull String query, @NonNull Object... values) { - return executeReactive(SimpleStatement.newInstance(query, values)); - } - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all - * the results. - * - *

This is an alias for {@link #executeReactive(Statement)} - * executeReactive(SimpleStatement.newInstance(query,values))}. - * - * @param query the query to execute. - * @param values the values for named placeholders in the query string. Individual values can be - * {@code null}, but the map itself can't. - * @return The {@link Publisher} that will publish the returned results. - * @see SimpleStatement#newInstance(String,Map) - */ - @NonNull - default ReactiveResultSet executeReactive( - @NonNull String query, @NonNull Map values) { - return executeReactive(SimpleStatement.newInstance(query, values)); - } - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all - * the results. - * - * @param statement the statement to execute. - * @return The {@link Publisher} that will publish the returned results. - */ - @NonNull - default ReactiveResultSet executeReactive(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, CqlRequestReactiveProcessor.REACTIVE_RESULT_SET)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/package-info.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/package-info.java deleted file mode 100644 index 01a5f514aba..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Types related to CQL execution using reactive-style programming. - * - *

Note that this is located in a {@code dse} package for historical reasons; reactive queries - * can now be used with open-source Cassandra as well. - */ -package com.datastax.dse.driver.api.core.cql.reactive; diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Geometry.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Geometry.java deleted file mode 100644 index 66a5708832e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Geometry.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; - -/** - * The driver-side representation for a DSE geospatial type. - * - *

- *     Row row = dseSession.execute("SELECT coords FROM points_of_interest WHERE name = 'Eiffel Tower'").one();
- *     Point coords = row.get("coords", Point.class);
- * 
- * - * The default implementations returned by the driver are immutable and serializable. If you write - * your own implementations, they should at least be thread-safe; serializability is not mandatory, - * but recommended for use with some 3rd-party tools like Apache Spark ™. - */ -public interface Geometry { - - /** - * Returns a Well-known Text (WKT) - * representation of this geospatial type. - */ - @NonNull - String asWellKnownText(); - - /** - * Returns a Well-known - * Binary (WKB) representation of this geospatial type. - * - *

Note that, due to DSE implementation details, the resulting byte buffer always uses - * little-endian order, regardless of the platform's native order. - */ - @NonNull - ByteBuffer asWellKnownBinary(); - - /** Returns a JSON representation of this geospatial type. */ - @NonNull - String asGeoJson(); - - /** - * Tests whether this geospatial type instance contains another instance. - * - * @param other the other instance. - * @return whether {@code this} contains {@code other}. - */ - boolean contains(@NonNull Geometry other); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/LineString.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/LineString.java deleted file mode 100644 index 7f77b3202a2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/LineString.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.data.geometry; - -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; -import com.esri.core.geometry.ogc.OGCLineString; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * The driver-side representation for DSE's {@code LineString}. - * - *

This is a curve in a two-dimensional XY-plane, represented by a set of points (with linear - * interpolation between them). - * - *

The default implementation returned by the driver is immutable. - */ -public interface LineString extends Geometry { - /** - * Creates a line string from its Well-known Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. - * @return the line string represented by the WKT. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - */ - @NonNull - static LineString fromWellKnownText(@NonNull String source) { - return new DefaultLineString(DefaultGeometry.fromOgcWellKnownText(source, OGCLineString.class)); - } - - /** - * Creates a line string from its Well-known Binary - * (WKB) representation. - * - * @param source the Well-known Binary representation to parse. - * @return the line string represented by the WKB. - * @throws IllegalArgumentException if the provided {@link ByteBuffer} does not contain a valid - * Well-known Binary representation. - */ - @NonNull - static LineString fromWellKnownBinary(@NonNull ByteBuffer source) { - return new DefaultLineString( - DefaultGeometry.fromOgcWellKnownBinary(source, OGCLineString.class)); - } - - /** - * Creates a line string from a GeoJSON - * LineString representation. - * - * @param source the GeoJSON - * LineString representation to parse. - * @return the line string represented by the GeoJSON LineString. - * @throws IllegalArgumentException if the string does not contain a valid GeoJSON LineString - * representation. - */ - @NonNull - static LineString fromGeoJson(@NonNull String source) { - return new DefaultLineString(DefaultGeometry.fromOgcGeoJson(source, OGCLineString.class)); - } - - /** Creates a line string from two or more points. */ - @NonNull - static LineString fromPoints(@NonNull Point p1, @NonNull Point p2, @NonNull Point... 
pn) { - return new DefaultLineString(p1, p2, pn); - } - - @NonNull - List getPoints(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Point.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Point.java deleted file mode 100644 index b064b3fb222..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Point.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.esri.core.geometry.ogc.OGCPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; - -/** - * The driver-side representation of DSE's {@code Point}. - * - *

This is a zero-dimensional object that represents a specific (X,Y) location in a - * two-dimensional XY-plane. In case of Geographic Coordinate Systems, the X coordinate is the - * longitude and the Y is the latitude. - * - *

The default implementation returned by the driver is immutable. - */ -public interface Point extends Geometry { - - /** - * Creates a point from its Well-known - * Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. - * @return the point represented by the WKT. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - */ - @NonNull - static Point fromWellKnownText(@NonNull String source) { - return new DefaultPoint(DefaultGeometry.fromOgcWellKnownText(source, OGCPoint.class)); - } - - /** - * Creates a point from its Well-known Binary - * (WKB) representation. - * - * @param source the Well-known Binary representation to parse. - * @return the point represented by the WKB. - * @throws IllegalArgumentException if the provided {@link ByteBuffer} does not contain a valid - * Well-known Binary representation. - */ - @NonNull - static Point fromWellKnownBinary(@NonNull ByteBuffer source) { - return new DefaultPoint(DefaultGeometry.fromOgcWellKnownBinary(source, OGCPoint.class)); - } - - /** - * Creates a point from a GeoJSON - * Point representation. - * - * @param source the GeoJSON Point - * representation to parse. - * @return the point represented by the GeoJSON Point. - * @throws IllegalArgumentException if the string does not contain a valid GeoJSON Point representation. - */ - @NonNull - static Point fromGeoJson(@NonNull String source) { - return new DefaultPoint(DefaultGeometry.fromOgcGeoJson(source, OGCPoint.class)); - } - - /** - * Creates a new point. - * - * @param x The X coordinate of this point (or its longitude in Geographic Coordinate Systems). - * @param y The Y coordinate of this point (or its latitude in Geographic Coordinate Systems). - * @return the point represented by coordinates. 
- */ - @NonNull - static Point fromCoordinates(double x, double y) { - return new DefaultPoint(x, y); - } - - /** - * Returns the X coordinate of this 2D point (or its longitude in Geographic Coordinate Systems). - */ - double X(); - - /** - * Returns the Y coordinate of this 2D point (or its latitude in Geographic Coordinate Systems). - */ - double Y(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Polygon.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Polygon.java deleted file mode 100644 index d793704defa..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Polygon.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; -import com.esri.core.geometry.ogc.OGCPolygon; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * The driver-side representation of DSE's {@code Polygon}. - * - *

This is a planar surface in a two-dimensional XY-plane, represented by one exterior boundary - * and 0 or more interior boundaries. - * - *

The default implementation returned by the driver is immutable. - */ -public interface Polygon extends Geometry { - /** - * Creates a polygon from its Well-known - * Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. - * @return the polygon represented by the WKT. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - */ - @NonNull - static Polygon fromWellKnownText(@NonNull String source) { - return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownText(source, OGCPolygon.class)); - } - - /** - * Creates a polygon from its Well-known Binary - * (WKB) representation. - * - * @param source the Well-known Binary representation to parse. - * @return the polygon represented by the WKB. - * @throws IllegalArgumentException if the provided {@link ByteBuffer} does not contain a valid - * Well-known Binary representation. - */ - @NonNull - static Polygon fromWellKnownBinary(@NonNull ByteBuffer source) { - return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownBinary(source, OGCPolygon.class)); - } - - /** - * Creates a polygon from a GeoJSON - * Polygon representation. - * - * @param source the GeoJSON Polygon - * representation to parse. - * @return the polygon represented by the GeoJSON Polygon. - * @throws IllegalArgumentException if the string does not contain a valid GeoJSON Polygon representation. - */ - @NonNull - static Polygon fromGeoJson(@NonNull String source) { - return new DefaultPolygon(DefaultGeometry.fromOgcGeoJson(source, OGCPolygon.class)); - } - - /** Creates a polygon from a series of 3 or more points. */ - @NonNull - static Polygon fromPoints( - @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... pn) { - return new DefaultPolygon(p1, p2, p3, pn); - } - - /** - * Returns a polygon builder. - * - *

This is intended for complex polygons with multiple rings (i.e. holes inside the polygon). - * For simple cases, consider {@link #fromPoints(Point, Point, Point, Point...)} instead. - */ - @NonNull - static Builder builder() { - return new DefaultPolygon.Builder(); - } - - /** Returns the external ring of the polygon. */ - @NonNull - List getExteriorRing(); - - /** - * Returns the internal rings of the polygon, i.e. any holes inside of it (or islands inside of - * the holes). - */ - @NonNull - List> getInteriorRings(); - - /** Provides a simple DSL to build a polygon. */ - interface Builder { - /** - * Adds a new ring for this polygon. - * - *

There can be one or more outer rings and zero or more inner rings. If a polygon has an - * inner ring, the inner ring looks like a hole. If the hole contains another outer ring, that - * outer ring looks like an island. - * - *

There must be one "main" outer ring that contains all the others. - */ - @NonNull - Builder addRing(@NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... pn); - - @NonNull - Polygon build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRange.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRange.java deleted file mode 100644 index 3dd48915dba..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRange.java +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.time; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.text.ParseException; -import java.time.ZonedDateTime; -import java.util.Objects; -import java.util.Optional; - -/** - * A date range, as defined by the server type {@code - * org.apache.cassandra.db.marshal.DateRangeType}, corresponding to the Apache Solr type {@code - * DateRangeField}. 
- * - *

A date range can be either {@linkplain DateRange#DateRange(DateRangeBound) single-bounded}, in - * which case it represents a unique instant (e.g. "{@code 2001-01-01}"), or {@linkplain - * #DateRange(DateRangeBound, DateRangeBound) double-bounded}, in which case it represents an - * interval of time (e.g. "{@code [2001-01-01 TO 2002]}"). - * - *

Date range {@linkplain DateRangeBound bounds} are always inclusive; they must be either valid - * dates, or the special value {@link DateRangeBound#UNBOUNDED UNBOUNDED}, represented by a "{@code - * *}", e.g. "{@code [2001 TO *]}". - * - *

Instances can be more easily created with the {@link #parse(String)} method. - * - *

This class is immutable and thread-safe. - * - * @since DSE 5.1 - */ -public class DateRange implements Serializable { - - /** - * Parses the given string as a date range. - * - *

The given input must be compliant with Apache Solr type {@code - * DateRangeField} syntax; it can either be a {@linkplain #DateRange(DateRangeBound) - * single-bounded range}, or a {@linkplain #DateRange(DateRangeBound, DateRangeBound) - * double-bounded range}. - * - * @throws ParseException if the given string could not be parsed into a valid range. - * @see DateRangeBound#parseLowerBound(String) - * @see DateRangeBound#parseUpperBound(String) - */ - @NonNull - public static DateRange parse(@NonNull String source) throws ParseException { - if (Strings.isNullOrEmpty(source)) { - throw new ParseException("Date range is null or empty", 0); - } - - if (source.charAt(0) == '[') { - if (source.charAt(source.length() - 1) != ']') { - throw new ParseException( - "If date range starts with '[' it must end with ']'; got " + source, - source.length() - 1); - } - int middle = source.indexOf(" TO "); - if (middle < 0) { - throw new ParseException( - "If date range starts with '[' it must contain ' TO '; got " + source, 0); - } - String lowerBoundString = source.substring(1, middle); - int upperBoundStart = middle + 4; - String upperBoundString = source.substring(upperBoundStart, source.length() - 1); - DateRangeBound lowerBound; - try { - lowerBound = DateRangeBound.parseLowerBound(lowerBoundString); - } catch (Exception e) { - throw newParseException("Cannot parse date range lower bound: " + source, 1, e); - } - DateRangeBound upperBound; - try { - upperBound = DateRangeBound.parseUpperBound(upperBoundString); - } catch (Exception e) { - throw newParseException( - "Cannot parse date range upper bound: " + source, upperBoundStart, e); - } - return new DateRange(lowerBound, upperBound); - } else { - try { - return new DateRange(DateRangeBound.parseLowerBound(source)); - } catch (Exception e) { - throw newParseException("Cannot parse single date range bound: " + source, 0, e); - } - } - } - - @NonNull private final DateRangeBound lowerBound; - @Nullable private final 
DateRangeBound upperBound; - - /** - * Creates a "single bounded" instance, i.e., a date range whose upper and lower bounds are - * identical. - * - * @throws NullPointerException if {@code singleBound} is null. - */ - public DateRange(@NonNull DateRangeBound singleBound) { - this.lowerBound = Preconditions.checkNotNull(singleBound, "singleBound cannot be null"); - this.upperBound = null; - } - - /** - * Creates an instance composed of two distinct bounds. - * - * @throws NullPointerException if {@code lowerBound} or {@code upperBound} is null. - * @throws IllegalArgumentException if both {@code lowerBound} and {@code upperBound} are not - * unbounded and {@code lowerBound} is greater than {@code upperBound}. - */ - public DateRange(@NonNull DateRangeBound lowerBound, @NonNull DateRangeBound upperBound) { - Preconditions.checkNotNull(lowerBound, "lowerBound cannot be null"); - Preconditions.checkNotNull(upperBound, "upperBound cannot be null"); - if (!lowerBound.isUnbounded() - && !upperBound.isUnbounded() - && lowerBound.getTimestamp().compareTo(upperBound.getTimestamp()) >= 0) { - throw new IllegalArgumentException( - String.format( - "Lower bound of a date range should be before upper bound, got: [%s TO %s]", - lowerBound, upperBound)); - } - this.lowerBound = lowerBound; - this.upperBound = upperBound; - } - - /** Returns the lower bound of this range (inclusive). */ - @NonNull - public DateRangeBound getLowerBound() { - return lowerBound; - } - - /** - * Returns the upper bound of this range (inclusive), or empty if the range is {@linkplain - * #isSingleBounded() single-bounded}. - */ - @NonNull - public Optional getUpperBound() { - return Optional.ofNullable(upperBound); - } - - /** - * Returns whether this range is single-bounded, i.e. if the upper and lower bounds are identical. 
- */ - public boolean isSingleBounded() { - return upperBound == null; - } - - /** - * Returns the string representation of this range, in a format compatible with Apache Solr - * DateRageField syntax - * - * @see DateRangeBound#toString() - */ - @NonNull - @Override - public String toString() { - if (isSingleBounded()) { - return lowerBound.toString(); - } else { - return String.format("[%s TO %s]", lowerBound, upperBound); - } - } - - @Override - public boolean equals(@Nullable Object other) { - if (other == this) { - return true; - } else if (other instanceof DateRange) { - DateRange that = (DateRange) other; - return Objects.equals(this.lowerBound, that.lowerBound) - && Objects.equals(this.upperBound, that.upperBound); - - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(lowerBound, upperBound); - } - - private static ParseException newParseException(String message, int offset, Exception cause) { - ParseException parseException = new ParseException(message, offset); - parseException.initCause(cause); - return parseException; - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData the lower bound timestamp and precision, followed by the upper bound timestamp and - * precision, or two {@code null}s if the range is single-bounded. 
- */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1L; - - private final ZonedDateTime lowerBoundTimestamp; - private final DateRangePrecision lowerBoundPrecision; - private final ZonedDateTime upperBoundTimestamp; - private final DateRangePrecision upperBoundPrecision; - - SerializationProxy(DateRange input) { - this.lowerBoundTimestamp = input.lowerBound.getTimestamp(); - this.lowerBoundPrecision = input.lowerBound.getPrecision(); - if (input.upperBound != null) { - this.upperBoundTimestamp = input.upperBound.getTimestamp(); - this.upperBoundPrecision = input.upperBound.getPrecision(); - } else { - this.upperBoundTimestamp = null; - this.upperBoundPrecision = null; - } - } - - private Object readResolve() { - if (upperBoundTimestamp == null ^ upperBoundPrecision == null) { - // Should not happen, but protect against corrupted streams - throw new IllegalArgumentException( - "Invalid serialized form, upper bound timestamp and precision " - + "should be either both null or both non-null"); - } - - if (upperBoundTimestamp == null) { - return new DateRange(DateRangeBound.lowerBound(lowerBoundTimestamp, lowerBoundPrecision)); - } else { - return new DateRange( - DateRangeBound.lowerBound(lowerBoundTimestamp, lowerBoundPrecision), - DateRangeBound.upperBound(upperBoundTimestamp, upperBoundPrecision)); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangeBound.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangeBound.java deleted file mode 100644 index 1621b8bf742..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangeBound.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.time; - -import com.datastax.dse.driver.internal.core.search.DateRangeUtil; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.text.ParseException; -import java.time.ZonedDateTime; -import java.util.Calendar; -import java.util.Objects; - -/** - * A date range bound. - * - *

It is composed of a {@link ZonedDateTime} field and a corresponding {@link - * DateRangePrecision}. - * - *

Date range bounds are inclusive. The special value {@link #UNBOUNDED} denotes an un unbounded - * (infinite) bound, represented by a {@code *} sign. - * - *

This class is immutable and thread-safe. - */ -public class DateRangeBound { - - /** - * The unbounded {@link DateRangeBound} instance. It is syntactically represented by a {@code *} - * (star) sign. - */ - public static final DateRangeBound UNBOUNDED = new DateRangeBound(); - - /** - * Parses the given input as a lower date range bound. - * - *

The input should be a Lucene-compliant - * string. - * - *

The returned bound will have its {@linkplain DateRangePrecision precision} inferred from the - * input, and its timestamp will be {@linkplain DateRangePrecision#roundDown(ZonedDateTime) - * rounded down} to that precision. - * - *

Note that, in order to align with the server's parsing behavior, dates will always be parsed - * in the UTC time zone. - * - * @throws NullPointerException if {@code lowerBound} is {@code null}. - * @throws ParseException if the given input cannot be parsed. - */ - @NonNull - public static DateRangeBound parseLowerBound(@NonNull String source) throws ParseException { - Preconditions.checkNotNull(source); - Calendar calendar = DateRangeUtil.parseCalendar(source); - DateRangePrecision precision = DateRangeUtil.getPrecision(calendar); - return (precision == null) - ? UNBOUNDED - : lowerBound(DateRangeUtil.toZonedDateTime(calendar), precision); - } - - /** - * Parses the given input as an upper date range bound. - * - *

The input should be a Lucene-compliant - * string. - * - *

The returned bound will have its {@linkplain DateRangePrecision precision} inferred from the - * input, and its timestamp will be {@linkplain DateRangePrecision#roundUp(ZonedDateTime)} rounded - * up} to that precision. - * - *

Note that, in order to align with the server's behavior (e.g. when using date range literals - * in CQL query strings), dates must always be in the UTC time zone: an optional trailing {@code - * Z}" is allowed, but no other time zone ID (not even {@code UTC}, {@code GMT} or {@code +00:00}) - * is permitted. - * - * @throws NullPointerException if {@code upperBound} is {@code null}. - * @throws ParseException if the given input cannot be parsed. - */ - public static DateRangeBound parseUpperBound(String source) throws ParseException { - Preconditions.checkNotNull(source); - Calendar calendar = DateRangeUtil.parseCalendar(source); - DateRangePrecision precision = DateRangeUtil.getPrecision(calendar); - return (precision == null) - ? UNBOUNDED - : upperBound(DateRangeUtil.toZonedDateTime(calendar), precision); - } - - /** - * Creates a date range lower bound from the given date and precision. Temporal fields smaller - * than the precision will be rounded down. - */ - public static DateRangeBound lowerBound(ZonedDateTime timestamp, DateRangePrecision precision) { - return new DateRangeBound(precision.roundDown(timestamp), precision); - } - - /** - * Creates a date range upper bound from the given date and precision. Temporal fields smaller - * than the precision will be rounded up. 
- */ - public static DateRangeBound upperBound(ZonedDateTime timestamp, DateRangePrecision precision) { - return new DateRangeBound(precision.roundUp(timestamp), precision); - } - - @Nullable private final ZonedDateTime timestamp; - @Nullable private final DateRangePrecision precision; - - private DateRangeBound(@NonNull ZonedDateTime timestamp, @NonNull DateRangePrecision precision) { - Preconditions.checkNotNull(timestamp); - Preconditions.checkNotNull(precision); - this.timestamp = timestamp; - this.precision = precision; - } - - // constructor used for the special UNBOUNDED value - private DateRangeBound() { - this.timestamp = null; - this.precision = null; - } - - /** Whether this bound is unbounded (i.e. denotes the special {@code *} value). */ - public boolean isUnbounded() { - return this.timestamp == null && this.precision == null; - } - - /** - * Returns the timestamp of this bound. - * - * @throws IllegalStateException if this bound is {@linkplain #isUnbounded() unbounded}. - */ - @NonNull - public ZonedDateTime getTimestamp() { - if (isUnbounded()) { - throw new IllegalStateException( - "Can't call this method on UNBOUNDED, use isUnbounded() to check first"); - } - assert timestamp != null; - return timestamp; - } - - /** - * Returns the precision of this bound. - * - * @throws IllegalStateException if this bound is {@linkplain #isUnbounded() unbounded}. - */ - @NonNull - public DateRangePrecision getPrecision() { - if (isUnbounded()) { - throw new IllegalStateException( - "Can't call this method on UNBOUNDED, use isUnbounded() to check first"); - } - assert precision != null; - return precision; - } - - /** - * Returns this bound as a Lucene-compliant string. - * - *

Unbounded bounds always return "{@code *}"; all other bounds are formatted in one of the - * common ISO-8601 datetime formats, depending on their precision. - * - *

Note that Lucene expects timestamps in UTC only. Timezone presence is always optional, and - * if present, it must be expressed with the symbol "Z" exclusively. Therefore this method does - * not include any timezone information in the returned string, except for bounds with {@linkplain - * DateRangePrecision#MILLISECOND millisecond} precision, where the symbol "Z" is always appended - * to the resulting string. - */ - @NonNull - @Override - public String toString() { - if (isUnbounded()) { - return "*"; - } else { - assert timestamp != null && precision != null; - return precision.format(timestamp); - } - } - - @Override - public boolean equals(@Nullable Object other) { - if (other == this) { - return true; - } else if (other instanceof DateRangeBound) { - DateRangeBound that = (DateRangeBound) other; - return Objects.equals(this.timestamp, that.timestamp) - && Objects.equals(this.precision, that.precision); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(timestamp, precision); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecision.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecision.java deleted file mode 100644 index ce811466c38..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecision.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.time; - -import com.datastax.dse.driver.internal.core.search.DateRangeUtil; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeFormatterBuilder; -import java.time.temporal.ChronoField; -import java.time.temporal.ChronoUnit; -import java.util.Locale; -import java.util.Map; - -/** The precision of a {@link DateRangeBound}. 
*/ -public enum DateRangePrecision { - MILLISECOND( - 0x06, - ChronoUnit.MILLIS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd'T'HH:mm:ss.SSS") - .optionalStart() - .appendZoneId() - .optionalEnd() - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - SECOND( - 0x05, - ChronoUnit.SECONDS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd'T'HH:mm:ss") - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - MINUTE( - 0x04, - ChronoUnit.MINUTES, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd'T'HH:mm") - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - HOUR( - 0x03, - ChronoUnit.HOURS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd'T'HH") - .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - DAY( - 0x02, - ChronoUnit.DAYS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd") - .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) - .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - MONTH( - 0x01, - ChronoUnit.MONTHS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM") - .parseDefaulting(ChronoField.DAY_OF_MONTH, 1) - .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) - 
.parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - YEAR( - 0x00, - ChronoUnit.YEARS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu") - .parseDefaulting(ChronoField.MONTH_OF_YEAR, 1) - .parseDefaulting(ChronoField.DAY_OF_MONTH, 1) - .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) - .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)); - - private final byte encoding; - private final ChronoUnit roundingUnit; - // The formatter is only used for formatting (parsing is done with DateRangeUtil.parseCalendar to - // be exactly the same as DSE's). - // If that ever were to change, note that DateTimeFormatters with a time zone have a parsing bug - // in Java 8: the formatter's zone will always be used, even if the input string specifies one - // explicitly. 
- // See https://stackoverflow.com/questions/41999421 - private final DateTimeFormatter formatter; - - DateRangePrecision(int encoding, ChronoUnit roundingUnit, DateTimeFormatter formatter) { - this.encoding = (byte) encoding; - this.roundingUnit = roundingUnit; - this.formatter = formatter; - } - - private static final Map ENCODINGS; - - static { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (DateRangePrecision precision : values()) { - builder.put(precision.encoding, precision); - } - ENCODINGS = builder.build(); - } - - public static DateRangePrecision fromEncoding(byte encoding) { - DateRangePrecision precision = ENCODINGS.get(encoding); - if (precision == null) { - throw new IllegalArgumentException("Invalid precision encoding: " + encoding); - } - return precision; - } - - /** The code used to represent the precision when a date range is encoded to binary. */ - public byte getEncoding() { - return encoding; - } - - /** - * Rounds up the given timestamp to this precision. - * - *

Temporal fields smaller than this precision will be rounded up; other fields will be left - * untouched. - */ - @NonNull - public ZonedDateTime roundUp(@NonNull ZonedDateTime timestamp) { - Preconditions.checkNotNull(timestamp); - return DateRangeUtil.roundUp(timestamp, roundingUnit); - } - - /** - * Rounds down the given timestamp to this precision. - * - *

Temporal fields smaller than this precision will be rounded down; other fields will be left - * untouched. - */ - @NonNull - public ZonedDateTime roundDown(@NonNull ZonedDateTime timestamp) { - Preconditions.checkNotNull(timestamp); - return DateRangeUtil.roundDown(timestamp, roundingUnit); - } - - /** Formats the given timestamp according to this precision. */ - public String format(ZonedDateTime timestamp) { - return formatter.format(timestamp); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.java deleted file mode 100644 index 995de53959b..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.GraphExecutionInfoConverter; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Iterator; -import java.util.concurrent.CompletionStage; - -/** - * The result of an asynchronous graph query. - * - *

The default implementation returned by the driver is not thread-safe: the iterable - * returned by {@link #currentPage()} should only be iterated by a single thread. However, if - * subsequent pages are requested via {@link #fetchNextPage()}, it's safe to process those new - * instances in other threads (as long as each individual page of results is not accessed - * concurrently). - * - * @see GraphResultSet - */ -public interface AsyncGraphResultSet { - - /** The execution information for this page of results. */ - @NonNull - default ExecutionInfo getRequestExecutionInfo() { - return GraphExecutionInfoConverter.convert(getExecutionInfo()); - } - - /** - * The execution information for this page of results. - * - * @deprecated Use {@link #getRequestExecutionInfo()} instead. - */ - @Deprecated - @NonNull - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo(); - - /** How many rows are left before the current page is exhausted. */ - int remaining(); - - /** - * The nodes in the current page. To keep iterating beyond that, use {@link #hasMorePages()} and - * {@link #fetchNextPage()}. - * - *

Note that this method always returns the same object, and that that object can only be - * iterated once: nodes are "consumed" as they are read. - */ - @NonNull - Iterable currentPage(); - - /** - * Returns the next node, or {@code null} if the result set is exhausted. - * - *

This is convenient for queries that are known to return exactly one node. - */ - @Nullable - default GraphNode one() { - Iterator iterator = currentPage().iterator(); - return iterator.hasNext() ? iterator.next() : null; - } - - /** - * Whether there are more pages of results. If so, call {@link #fetchNextPage()} to fetch the next - * one asynchronously. - */ - boolean hasMorePages(); - - /** - * Fetch the next page of results asynchronously. - * - * @throws IllegalStateException if there are no more pages. Use {@link #hasMorePages()} to check - * if you can call this method. - */ - @NonNull - CompletionStage fetchNextPage() throws IllegalStateException; - - /** - * Cancels the query and asks the server to stop sending results. - * - *

At this time, graph queries are not paginated and the server sends all the results at once; - * therefore this method has no effect. - */ - void cancel(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.java deleted file mode 100644 index 2169dc5f053..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultBatchGraphStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -/** - * A graph statement that groups multiple mutating traversals together, to be executed in the - * same transaction. - * - *

It is reserved for graph mutations, and does not return any result. - * - *

All the mutations grouped in the batch will either all succeed, or they will all be discarded - * and return an error. - * - *

The default implementation returned by the driver is immutable and thread-safe. Each mutation - * operation returns a copy. If you chain many of those operations, it is recommended to use {@link - * #builder()} instead for better memory usage. - * - *

Typically used like so: - * - *

{@code
- * import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
- *
- * BatchGraphStatement statement =
- *     BatchGraphStatement.builder()
- *         .addTraversal(
- *                 g.addV("person").property("name", "batch1").property("age", 1))
- *         .addTraversal(
- *                 g.addV("person").property("name", "batch2").property("age", 2))
- *         .build();
- *
- * GraphResultSet graphResultSet = dseSession.execute(statement);
- * }
- * - * @see DseGraph#g - */ -public interface BatchGraphStatement - extends GraphStatement, Iterable { - - /** - * Create a new, empty instance. - * - *

Traversals can be added with {@link #addTraversal(GraphTraversal)}. - */ - @NonNull - static BatchGraphStatement newInstance() { - return new DefaultBatchGraphStatement( - ImmutableList.of(), - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - /** Create a new instance from the given list of traversals. */ - @NonNull - static BatchGraphStatement newInstance(@NonNull Iterable traversals) { - return new DefaultBatchGraphStatement( - traversals, - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - /** Create a new instance from the given list of traversals. */ - @NonNull - static BatchGraphStatement newInstance(@NonNull GraphTraversal... traversals) { - return newInstance(ImmutableList.copyOf(traversals)); - } - - /** - * Create a builder helper object to start creating a new instance. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static BatchGraphStatementBuilder builder() { - return new BatchGraphStatementBuilder(); - } - - /** - * Create a builder helper object to start creating a new instance with an existing statement as a - * template. The traversals and options set on the template will be copied for the new statement - * at the moment this method is called. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static BatchGraphStatementBuilder builder(@NonNull BatchGraphStatement template) { - return new BatchGraphStatementBuilder(template); - } - - /** - * Add a traversal to this statement. If many traversals need to be added, use a {@link - * #builder()}, or the {@link #addTraversals(Iterable)} method instead to avoid intermediary - * copies. - */ - @NonNull - BatchGraphStatement addTraversal(@NonNull GraphTraversal traversal); - - /** - * Adds several traversals to this statement. If this method is to be called many times, consider - * using a {@link #builder()} instead to avoid intermediary copies. - */ - @NonNull - BatchGraphStatement addTraversals(@NonNull Iterable traversals); - - /** Get the number of traversals already added to this statement. */ - int size(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatementBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatementBuilder.java deleted file mode 100644 index ac1b85bdc71..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatementBuilder.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultBatchGraphStatement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -/** - * A builder to create a batch graph statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class BatchGraphStatementBuilder - extends GraphStatementBuilderBase { - - private ImmutableList.Builder traversalsBuilder = ImmutableList.builder(); - private int traversalsCount; - - public BatchGraphStatementBuilder() { - // nothing to do - } - - public BatchGraphStatementBuilder(BatchGraphStatement template) { - super(template); - traversalsBuilder.addAll(template); - traversalsCount = template.size(); - } - - /** Add a traversal to this builder to include in the generated {@link BatchGraphStatement}. */ - @NonNull - public BatchGraphStatementBuilder addTraversal(@NonNull GraphTraversal traversal) { - traversalsBuilder.add(traversal); - traversalsCount += 1; - return this; - } - - /** - * Add several traversals to this builder to include in the generated {@link BatchGraphStatement}. - */ - @NonNull - public BatchGraphStatementBuilder addTraversals(@NonNull Iterable traversals) { - for (GraphTraversal traversal : traversals) { - traversalsBuilder.add(traversal); - traversalsCount += 1; - } - return this; - } - - /** - * Add several traversals to this builder to include in the generated {@link BatchGraphStatement}. - */ - @NonNull - public BatchGraphStatementBuilder addTraversals(@NonNull GraphTraversal... traversals) { - for (GraphTraversal traversal : traversals) { - traversalsBuilder.add(traversal); - traversalsCount += 1; - } - return this; - } - - /** Clears all the traversals previously added to this builder. */ - @NonNull - public BatchGraphStatementBuilder clearTraversals() { - traversalsBuilder = ImmutableList.builder(); - traversalsCount = 0; - return this; - } - - /** Returns the number of traversals added to this statement so far. 
*/ - public int getTraversalsCount() { - return traversalsCount; - } - - @NonNull - @Override - public BatchGraphStatement build() { - return new DefaultBatchGraphStatement( - traversalsBuilder.build(), - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - buildCustomPayload(), - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraph.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraph.java deleted file mode 100644 index dd1dbe95bc8..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraph.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultDseRemoteConnectionBuilder; -import com.datastax.oss.driver.api.core.CqlSession; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; - -/** - * General purpose utility class for interaction with DSE Graph via the DataStax Enterprise Java - * driver. - */ -public class DseGraph { - - /** - * IMPORTANT: As of Tinkerpop 3.3.5, you should no longer use this shortcut if you intend - * to connect the traversal to DSE Graph using a {@linkplain - * org.apache.tinkerpop.gremlin.process.remote.RemoteConnection remote connection}, for example - * via the {@link #remoteConnectionBuilder} method declared below. Instead of: - * - *

{@code
-   * DseSession session = ...;
-   * RemoteConnection remoteConnection = DseGraph.remoteConnectionBuilder(session).build();
-   * GraphTraversalSource g = DseGraph.g.withRemote(remoteConnection);
-   * }
- * - * You should now use {@link AnonymousTraversalSource#traversal()}, and adopt the following idiom: - * - *
{@code
-   * DseSession session = ...;
-   * RemoteConnection remoteConnection = DseGraph.remoteConnectionBuilder(session).build();
-   * GraphTraversalSource g = AnonymousTraversalSource.traversal().withRemote(remoteConnection);
-   * }
- * - * A general-purpose shortcut for a non-connected TinkerPop {@link GraphTraversalSource} - * based on an immutable empty graph. This is really just a shortcut to {@code - * EmptyGraph.instance().traversal();}. - * - *

It can be used to create {@link FluentGraphStatement} instances (recommended); for ease of - * use you may statically import this variable. - * - *

Calling {@code g.getGraph()} will return a local immutable empty graph which is in no way - * connected to the DSE Graph server, it will not allow to modify a DSE Graph directly. To act on - * data stored in DSE Graph you must use {@linkplain - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal traversal}s such as - * {@code DseGraph.g.V()}, {@code DseGraph.g.addV/addE()}. - */ - public static final GraphTraversalSource g = EmptyGraph.instance().traversal(); - - /** - * Returns a builder helper class to help create {@link - * org.apache.tinkerpop.gremlin.process.remote.RemoteConnection} implementations that seamlessly - * connect to DSE Graph using the {@link CqlSession} in parameter. - */ - public static DseGraphRemoteConnectionBuilder remoteConnectionBuilder(CqlSession dseSession) { - return new DefaultDseRemoteConnectionBuilder(dseSession); - } - - private DseGraph() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraphRemoteConnectionBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraphRemoteConnectionBuilder.java deleted file mode 100644 index c4210a5b3dd..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraphRemoteConnectionBuilder.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection; - -/** - * A builder helper to create a {@link RemoteConnection} that will be used to build - * implicitly-executing fluent traversals. - * - *

To create an instance of this, use the {@link DseGraph#remoteConnectionBuilder(CqlSession)} - * method: - * - *

{@code
- * DseSession dseSession = DseSession.builder().build();
- * GraphTraversalSource g = AnonymousTraversalSource.traversal().withRemote(DseGraph.remoteConnectionBuilder(dseSession).build());
- * List vertices = g.V().hasLabel("person").toList();
- * }
- * - * @see CqlSession - */ -public interface DseGraphRemoteConnectionBuilder { - - /** Build the remote connection that was configured with this builder. */ - RemoteConnection build(); - - /** - * Set a configuration profile that will be used for every traversal built using the remote - * connection. - * - *

For the list of options available for Graph requests, see the {@code reference.conf} - * configuration file. - */ - DseGraphRemoteConnectionBuilder withExecutionProfile(DriverExecutionProfile executionProfile); - - /** - * Set the name of an execution profile that will be used for every traversal using from the - * remote connection. Named profiles are pre-defined in the driver configuration. - * - *

For the list of options available for Graph requests, see the {@code reference.conf} - * configuration file. - */ - DseGraphRemoteConnectionBuilder withExecutionProfileName(String executionProfileName); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.java deleted file mode 100644 index 051c6501c65..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultFluentGraphStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -/** - * A graph statement that uses a TinkerPop {@link GraphTraversal} as the query. - * - *

Typically used like so: - * - *

{@code
- * import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
- *
- * FluentGraphStatement statement = FluentGraphStatement.newInstance(g.V().has("name", "marko"));
- *
- * GraphResultSet graphResultSet = dseSession.execute(statement);
- * }
- * - * @see DseGraph#g - */ -public interface FluentGraphStatement extends GraphStatement { - - /** - * Create a new instance from the given traversal. - * - *

Use {@link #builder(GraphTraversal)} if you want to set more options before building the - * final statement instance. - */ - @NonNull - static FluentGraphStatement newInstance(@NonNull GraphTraversal traversal) { - return new DefaultFluentGraphStatement( - traversal, - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - /** - * Create a builder object to start creating a new instance from the given traversal. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static FluentGraphStatementBuilder builder(@NonNull GraphTraversal traversal) { - return new FluentGraphStatementBuilder(traversal); - } - - /** - * Create a builder helper object to start creating a new instance with an existing statement as a - * template. The traversal and options set on the template will be copied for the new statement at - * the moment this method is called. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static FluentGraphStatementBuilder builder(@NonNull FluentGraphStatement template) { - return new FluentGraphStatementBuilder(template); - } - - /** The underlying TinkerPop object representing the traversal executed by this statement. */ - @NonNull - GraphTraversal getTraversal(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatementBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatementBuilder.java deleted file mode 100644 index 59e588c564a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatementBuilder.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultFluentGraphStatement; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -/** - * A builder to create a fluent graph statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class FluentGraphStatementBuilder - extends GraphStatementBuilderBase { - - private GraphTraversal traversal; - - public FluentGraphStatementBuilder(@NonNull GraphTraversal traversal) { - this.traversal = traversal; - } - - public FluentGraphStatementBuilder(@NonNull FluentGraphStatement template) { - super(template); - this.traversal = template.getTraversal(); - } - - @NonNull - @Override - public FluentGraphStatement build() { - return new DefaultFluentGraphStatement( - this.traversal, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - buildCustomPayload(), - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphExecutionInfo.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphExecutionInfo.java deleted file mode 100644 index 758f6b358ed..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphExecutionInfo.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; - -/** - * Information about the execution of a graph statement. - * - * @deprecated This interface is not used by any driver component anymore; the driver now exposes - * instances of {@link com.datastax.oss.driver.api.core.cql.ExecutionInfo} for all Graph - * queries. - */ -@Deprecated -public interface GraphExecutionInfo { - - /** The statement that was executed. */ - GraphStatement getStatement(); - - /** The node that was used as a coordinator to successfully complete the query. */ - Node getCoordinator(); - - /** - * The number of speculative executions that were started for this query. - * - *

This does not include the initial, normal execution of the query. Therefore, if speculative - * executions are disabled, this will always be 0. If they are enabled and one speculative - * execution was triggered in addition to the initial execution, this will be 1, etc. - * - * @see SpeculativeExecutionPolicy - */ - int getSpeculativeExecutionCount(); - - /** - * The index of the execution that completed this query. - * - *

0 represents the initial, normal execution of the query, 1 the first speculative execution, - * etc. - * - * @see SpeculativeExecutionPolicy - */ - int getSuccessfulExecutionIndex(); - - /** - * The errors encountered on previous coordinators, if any. - * - *

The list is in chronological order, based on the time that the driver processed the error - * responses. If speculative executions are enabled, they run concurrently so their errors will be - * interleaved. A node can appear multiple times (if the retry policy decided to retry on the same - * node). - */ - List> getErrors(); - - /** - * The server-side warnings for this query, if any (otherwise the list will be empty). - * - *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower - * versions, this list will always be empty. - */ - List getWarnings(); - - /** - * The custom payload sent back by the server with the response, if any (otherwise the map will be - * empty). - * - *

This method returns a read-only view of the original map, but its values remain inherently - * mutable. If multiple clients will read these values, care should be taken not to corrupt the - * data (in particular, preserve the indices by calling {@link ByteBuffer#duplicate()}). - * - *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower - * versions, this map will always be empty. - */ - Map getIncomingPayload(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphNode.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphNode.java deleted file mode 100644 index 97d48a6b04d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphNode.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Property; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; - -/** - * A node in a tree-like structure representing a Graph or a Graph component. - * - *

It can be: - * - *

    - *
  • a scalar value of a primitive type (boolean, string, int, long, double); - *
  • a graph element (vertex, edge, path or property); - *
  • a list of nodes; - *
  • a set of nodes; - *
  • a map of nodes. - *
- * - * This interface provides test methods to find out what a node represents, and conversion methods - * to cast it to a particular Java type. Two generic methods {@link #as(Class)} and {@link - * #as(GenericType)} can produce any arbitrary Java type, provided that the underlying serialization - * runtime has been correctly configured to support the requested conversion. - */ -public interface GraphNode { - - /** Whether this node represents a {@code null} value. */ - boolean isNull(); - - /** - * Returns {@code true} if this node is a {@link Map}, and {@code false} otherwise. - * - *

If this method returns {@code true}, you can convert this node with {@link #asMap()}, or use - * {@link #keys()} and {@link #getByKey(Object)} to access the individual fields (note that - * entries are not ordered, so {@link #getByIndex(int)} does not work). - */ - boolean isMap(); - - /** The keys of this map node, or an empty iterator if it is not a map. */ - Iterable keys(); - - /** - * Returns the value for the given key as a node. - * - *

If this node is not a map, or does not contain the specified key, {@code null} is returned. - * - *

If the property value has been explicitly set to {@code null}, implementors may return a - * special "null node" instead of {@code null}. - */ - GraphNode getByKey(Object key); - - /** Deserializes and returns this node as a {@link Map}. */ - Map asMap(); - - /** - * Returns {@code true} if this node is a {@link List}, and {@code false} otherwise. - * - *

If this method returns {@code true}, you can convert this node with {@link #asList()}, or - * use {@link #size()} and {@link #getByIndex(int)} to access the individual fields. - */ - boolean isList(); - - /** The size of the current node, if it is a list or map, or {@code 0} otherwise. */ - int size(); - - /** - * Returns the element at the given index as a node. - * - *

If this node is not a list, or {@code index} is out of bounds (i.e. less than zero or {@code - * >= size()}, {@code null} is returned; no exception will be thrown. - * - *

If the requested element has been explicitly set to {@code null}, implementors may return a - * special "null node" instead of {@code null}. - */ - GraphNode getByIndex(int index); - - /** Deserializes and returns this node as a {@link List}. */ - List asList(); - - /** - * Returns {@code true} if this node is a simple scalar value, (i.e., string, boolean or number), - * and {@code false} otherwise. - * - *

If this method returns {@code true}, you can convert this node with {@link #asString()}, - * {@link #asBoolean()}, {@link #asInt()}, {@link #asLong()} or {@link #asDouble()}. - */ - boolean isValue(); - - /** - * Returns this node as an integer. - * - *

If the underlying object is not convertible to integer, implementors may choose to either - * throw {@link ClassCastException} or return [null | empty | some default value], whichever is - * deemed more appropriate. - */ - int asInt(); - - /** - * Returns this node as a boolean. - * - *

If the underlying object is not convertible to boolean, implementors may choose to either - * throw {@link ClassCastException} or return [null | empty | some default value], whichever is - * deemed more appropriate. - */ - boolean asBoolean(); - - /** - * Returns this node as a long integer. - * - *

If the underlying object is not convertible to long, implementors may choose to either throw - * {@link ClassCastException} or return [null | empty | some default value], whichever is deemed - * more appropriate. - */ - long asLong(); - - /** - * Returns this node as a long integer. - * - *

If the underlying object is not convertible to double, implementors may choose to either - * throw {@link ClassCastException} or return [null | empty | some default value], whichever is - * deemed more appropriate. - */ - double asDouble(); - - /** - * A valid string representation of this node. - * - *

If the underlying object is not convertible to a string, implementors may choose to either - * throw {@link ClassCastException} or return an empty string, whichever is deemed more - * appropriate. - */ - String asString(); - - /** - * Deserializes and returns this node as an instance of {@code clazz}. - * - *

Before attempting such a conversion, there must be an appropriate converter configured on - * the underlying serialization runtime. - */ - ResultT as(Class clazz); - - /** - * Deserializes and returns this node as an instance of the given {@link GenericType type}. - * - *

Before attempting such a conversion, there must be an appropriate converter configured on - * the underlying serialization runtime. - */ - ResultT as(GenericType type); - - /** - * Returns {@code true} if this node is a {@link Vertex}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asVertex()} can be safely called. - */ - boolean isVertex(); - - /** Returns this node as a Tinkerpop {@link Vertex}. */ - Vertex asVertex(); - - /** - * Returns {@code true} if this node is a {@link Edge}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asEdge()} can be safely called. - */ - boolean isEdge(); - - /** Returns this node as a Tinkerpop {@link Edge}. */ - Edge asEdge(); - - /** - * Returns {@code true} if this node is a {@link Path}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asPath()} can be safely called. - */ - boolean isPath(); - - /** Returns this node as a Tinkerpop {@link Path}. */ - Path asPath(); - - /** - * Returns {@code true} if this node is a {@link Property}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asProperty()} can be safely called. - */ - boolean isProperty(); - - /** Returns this node as a Tinkerpop {@link Property}. */ - Property asProperty(); - - /** - * Returns {@code true} if this node is a {@link VertexProperty}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asVertexProperty()} ()} can be safely - * called. - */ - boolean isVertexProperty(); - - /** Returns this node as a Tinkerpop {@link VertexProperty}. */ - VertexProperty asVertexProperty(); - - /** - * Returns {@code true} if this node is a {@link Set}, and {@code false} otherwise. - * - *

If this method returns {@code true}, you can convert this node with {@link #asSet()}, or use - * {@link #size()}. - */ - boolean isSet(); - - /** Deserializes and returns this node as a {@link Set}. */ - Set asSet(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphResultSet.java deleted file mode 100644 index d9c8d8fa460..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphResultSet.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.GraphExecutionInfoConverter; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -/** - * The result of a synchronous Graph query. - * - *

This object is a container for {@link GraphNode} objects that will contain the data returned - * by Graph queries. - * - *

Note that this object can only be iterated once: items are "consumed" as they are read, - * subsequent calls to {@code iterator()} will return the same iterator instance. - * - *

The default implementation returned by the driver is not thread-safe. It can only be - * iterated by the thread that invoked {@code dseSession.execute}. - * - * @see GraphNode - * @see GraphSession#execute(GraphStatement) - */ -public interface GraphResultSet extends Iterable { - - /** - * Returns the next node, or {@code null} if the result set is exhausted. - * - *

This is convenient for queries that are known to return exactly one row, for example count - * queries. - */ - @Nullable - default GraphNode one() { - Iterator graphNodeIterator = iterator(); - return graphNodeIterator.hasNext() ? graphNodeIterator.next() : null; - } - - /** - * Returns all the remaining nodes as a list; not recommended for paginated queries that return - * a large number of nodes. - * - *

At this time (DSE 6.0.0), graph queries are not paginated and the server sends all the - * results at once. - */ - @NonNull - default List all() { - if (!iterator().hasNext()) { - return Collections.emptyList(); - } - return ImmutableList.copyOf(this); - } - - /** - * Cancels the query and asks the server to stop sending results. - * - *

At this time (DSE 6.0.0), graph queries are not paginated and the server sends all the - * results at once; therefore this method has no effect. - */ - void cancel(); - - /** - * The execution information for the query that have been performed to assemble this result set. - */ - @NonNull - default ExecutionInfo getRequestExecutionInfo() { - return GraphExecutionInfoConverter.convert(getExecutionInfo()); - } - - /** @deprecated Use {@link #getRequestExecutionInfo()} instead. */ - @Deprecated - @NonNull - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphSession.java deleted file mode 100644 index b985bc56353..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphSession.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.concurrent.CompletionStage; - -/** - * A session that has the ability to execute DSE Graph requests. - * - *

Generally this interface won't be referenced directly in an application; instead, you should - * use {@link CqlSession}, which is a combination of this interface and many others for a more - * integrated usage of DataStax Enterprise's multi-model database via a single entry point. However, - * it is still possible to cast a {@code CqlSession} to a {@code GraphSession} to only expose the - * DSE Graph execution methods. - */ -public interface GraphSession extends Session { - - /** - * Executes a graph statement synchronously (the calling thread blocks until the result becomes - * available). - * - *

The driver provides different kinds of graph statements: - * - *

    - *
  • {@link FluentGraphStatement} (recommended): wraps a fluent TinkerPop {@linkplain - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal traversal}; - *
  • {@link BatchGraphStatement}: groups together multiple mutating traversals ({@code - * g.addV()/g.addE()}) inside a single transaction and avoids multiple client-server - * round-trips. Improves performance in data ingestion scenarios; - *
  • {@link ScriptGraphStatement}: wraps a Gremlin-groovy script provided as a plain Java - * string. Required for administrative queries such as creating/dropping a graph, - * configuration and schema. - *
- * - *

This feature is only available with DataStax Enterprise. Executing graph queries against an - * Apache Cassandra® cluster will result in a runtime error. - * - * @see GraphResultSet - * @param graphStatement the graph query to execute (that can be any {@code GraphStatement}). - * @return the result of the graph query. That result will never be null but can be empty. - */ - @NonNull - default GraphResultSet execute(@NonNull GraphStatement graphStatement) { - return Objects.requireNonNull( - execute(graphStatement, GraphStatement.SYNC), - "The graph processor should never return a null result"); - } - - /** - * Executes a graph statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - *

This feature is only available with DataStax Enterprise. Executing graph queries against an - * Apache Cassandra® cluster will result in a runtime error. - * - * @see #execute(GraphStatement) - * @see AsyncGraphResultSet - * @param graphStatement the graph query to execute (that can be any {@code GraphStatement}). - * @return the {@code CompletionStage} on the result of the graph query. - */ - @NonNull - default CompletionStage executeAsync( - @NonNull GraphStatement graphStatement) { - return Objects.requireNonNull( - execute(graphStatement, GraphStatement.ASYNC), - "The graph processor should never return a null result"); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatement.java deleted file mode 100644 index f770469b824..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatement.java +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import java.util.concurrent.CompletionStage; - -/** - * A request to execute a DSE Graph query. - * - * @param the "self type" used for covariant returns in subtypes. - */ -public interface GraphStatement> extends Request { - - /** - * The type returned when a graph statement is executed synchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but graph statements will generally be run with one of - * the DSE driver's built-in helper methods (such as {@link CqlSession#execute(GraphStatement)}). - */ - GenericType SYNC = GenericType.of(GraphResultSet.class); - - /** - * The type returned when a graph statement is executed asynchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but graph statements will generally be run with one of - * the DSE driver's built-in helper methods (such as {@link - * CqlSession#executeAsync(GraphStatement)}). - */ - GenericType> ASYNC = - new GenericType>() {}; - - /** - * Set the idempotence to use for execution. - * - *

Idempotence defines whether it will be possible to speculatively re-execute the statement, - * based on a {@link SpeculativeExecutionPolicy}. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param idempotent a boolean instance to set a statement-specific value, or {@code null} to use - * the default idempotence defined in the configuration. - */ - @NonNull - @CheckReturnValue - SelfT setIdempotent(@Nullable Boolean idempotent); - - /** - * {@inheritDoc} - * - *

Note that, if this method returns {@code null}, graph statements fall back to a dedicated - * configuration option: {@code basic.graph.timeout}. See {@code reference.conf} in the DSE driver - * distribution for more details. - */ - @Nullable - @Override - Duration getTimeout(); - - /** - * Sets how long to wait for this request to complete. This is a global limit on the duration of a - * session.execute() call, including any retries the driver might do. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newTimeout the timeout to use, or {@code null} to use the default value defined in the - * configuration. - * @see #getTimeout() - */ - @NonNull - @CheckReturnValue - SelfT setTimeout(@Nullable Duration newTimeout); - - /** - * Sets the {@link Node} that should handle this query. - * - *

In the general case, use of this method is heavily discouraged and should only be - * used in specific cases, such as applying a series of schema changes, which may be advantageous - * to execute in sequence on the same node. - * - *

Configuring a specific node causes the configured {@link LoadBalancingPolicy} to be - * completely bypassed. However, if the load balancing policy dictates that the node is at - * distance {@link NodeDistance#IGNORED} or there is no active connectivity to the node, the - * request will fail with a {@link NoNodeAvailableException}. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newNode The node that should be used to handle executions of this statement or null to - * delegate to the configured load balancing policy. - */ - @NonNull - @CheckReturnValue - SelfT setNode(@Nullable Node newNode); - - /** - * Get the timestamp set on the statement. - * - *

By default, if left unset, the value returned by this is {@code Long.MIN_VALUE}, which means - * that the timestamp will be set via the Timestamp Generator. - * - * @return the timestamp set on this statement. - */ - long getTimestamp(); - - /** - * Set the timestamp to use for execution. - * - *

By default the timestamp generator (see reference config file) will be used for timestamps, - * unless set explicitly via this method. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @CheckReturnValue - SelfT setTimestamp(long timestamp); - - /** - * Sets the configuration profile to use for execution. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setExecutionProfile(@Nullable DriverExecutionProfile executionProfile); - - /** - * Sets the name of the driver configuration profile that will be used for execution. - * - *

For all the driver's built-in implementations, this method has no effect if {@link - * #setExecutionProfile} has been called with a non-null argument. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setExecutionProfileName(@Nullable String name); - - /** - * Sets the custom payload to use for execution. - * - *

This is intended for advanced use cases, such as tools with very advanced knowledge of DSE - * Graph, and reserved for internal settings like transaction settings. Note that the driver also - * adds graph-related options to the payload, in addition to the ones provided here; it won't - * override any option that is already present. - * - *

All the driver's built-in statement implementations are immutable, and return a new instance - * from this method. However custom implementations may choose to be mutable and return the same - * instance. - * - *

Note that it's your responsibility to provide a thread-safe map. This can be achieved with a - * concurrent or immutable implementation, or by making it effectively immutable (meaning that - * it's never modified after being set on the statement). - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setCustomPayload(@NonNull Map newCustomPayload); - - /** - * The name of the graph to use for this statement. - * - *

This is the programmatic equivalent of the configuration option {@code basic.graph.name}, - * and takes precedence over it. That is, if this property is non-null, then the configuration - * will be ignored. - */ - @Nullable - String getGraphName(); - - /** - * Sets the graph name. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getGraphName() - */ - @NonNull - @CheckReturnValue - SelfT setGraphName(@Nullable String newGraphName); - - /** - * The name of the traversal source to use for this statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * basic.graph.traversal-source}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - */ - @Nullable - String getTraversalSource(); - - /** - * Sets the traversal source. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getTraversalSource() - */ - @NonNull - @CheckReturnValue - SelfT setTraversalSource(@Nullable String newTraversalSource); - - /** - * The DSE graph sub-protocol to use for this statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * advanced.graph.sub-protocol}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - */ - @Nullable - String getSubProtocol(); - - /** - * Sets the sub-protocol. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getSubProtocol() - */ - @NonNull - @CheckReturnValue - SelfT setSubProtocol(@Nullable String newSubProtocol); - - /** - * Returns the consistency level to use for the statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * basic.request.consistency}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - */ - @Nullable - ConsistencyLevel getConsistencyLevel(); - - /** - * Sets the consistency level to use for this statement. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newConsistencyLevel the consistency level to use, or null to use the default value - * defined in the configuration. - * @see #getConsistencyLevel() - */ - @CheckReturnValue - SelfT setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel); - - /** - * The consistency level to use for the internal read queries that will be produced by this - * statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * basic.graph.read-consistency-level}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - * - *

If this property isn't set here or in the configuration, the default consistency level will - * be used ({@link #getConsistencyLevel()} or {@code basic.request.consistency}). - */ - @Nullable - ConsistencyLevel getReadConsistencyLevel(); - - /** - * Sets the read consistency level. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getReadConsistencyLevel() - */ - @NonNull - @CheckReturnValue - SelfT setReadConsistencyLevel(@Nullable ConsistencyLevel newReadConsistencyLevel); - - /** - * The consistency level to use for the internal write queries that will be produced by this - * statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * basic.graph.write-consistency-level}, and takes precedence over it. That is, if this property - * is non-null, then the configuration will be ignored. - * - *

If this property isn't set here or in the configuration, the default consistency level will - * be used ({@link #getConsistencyLevel()} or {@code basic.request.consistency}). - */ - @Nullable - ConsistencyLevel getWriteConsistencyLevel(); - - /** - * Sets the write consistency level. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getWriteConsistencyLevel() - */ - @NonNull - @CheckReturnValue - SelfT setWriteConsistencyLevel(@Nullable ConsistencyLevel newWriteConsistencyLevel); - - /** Graph statements do not have a per-query keyspace, this method always returns {@code null}. */ - @Nullable - @Override - default CqlIdentifier getKeyspace() { - return null; - } - - /** Graph statements can't be routed, this method always returns {@code null}. */ - @Nullable - @Override - default CqlIdentifier getRoutingKeyspace() { - return null; - } - - /** Graph statements can't be routed, this method always returns {@code null}. */ - @Nullable - @Override - default ByteBuffer getRoutingKey() { - return null; - } - - /** Graph statements can't be routed, this method always returns {@code null}. */ - @Nullable - @Override - default Token getRoutingToken() { - return null; - } - - /** - * Whether tracing information should be recorded for this statement. - * - *

This method is only exposed for future extensibility. At the time of writing, graph - * statements do not support tracing, and this always returns {@code false}. - */ - default boolean isTracing() { - return false; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatementBuilderBase.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatementBuilderBase.java deleted file mode 100644 index 5cb48613cf5..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatementBuilderBase.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public abstract class GraphStatementBuilderBase< - SelfT extends GraphStatementBuilderBase, - StatementT extends GraphStatement> { - - @SuppressWarnings({"unchecked"}) - private final SelfT self = (SelfT) this; - - protected Boolean isIdempotent; - protected Duration timeout; - protected Node node; - protected long timestamp = Statement.NO_DEFAULT_TIMESTAMP; - protected DriverExecutionProfile executionProfile; - protected String executionProfileName; - private NullAllowingImmutableMap.Builder customPayloadBuilder; - protected String graphName; - protected String traversalSource; - protected String subProtocol; - protected ConsistencyLevel consistencyLevel; - protected ConsistencyLevel readConsistencyLevel; - protected ConsistencyLevel writeConsistencyLevel; - - protected GraphStatementBuilderBase() { - // nothing to do - } - - protected GraphStatementBuilderBase(StatementT template) { - this.isIdempotent = template.isIdempotent(); - this.timeout = template.getTimeout(); - this.node = template.getNode(); - this.timestamp = template.getTimestamp(); - this.executionProfile = template.getExecutionProfile(); - this.executionProfileName = template.getExecutionProfileName(); - if (!template.getCustomPayload().isEmpty()) { - this.customPayloadBuilder = - NullAllowingImmutableMap.builder() - .putAll(template.getCustomPayload()); - } - this.graphName = 
template.getGraphName(); - this.traversalSource = template.getTraversalSource(); - this.subProtocol = template.getSubProtocol(); - this.consistencyLevel = template.getConsistencyLevel(); - this.readConsistencyLevel = template.getReadConsistencyLevel(); - this.writeConsistencyLevel = template.getWriteConsistencyLevel(); - } - - /** @see GraphStatement#setIdempotent(Boolean) */ - @NonNull - public SelfT setIdempotence(@Nullable Boolean idempotent) { - this.isIdempotent = idempotent; - return self; - } - - /** @see GraphStatement#setTimeout(Duration) */ - @NonNull - public SelfT setTimeout(@Nullable Duration timeout) { - this.timeout = timeout; - return self; - } - - /** @see GraphStatement#setNode(Node) */ - @NonNull - public SelfT setNode(@Nullable Node node) { - this.node = node; - return self; - } - - /** @see GraphStatement#setTimestamp(long) */ - @NonNull - public SelfT setTimestamp(long timestamp) { - this.timestamp = timestamp; - return self; - } - - /** @see GraphStatement#setExecutionProfileName(String) */ - @NonNull - public SelfT setExecutionProfileName(@Nullable String executionProfileName) { - this.executionProfileName = executionProfileName; - return self; - } - - /** @see GraphStatement#setExecutionProfile(DriverExecutionProfile) */ - @NonNull - public SelfT setExecutionProfile(@Nullable DriverExecutionProfile executionProfile) { - this.executionProfile = executionProfile; - this.executionProfileName = null; - return self; - } - - /** @see GraphStatement#setCustomPayload(Map) */ - @NonNull - public SelfT addCustomPayload(@NonNull String key, @Nullable ByteBuffer value) { - if (customPayloadBuilder == null) { - customPayloadBuilder = NullAllowingImmutableMap.builder(); - } - customPayloadBuilder.put(key, value); - return self; - } - - /** @see GraphStatement#setCustomPayload(Map) */ - @NonNull - public SelfT clearCustomPayload() { - customPayloadBuilder = null; - return self; - } - - /** @see GraphStatement#setGraphName(String) */ - @NonNull - public 
SelfT setGraphName(@Nullable String graphName) { - this.graphName = graphName; - return self; - } - - /** @see GraphStatement#setTraversalSource(String) */ - @NonNull - public SelfT setTraversalSource(@Nullable String traversalSource) { - this.traversalSource = traversalSource; - return self; - } - - /** @see GraphStatement#setSubProtocol(String) */ - @NonNull - public SelfT setSubProtocol(@Nullable String subProtocol) { - this.subProtocol = subProtocol; - return self; - } - - /** @see GraphStatement#setConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setConsistencyLevel(@Nullable ConsistencyLevel consistencyLevel) { - this.consistencyLevel = consistencyLevel; - return self; - } - - /** @see GraphStatement#setReadConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setReadConsistencyLevel(@Nullable ConsistencyLevel readConsistencyLevel) { - this.readConsistencyLevel = readConsistencyLevel; - return self; - } - - /** @see GraphStatement#setWriteConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setWriteConsistencyLevel(@Nullable ConsistencyLevel writeConsistencyLevel) { - this.writeConsistencyLevel = writeConsistencyLevel; - return self; - } - - @NonNull - protected Map buildCustomPayload() { - return (customPayloadBuilder == null) - ? NullAllowingImmutableMap.of() - : customPayloadBuilder.build(); - } - - /** Create the statement with the configuration defined by this builder object. */ - @NonNull - public abstract StatementT build(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/PagingEnabledOptions.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/PagingEnabledOptions.java deleted file mode 100644 index f59d0e50e93..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/PagingEnabledOptions.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -public enum PagingEnabledOptions { - ENABLED, - DISABLED, - AUTO -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.java deleted file mode 100644 index 2ad7aafc232..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultScriptGraphStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; - -/** - * A graph statement that uses a Gremlin-groovy script the query. - * - *

These statements are generally used for DSE Graph set-up queries, such as creating or dropping - * a graph, or defining a graph schema. For graph traversals, we recommend using {@link - * FluentGraphStatement} instead. To do bulk data ingestion in graph, we recommend using {@link - * BatchGraphStatement} instead. - * - *

Typical usage: - * - *

{@code
- * ScriptGraphStatement statement = ScriptGraphStatement.newInstance("schema.propertyKey('age').Int().create()");
- *
- * GraphResultSet graphResultSet = dseSession.execute(statement);
- * }
- */ -public interface ScriptGraphStatement extends GraphStatement { - - /** Create a new instance from the given script. */ - @NonNull - static ScriptGraphStatement newInstance(@NonNull String script) { - return new DefaultScriptGraphStatement( - script, - NullAllowingImmutableMap.of(), - null, - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - /** - * Create a builder object to start creating a new instance from the given script. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static ScriptGraphStatementBuilder builder(@NonNull String script) { - return new ScriptGraphStatementBuilder(script); - } - - /** - * Create a builder helper object to start creating a new instance with an existing statement as a - * template. The script and options set on the template will be copied for the new statement at - * the moment this method is called. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static ScriptGraphStatementBuilder builder(@NonNull ScriptGraphStatement template) { - return new ScriptGraphStatementBuilder(template); - } - - /** The Gremlin-groovy script representing the graph query. */ - @NonNull - String getScript(); - - /** - * Whether the statement is a system query, or {@code null} if it defaults to the value defined in - * the configuration. - * - * @see #setSystemQuery(Boolean) - */ - @Nullable - Boolean isSystemQuery(); - - /** - * Defines if this statement is a system query. - * - *

Script statements that access the {@code system} variable must not specify a graph - * name (otherwise {@code system} is not available). However, if your application executes a lot - * of non-system statements, it is convenient to configure the graph name in your configuration to - * avoid repeating it every time. This method allows you to ignore that global graph name for a - * specific statement. - * - *

This property is the programmatic equivalent of the configuration option {@code - * basic.graph.is-system-query}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newValue {@code true} to mark this statement as a system query (the driver will ignore - * any graph name set on the statement or the configuration); {@code false} to mark it as a - * non-system query; {@code null} to default to the value defined in the configuration. - * @see #isSystemQuery() - */ - @NonNull - ScriptGraphStatement setSystemQuery(@Nullable Boolean newValue); - - /** - * The query parameters to send along the request. - * - * @see #setQueryParam(String, Object) - */ - @NonNull - Map getQueryParams(); - - /** - * Set a value for a parameter defined in the Groovy script. - * - *

The script engine in the DSE Graph server allows to define parameters in a Groovy script and - * set the values of these parameters as a binding. Defining parameters allows to re-use scripts - * and only change their parameters values, which improves the performance of the script executed, - * so defining parameters is encouraged; however, for optimal Graph traversal performance, we - * recommend either using {@link BatchGraphStatement}s for data ingestion, or {@link - * FluentGraphStatement} for normal traversals. - * - *

Parameters in a Groovy script are always named; unlike CQL, they are not prefixed by a - * column ({@code :}). - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * If many parameters are to be set in a query, it is recommended to create the statement with - * {@link #builder(String)} instead. - * - * @param name the name of the parameter defined in the script. If the statement already had a - * binding for this name, it gets replaced. - * @param value the value that will be transmitted with the request. - */ - @NonNull - ScriptGraphStatement setQueryParam(@NonNull String name, @Nullable Object value); - - /** - * Removes a binding for the given name from this statement. - * - *

If the statement did not have such a binding, this method has no effect and returns the same - * statement instance. Otherwise, the driver's built-in implementation returns a new instance - * (however custom implementations may choose to be mutable and return the same instance). - * - * @see #setQueryParam(String, Object) - */ - @NonNull - ScriptGraphStatement removeQueryParam(@NonNull String name); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatementBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatementBuilder.java deleted file mode 100644 index 1985c58955f..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatementBuilder.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultScriptGraphStatement; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * A builder to create a script graph statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class ScriptGraphStatementBuilder - extends GraphStatementBuilderBase { - - private String script; - private Boolean isSystemQuery; - private final Map queryParams; - - public ScriptGraphStatementBuilder() { - this.queryParams = Maps.newHashMap(); - } - - public ScriptGraphStatementBuilder(String script) { - this.script = script; - this.queryParams = Maps.newHashMap(); - } - - public ScriptGraphStatementBuilder(ScriptGraphStatement template) { - super(template); - this.script = template.getScript(); - this.queryParams = Maps.newHashMap(template.getQueryParams()); - this.isSystemQuery = template.isSystemQuery(); - } - - @NonNull - public ScriptGraphStatementBuilder setScript(@NonNull String script) { - this.script = script; - return this; - } - - /** @see ScriptGraphStatement#isSystemQuery() */ - @NonNull - public ScriptGraphStatementBuilder setSystemQuery(@Nullable Boolean isSystemQuery) { - this.isSystemQuery = isSystemQuery; - return this; - } - - /** - * Set a value for a parameter defined in the script query. - * - * @see ScriptGraphStatement#setQueryParam(String, Object) - */ - @NonNull - public ScriptGraphStatementBuilder setQueryParam(@NonNull String name, @Nullable Object value) { - this.queryParams.put(name, value); - return this; - } - - /** - * Set multiple values for named parameters defined in the script query. - * - * @see ScriptGraphStatement#setQueryParam(String, Object) - */ - @NonNull - public ScriptGraphStatementBuilder setQueryParams(@NonNull Map params) { - this.queryParams.putAll(params); - return this; - } - - /** - * Removes a parameter. - * - *

This is useful if the builder was {@linkplain - * ScriptGraphStatement#builder(ScriptGraphStatement) initialized with a template statement} that - * has more parameters than desired. - * - * @see ScriptGraphStatement#setQueryParam(String, Object) - * @see #clearQueryParams() - */ - @NonNull - public ScriptGraphStatementBuilder removeQueryParam(@NonNull String name) { - this.queryParams.remove(name); - return this; - } - - /** Clears all the parameters previously added to this builder. */ - public ScriptGraphStatementBuilder clearQueryParams() { - this.queryParams.clear(); - return this; - } - - @NonNull - @Override - public ScriptGraphStatement build() { - Preconditions.checkNotNull(this.script, "Script hasn't been defined in this builder."); - return new DefaultScriptGraphStatement( - this.script, - this.queryParams, - this.isSystemQuery, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - buildCustomPayload(), - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollection.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollection.java deleted file mode 100644 index fdbf3fbe397..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollection.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import com.datastax.dse.driver.internal.core.graph.CqlCollectionPredicate; -import java.util.Collection; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.javatuples.Pair; - -/** - * Predicates that can be used on CQL collections (lists, sets and maps). - * - *

Note: CQL collection predicates are only available when using the binary subprotocol. - */ -public class CqlCollection { - - /** - * Checks if the target collection contains the given value. - * - * @param value the value to look for; cannot be {@code null}. - * @return a predicate to apply in a {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. - */ - @SuppressWarnings("unchecked") - public static , V> P contains(V value) { - return new P(CqlCollectionPredicate.contains, value); - } - - /** - * Checks if the target map contains the given key. - * - * @param key the key to look for; cannot be {@code null}. - * @return a predicate to apply in a {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. - */ - @SuppressWarnings("unchecked") - public static , K> P containsKey(K key) { - return new P(CqlCollectionPredicate.containsKey, key); - } - - /** - * Checks if the target map contains the given value. - * - * @param value the value to look for; cannot be {@code null}. - * @return a predicate to apply in a {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. - */ - @SuppressWarnings("unchecked") - public static , V> P containsValue(V value) { - return new P(CqlCollectionPredicate.containsValue, value); - } - - /** - * Checks if the target map contains the given entry. - * - * @param key the key to look for; cannot be {@code null}. - * @param value the value to look for; cannot be {@code null}. - * @return a predicate to apply in a {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. 
- */ - @SuppressWarnings("unchecked") - public static , K, V> P entryEq(K key, V value) { - return new P(CqlCollectionPredicate.entryEq, new Pair<>(key, value)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Geo.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Geo.java deleted file mode 100644 index 65dd84d0076..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Geo.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.dse.driver.internal.core.graph.GeoPredicate; -import com.datastax.dse.driver.internal.core.graph.GeoUtils; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -public interface Geo { - - enum Unit { - MILES(GeoUtils.MILES_TO_KM * GeoUtils.KM_TO_DEG), - KILOMETERS(GeoUtils.KM_TO_DEG), - METERS(GeoUtils.KM_TO_DEG / 1000.0), - DEGREES(1); - - private final double multiplier; - - Unit(double multiplier) { - this.multiplier = multiplier; - } - - /** Convert distance to degrees (used internally only). */ - public double toDegrees(double distance) { - return distance * multiplier; - } - } - - /** - * Finds whether an entity is inside the given circular area using a geo coordinate system. - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P inside(Point center, double radius, Unit units) { - return new P<>(GeoPredicate.inside, new Distance(center, units.toDegrees(radius))); - } - - /** - * Finds whether an entity is inside the given circular area using a cartesian coordinate system. - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P inside(Point center, double radius) { - return new P<>(GeoPredicate.insideCartesian, new Distance(center, radius)); - } - - /** - * Finds whether an entity is inside the given polygon. - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P inside(Polygon polygon) { - return new P<>(GeoPredicate.insideCartesian, polygon); - } - - /** - * Creates a point from the given coordinates. 
- * - *

This is just a shortcut to {@link Point#fromCoordinates(double, double)}. It is duplicated - * here so that {@code Geo} can be used as a single entry point in Gremlin-groovy scripts. - */ - @NonNull - static Point point(double x, double y) { - return Point.fromCoordinates(x, y); - } - - /** - * Creates a line string from the given (at least 2) points. - * - *

This is just a shortcut to {@link LineString#fromPoints(Point, Point, Point...)}. It is - * duplicated here so that {@code Geo} can be used as a single entry point in Gremlin-groovy - * scripts. - */ - @NonNull - static LineString lineString( - @NonNull Point point1, @NonNull Point point2, @NonNull Point... otherPoints) { - return LineString.fromPoints(point1, point2, otherPoints); - } - - /** - * Creates a line string from the coordinates of its points. - * - *

This is provided for backward compatibility with previous DSE versions. We recommend {@link - * #lineString(Point, Point, Point...)} instead. - */ - @NonNull - static LineString lineString(double... coordinates) { - if (coordinates.length % 2 != 0) { - throw new IllegalArgumentException("lineString() must be passed an even number of arguments"); - } else if (coordinates.length < 4) { - throw new IllegalArgumentException( - "lineString() must be passed at least 4 arguments (2 points)"); - } - Point point1 = Point.fromCoordinates(coordinates[0], coordinates[1]); - Point point2 = Point.fromCoordinates(coordinates[2], coordinates[3]); - Point[] otherPoints = new Point[coordinates.length / 2 - 2]; - for (int i = 4; i < coordinates.length; i += 2) { - otherPoints[i / 2 - 2] = Point.fromCoordinates(coordinates[i], coordinates[i + 1]); - } - return LineString.fromPoints(point1, point2, otherPoints); - } - - /** - * Creates a polygon from the given (at least 3) points. - * - *

This is just a shortcut to {@link Polygon#fromPoints(Point, Point, Point, Point...)}. It is - * duplicated here so that {@code Geo} can be used as a single entry point in Gremlin-groovy - * scripts. - */ - @NonNull - static Polygon polygon( - @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... otherPoints) { - return Polygon.fromPoints(p1, p2, p3, otherPoints); - } - - /** - * Creates a polygon from the coordinates of its points. - * - *

This is provided for backward compatibility with previous DSE versions. We recommend {@link - * #polygon(Point, Point, Point, Point...)} instead. - */ - @NonNull - static Polygon polygon(double... coordinates) { - if (coordinates.length % 2 != 0) { - throw new IllegalArgumentException("polygon() must be passed an even number of arguments"); - } else if (coordinates.length < 6) { - throw new IllegalArgumentException( - "polygon() must be passed at least 6 arguments (3 points)"); - } - Point point1 = Point.fromCoordinates(coordinates[0], coordinates[1]); - Point point2 = Point.fromCoordinates(coordinates[2], coordinates[3]); - Point point3 = Point.fromCoordinates(coordinates[4], coordinates[5]); - Point[] otherPoints = new Point[coordinates.length / 2 - 3]; - for (int i = 6; i < coordinates.length; i += 2) { - otherPoints[i / 2 - 3] = Point.fromCoordinates(coordinates[i], coordinates[i + 1]); - } - return Polygon.fromPoints(point1, point2, point3, otherPoints); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Search.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Search.java deleted file mode 100644 index e285c118c8a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Search.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import com.datastax.dse.driver.internal.core.graph.EditDistance; -import com.datastax.dse.driver.internal.core.graph.SearchPredicate; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -public interface Search { - - /** - * Search any instance of a certain token within the text property targeted (case insensitive). - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P token(String value) { - return new P<>(SearchPredicate.token, value); - } - - /** - * Search any instance of a certain token prefix within the text property targeted (case - * insensitive). - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P tokenPrefix(String value) { - return new P<>(SearchPredicate.tokenPrefix, value); - } - - /** - * Search any instance of the provided regular expression for the targeted property (case - * insensitive). - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P tokenRegex(String value) { - return new P<>(SearchPredicate.tokenRegex, value); - } - - /** - * Search for a specific prefix at the beginning of the text property targeted (case sensitive). - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P prefix(String value) { - return new P<>(SearchPredicate.prefix, value); - } - - /** - * Search for this regular expression inside the text property targeted (case sensitive). 
- * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P regex(String value) { - return new P<>(SearchPredicate.regex, value); - } - - /** - * Supports finding words which are a within a specific distance away (case insensitive). - * - *

Example: the search expression is {@code phrase("Hello world", 2)} - * - *

    - *
  • the inserted value "Hello world" is found - *
  • the inserted value "Hello wild world" is found - *
  • the inserted value "Hello big wild world" is found - *
  • the inserted value "Hello the big wild world" is not found - *
  • the inserted value "Goodbye world" is not found. - *
- * - * @param query the string to look for in the value - * @param distance the number of terms allowed between two correct terms to find a value. - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P phrase(String query, int distance) { - return new P<>(SearchPredicate.phrase, new EditDistance(query, distance)); - } - - /** - * Supports fuzzy searches based on the Damerau-Levenshtein Distance, or Edit Distance algorithm - * (case sensitive). - * - *

Example: the search expression is {@code fuzzy("david", 1)} - * - *

    - *
  • the inserted value "david" is found - *
  • the inserted value "dawid" is found - *
  • the inserted value "davids" is found - *
  • the inserted value "dewid" is not found - *
- * - * @param query the string to look for in the value - * @param distance the number of "uncertainties" allowed for the Levenshtein algorithm. - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P fuzzy(String query, int distance) { - return new P<>(SearchPredicate.fuzzy, new EditDistance(query, distance)); - } - - /** - * Supports fuzzy searches based on the Damerau-Levenshtein Distance, or Edit Distance algorithm - * after having tokenized the data stored (case insensitive). - * - *

Example: the search expression is {@code tokenFuzzy("david", 1)} - * - *

    - *
  • the inserted value "david" is found - *
  • the inserted value "dawid" is found - *
  • the inserted value "hello-dawid" is found - *
  • the inserted value "dewid" is not found - *
- * - * @param query the string to look for in the value - * @param distance the number of "uncertainties" allowed for the Levenshtein algorithm. - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P tokenFuzzy(String query, int distance) { - return new P<>(SearchPredicate.tokenFuzzy, new EditDistance(query, distance)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphNode.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphNode.java deleted file mode 100644 index ad7849633c6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphNode.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A {@link GraphNode} produced by a {@linkplain ReactiveGraphResultSet reactive graph result set}. - * - *

This is essentially an extension of the driver's {@link GraphNode} object that also exposes - * useful information about {@linkplain #getExecutionInfo() request execution} (note however that - * this information is also exposed at result set level for convenience). - * - * @see ReactiveGraphSession - * @see ReactiveGraphResultSet - */ -public interface ReactiveGraphNode extends GraphNode { - - /** - * The execution information for the paged request that produced this result. - * - *

This object is the same for two rows pertaining to the same page, but differs for rows - * pertaining to different pages. - * - * @return the execution information for the paged request that produced this result. - * @see ReactiveGraphResultSet#getExecutionInfos() - */ - @NonNull - ExecutionInfo getExecutionInfo(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphResultSet.java deleted file mode 100644 index a0e3231750e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphResultSet.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.reactivestreams.Publisher; - -/** - * A {@link Publisher} of {@link ReactiveGraphNode}s returned by a {@link ReactiveGraphSession}. - * - *

By default, all implementations returned by the driver are cold, unicast, single-subscriber - * only publishers. In other words, they do not support multiple subscriptions; consider - * caching the results produced by such publishers if you need to consume them by more than one - * downstream subscriber. - * - *

Also, note that reactive graph result sets may emit items to their subscribers on an internal - * driver IO thread. Subscriber implementors are encouraged to abide by Reactive Streams - * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside - * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down - * the driver and impact performance. Instead, they should asynchronously dispatch received signals - * to their processing logic. - * - *

This interface exists mainly to expose useful information about {@linkplain - * #getExecutionInfos() request execution}. This is particularly convenient for queries that do not - * return rows; for queries that do return rows, it is also possible, and oftentimes easier, to - * access that same information {@linkplain ReactiveGraphNode at node level}. - * - * @see ReactiveGraphSession#executeReactive(GraphStatement) - * @see ReactiveGraphNode - */ -public interface ReactiveGraphResultSet extends Publisher { - - /** - * Returns {@linkplain ExecutionInfo information about the execution} of all requests that have - * been performed so far to assemble this result set. - * - *

If the query is not paged, this publisher will emit exactly one item as soon as the response - * arrives, then complete. If the query is paged, it will emit multiple items, one per page; then - * it will complete when the last page arrives. If the query execution fails, then this publisher - * will fail with the same error. - * - *

By default, publishers returned by this method do not support multiple subscriptions. - * - * @see ReactiveGraphNode#getExecutionInfo() - */ - @NonNull - Publisher getExecutionInfos(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphSession.java deleted file mode 100644 index 88f0e5def61..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphSession.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.internal.core.graph.reactive.ReactiveGraphRequestProcessor; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; - -/** - * A {@link Session} that offers utility methods to issue graph queries using reactive-style - * programming. 
- */ -public interface ReactiveGraphSession extends Session { - - /** - * Returns a {@link ReactiveGraphResultSet} that, once subscribed to, executes the given query and - * emits all the results. - * - *

See the javadocs of {@link ReactiveGraphResultSet} for important remarks anc caveats - * regarding the subscription to and consumption of reactive graph result sets. - * - * @param statement the statement to execute. - * @return The {@link ReactiveGraphResultSet} that will publish the returned results. - * @see ReactiveGraphResultSet - * @see ReactiveGraphNode - */ - @NonNull - default ReactiveGraphResultSet executeReactive(@NonNull GraphStatement statement) { - return Objects.requireNonNull( - execute(statement, ReactiveGraphRequestProcessor.REACTIVE_GRAPH_RESULT_SET)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.java deleted file mode 100644 index 88dbc164588..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; - -/** The keys for the additional DSE-specific properties stored in {@link Node#getExtras()}. */ -public class DseNodeProperties { - - /** - * The DSE version that the node is running. - * - *

The associated value in {@link Node#getExtras()} is a {@link Version}). - */ - public static final String DSE_VERSION = "DSE_VERSION"; - - /** - * The value of the {@code server_id} field in the {@code peers} system table for this node. - * - *

This is the single identifier of the machine running a DSE instance. If DSE has been - * configured with Multi-Instance, the {@code server_id} helps identifying the single physical - * machine that runs the multiple DSE instances. If DSE is not configured with DSE Multi-Instance, - * the {@code server_id} will be automatically set and be unique for each node. - * - *

This information is only available if connecting to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is a {@code String}). - * - * @see DSE - * Multi-Instance (DSE Administrator Guide) - * @see - * server_id (DSE Administrator Guide) - */ - public static final String SERVER_ID = "SERVER_ID"; - - /** - * The DSE workloads that the node is running. - * - *

This is based on the {@code workload} or {@code workloads} columns in {@code system.local} - * and {@code system.peers}. - * - *

Workload labels may vary depending on the DSE version in use; e.g. DSE 5.1 may report two - * distinct workloads: {@code Search} and {@code Analytics}, while DSE 5.0 would report a single - * {@code SearchAnalytics} workload instead. It is up to users to deal with such discrepancies; - * the driver simply returns the workload labels as reported by DSE, without any form of - * pre-processing (with the exception of Graph in DSE 5.0, which is stored in a separate column, - * but will be reported as {@code Graph} here). - * - *

The associated value in {@link Node#getExtras()} is an immutable {@code Set}. - */ - public static final String DSE_WORKLOADS = "DSE_WORKLOADS"; - - /** - * The port for the native transport connections on the DSE node. - * - *

The native transport port is {@code 9042} by default but can be changed on instances - * requiring specific firewall configurations. This can be configured in the {@code - * cassandra.yaml} configuration file under the {@code native_transport_port} property. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String NATIVE_TRANSPORT_PORT = "NATIVE_TRANSPORT_PORT"; - - /** - * The port for the encrypted native transport connections on the DSE node. - * - *

In most scenarios enabling client communications in DSE will result in using a single port - * that will only accept encrypted connections (by default the port {@code 9042} is reused since - * unencrypted connections are not allowed). - * - *

However, it is possible to configure DSE to use both encrypted and a non-encrypted - * communication ports with clients. In that case the port accepting encrypted connections will - * differ from the non-encrypted one (see {@link #NATIVE_TRANSPORT_PORT}) and will be exposed via - * this method. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String NATIVE_TRANSPORT_PORT_SSL = "NATIVE_TRANSPORT_PORT_SSL"; - - /** - * The storage port used by the DSE node. - * - *

The storage port is used for internal communication between the DSE server nodes. This port - * is never used by the driver. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String STORAGE_PORT = "STORAGE_PORT"; - - /** - * The encrypted storage port used by the DSE node. - * - *

If inter-node encryption is enabled on the DSE cluster, nodes will communicate securely - * between each other via this port. This port is never used by the driver. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String STORAGE_PORT_SSL = "STORAGE_PORT_SSL"; - - /** - * The JMX port used by this node. - * - *

The JMX port can be configured in the {@code cassandra-env.sh} configuration file separately - * on each node. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String JMX_PORT = "JMX_PORT"; -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.java deleted file mode 100644 index 609c64f7c15..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Optional; - -/** - * Specialized aggregate metadata for DSE. - * - *

It adds support for the DSE-specific {@link #getDeterministic() DETERMINISTIC} keyword. - */ -public interface DseAggregateMetadata extends AggregateMetadata { - - /** @deprecated Use {@link #getDeterministic()} instead. */ - @Deprecated - boolean isDeterministic(); - - /** - * Indicates if this aggregate is deterministic. A deterministic aggregate means that given a - * particular input, the aggregate will always produce the same output. - * - *

This method returns {@linkplain Optional#empty() empty} if this information was not found in - * the system tables, regardless of the actual aggregate characteristics; this is the case for all - * versions of DSE older than 6.0.0. - * - * @return Whether or not this aggregate is deterministic; or {@linkplain Optional#empty() empty} - * if such information is not available in the system tables. - */ - default Optional getDeterministic() { - return Optional.of(isDeterministic()); - } - - @NonNull - @Override - default String describe(boolean pretty) { - // Easiest to just copy the OSS describe() method and add in DETERMINISTIC - ScriptBuilder builder = new ScriptBuilder(pretty); - builder - .append("CREATE AGGREGATE ") - .append(getKeyspace()) - .append(".") - .append(getSignature().getName()) - .append("("); - boolean first = true; - for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { - if (first) { - first = false; - } else { - builder.append(","); - } - DataType type = getSignature().getParameterTypes().get(i); - builder.append(type.asCql(false, pretty)); - } - builder - .increaseIndent() - .append(")") - .newLine() - .append("SFUNC ") - .append(getStateFuncSignature().getName()) - .newLine() - .append("STYPE ") - .append(getStateType().asCql(false, pretty)); - - if (getFinalFuncSignature().isPresent()) { - builder.newLine().append("FINALFUNC ").append(getFinalFuncSignature().get().getName()); - } - if (getInitCond().isPresent()) { - Optional formatInitCond = formatInitCond(); - assert formatInitCond.isPresent(); - builder.newLine().append("INITCOND ").append(formatInitCond.get()); - } - // add DETERMINISTIC if present - if (getDeterministic().orElse(false)) { - builder.newLine().append("DETERMINISTIC"); - } - return builder.append(";").build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseColumnMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseColumnMetadata.java 
deleted file mode 100644 index 62b5650697e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseColumnMetadata.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; - -/** - * Specialized column metadata for DSE. - * - *

This type exists only for future extensibility; currently, it is identical to {@link - * ColumnMetadata}. - */ -public interface DseColumnMetadata extends ColumnMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseEdgeMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseEdgeMetadata.java deleted file mode 100644 index 59ee8a277ff..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseEdgeMetadata.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** Edge metadata, for a table that was created with CREATE TABLE ... WITH EDGE LABEL. */ -public interface DseEdgeMetadata { - - /** The label of the edge in graph. */ - @NonNull - CqlIdentifier getLabelName(); - - /** The identifier of the table representing the incoming vertex. */ - @NonNull - CqlIdentifier getFromTable(); - - /** The label of the incoming vertex in graph. 
*/ - @NonNull - CqlIdentifier getFromLabel(); - - /** The columns in this table that match the partition key of the incoming vertex table. */ - @NonNull - List getFromPartitionKeyColumns(); - - /** The columns in this table that match the clustering columns of the incoming vertex table. */ - @NonNull - List getFromClusteringColumns(); - - /** The identifier of the table representing the outgoing vertex. */ - @NonNull - CqlIdentifier getToTable(); - - /** The label of the outgoing vertex in graph. */ - @NonNull - CqlIdentifier getToLabel(); - - /** The columns in this table that match the partition key of the outgoing vertex table. */ - @NonNull - List getToPartitionKeyColumns(); - - /** The columns in this table that match the clustering columns of the outgoing vertex table. */ - @NonNull - List getToClusteringColumns(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.java deleted file mode 100644 index 91298795959..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Optional; - -/** - * Specialized function metadata for DSE. - * - *

It adds support for the DSE-specific {@link #getDeterministic() DETERMINISTIC} and {@link - * #getMonotonicity() MONOTONIC} keywords. - */ -public interface DseFunctionMetadata extends FunctionMetadata { - - /** The monotonicity of a function. */ - enum Monotonicity { - - /** - * Indicates that the function is fully monotonic on all of its arguments. This means that it is - * either entirely non-increasing or non-decreasing. Full monotonicity is required to use the - * function in a GROUP BY clause. - */ - FULLY_MONOTONIC, - - /** - * Indicates that the function is partially monotonic, meaning that partial application over - * some of the its arguments is monotonic. Currently (DSE 6.0.0), CQL only allows partial - * monotonicity on exactly one argument. This may change in a future CQL version. - */ - PARTIALLY_MONOTONIC, - - /** Indicates that the function is not monotonic. */ - NOT_MONOTONIC, - } - - /** @deprecated Use {@link #getDeterministic()} instead. */ - @Deprecated - boolean isDeterministic(); - - /** - * Indicates if this function is deterministic. A deterministic function means that given a - * particular input, the function will always produce the same output. - * - *

This method returns {@linkplain Optional#empty() empty} if this information was not found in - * the system tables, regardless of the actual function characteristics; this is the case for all - * versions of DSE older than 6.0.0. - * - * @return Whether or not this function is deterministic; or {@linkplain Optional#empty() empty} - * if such information is not available in the system tables. - */ - default Optional getDeterministic() { - return Optional.of(isDeterministic()); - } - - /** @deprecated use {@link #getMonotonicity()} instead. */ - @Deprecated - boolean isMonotonic(); - - /** - * Returns this function's {@link Monotonicity}. - * - *

A function can be either: - * - *

    - *
  • fully monotonic. In that case, this method returns {@link Monotonicity#FULLY_MONOTONIC}, - * and {@link #getMonotonicArgumentNames()} returns all the arguments; - *
  • partially monotonic, meaning that partial application over some of the arguments is - * monotonic. Currently (DSE 6.0.0), CQL only allows partial monotonicity on exactly one - * argument. This may change in a future CQL version. In that case, this method returns - * {@link Monotonicity#PARTIALLY_MONOTONIC}, and {@link #getMonotonicArgumentNames()} - * returns a singleton list; - *
  • not monotonic. In that case, this method return {@link Monotonicity#NOT_MONOTONIC} and - * {@link #getMonotonicArgumentNames()} returns an empty list. - *
- * - *

Full monotonicity is required to use the function in a GROUP BY clause. - * - *

This method returns {@linkplain Optional#empty() empty} if this information was not found in - * the system tables, regardless of the actual function characteristics; this is the case for all - * versions of DSE older than 6.0.0. - * - * @return this function's {@link Monotonicity}; or {@linkplain Optional#empty() empty} if such - * information is not available in the system tables. - */ - default Optional getMonotonicity() { - return Optional.of( - isMonotonic() - ? Monotonicity.FULLY_MONOTONIC - : getMonotonicArgumentNames().isEmpty() - ? Monotonicity.NOT_MONOTONIC - : Monotonicity.PARTIALLY_MONOTONIC); - } - - /** - * Returns a list of argument names that are monotonic. - * - *

See {@link #getMonotonicity()} for explanations on monotonicity, and the possible values - * returned by this method. - * - *

NOTE: For versions of DSE older than 6.0.0, this method will always return an empty list, - * regardless of the actual function characteristics. - * - * @return the argument names that the function is monotonic on. - */ - @NonNull - List getMonotonicArgumentNames(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - builder - .append("CREATE FUNCTION ") - .append(getKeyspace()) - .append(".") - .append(getSignature().getName()) - .append("("); - boolean first = true; - for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { - if (first) { - first = false; - } else { - builder.append(","); - } - DataType type = getSignature().getParameterTypes().get(i); - CqlIdentifier name = getParameterNames().get(i); - builder.append(name).append(" ").append(type.asCql(false, pretty)); - } - builder - .append(")") - .increaseIndent() - .newLine() - .append(isCalledOnNullInput() ? "CALLED ON NULL INPUT" : "RETURNS NULL ON NULL INPUT") - .newLine() - .append("RETURNS ") - .append(getReturnType().asCql(false, true)) - .newLine(); - // handle deterministic and monotonic - if (getDeterministic().orElse(false)) { - builder.append("DETERMINISTIC").newLine(); - } - if (getMonotonicity().isPresent()) { - switch (getMonotonicity().get()) { - case FULLY_MONOTONIC: - builder.append("MONOTONIC").newLine(); - break; - case PARTIALLY_MONOTONIC: - builder.append("MONOTONIC ON ").append(getMonotonicArgumentNames().get(0)).newLine(); - break; - default: - break; - } - } - builder - .append("LANGUAGE ") - .append(getLanguage()) - .newLine() - .append("AS '") - .append(getBody()) - .append("';"); - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphKeyspaceMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphKeyspaceMetadata.java deleted file mode 100644 index 8978a8858f9..00000000000 --- 
a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphKeyspaceMetadata.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** - * Specialized keyspace metadata, that handles the graph-specific properties introduced in DSE 6.8. - * - *

This type only exists to avoid breaking binary compatibility. When the driver is connected to - * a DSE cluster, all the {@link KeyspaceMetadata} instances it returns can be safely downcast to - * this interface. - */ -public interface DseGraphKeyspaceMetadata extends DseKeyspaceMetadata { - - /** The graph engine that will be used to interpret this keyspace. */ - @NonNull - Optional getGraphEngine(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (isVirtual()) { - builder.append("/* VIRTUAL "); - } else { - builder.append("CREATE "); - } - builder - .append("KEYSPACE ") - .append(getName()) - .append(" WITH replication = { 'class' : '") - .append(getReplication().get("class")) - .append("'"); - for (Map.Entry entry : getReplication().entrySet()) { - if (!entry.getKey().equals("class")) { - builder - .append(", '") - .append(entry.getKey()) - .append("': '") - .append(entry.getValue()) - .append("'"); - } - } - builder.append(" } AND durable_writes = ").append(Boolean.toString(isDurableWrites())); - getGraphEngine() - .ifPresent( - graphEngine -> builder.append(" AND graph_engine ='").append(graphEngine).append("'")); - builder.append(";"); - if (isVirtual()) { - builder.append(" */"); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphTableMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphTableMetadata.java deleted file mode 100644 index 8f340b3b447..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphTableMetadata.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.dse.driver.internal.core.metadata.schema.ScriptHelper; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** - * Specialized table metadata, that handles the graph-specific properties introduced in DSE 6.8. - * - *

This type only exists to avoid breaking binary compatibility. When the driver is connected to - * a DSE cluster, all the {@link TableMetadata} instances it returns can be safely downcast to this - * interface. - */ -public interface DseGraphTableMetadata extends DseTableMetadata { - /** - * The vertex metadata if this table represents a vertex in graph, otherwise empty. - * - *

This is mutually exclusive with {@link #getEdge()}. - */ - @NonNull - Optional getVertex(); - - /** - * The edge metadata if this table represents an edge in graph, otherwise empty. - * - *

This is mutually exclusive with {@link #getVertex()}. - */ - @NonNull - Optional getEdge(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (isVirtual()) { - builder.append("/* VIRTUAL "); - } else { - builder.append("CREATE "); - } - - builder - .append("TABLE ") - .append(getKeyspace()) - .append(".") - .append(getName()) - .append(" (") - .newLine() - .increaseIndent(); - - for (ColumnMetadata column : getColumns().values()) { - builder.append(column.getName()).append(" ").append(column.getType().asCql(true, pretty)); - if (column.isStatic()) { - builder.append(" static"); - } - builder.append(",").newLine(); - } - - // PK - builder.append("PRIMARY KEY ("); - if (getPartitionKey().size() == 1) { // PRIMARY KEY (k - builder.append(getPartitionKey().get(0).getName()); - } else { // PRIMARY KEY ((k1, k2) - builder.append("("); - boolean first = true; - for (ColumnMetadata pkColumn : getPartitionKey()) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(pkColumn.getName()); - } - builder.append(")"); - } - // PRIMARY KEY (, cc1, cc2, cc3) - for (ColumnMetadata clusteringColumn : getClusteringColumns().keySet()) { - builder.append(", ").append(clusteringColumn.getName()); - } - builder.append(")"); - - builder.newLine().decreaseIndent().append(")"); - - builder.increaseIndent(); - if (isCompactStorage()) { - builder.andWith().append("COMPACT STORAGE"); - } - if (getClusteringColumns().containsValue(ClusteringOrder.DESC)) { - builder.andWith().append("CLUSTERING ORDER BY ("); - boolean first = true; - for (Map.Entry entry : - getClusteringColumns().entrySet()) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(entry.getKey().getName()).append(" ").append(entry.getValue().name()); - } - builder.append(")"); - } - getVertex() - .ifPresent( - vertex -> { - builder.andWith().append("VERTEX LABEL").append(" 
").append(vertex.getLabelName()); - }); - getEdge() - .ifPresent( - edge -> { - builder.andWith().append("EDGE LABEL").append(" ").append(edge.getLabelName()); - ScriptHelper.appendEdgeSide( - builder, - edge.getFromTable(), - edge.getFromLabel(), - edge.getFromPartitionKeyColumns(), - edge.getFromClusteringColumns(), - "FROM"); - ScriptHelper.appendEdgeSide( - builder, - edge.getToTable(), - edge.getToLabel(), - edge.getToPartitionKeyColumns(), - edge.getToClusteringColumns(), - "TO"); - }); - Map options = getOptions(); - RelationParser.appendOptions(options, builder); - builder.append(";"); - if (isVirtual()) { - builder.append(" */"); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseIndexMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseIndexMetadata.java deleted file mode 100644 index ac4c1057fbf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseIndexMetadata.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; - -/** - * Specialized index metadata for DSE. - * - *

This type exists only for future extensibility; currently, it is identical to {@link - * IndexMetadata}. - */ -public interface DseIndexMetadata extends IndexMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseKeyspaceMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseKeyspaceMetadata.java deleted file mode 100644 index bc5cb002802..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseKeyspaceMetadata.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; - -/** - * Specialized keyspace metadata for DSE. - * - *

Notes: - * - *

    - *
  • this type can always be safely downcast to {@link DseGraphKeyspaceMetadata} (the only - * reason the two interfaces are separate is for backward compatibility). - *
  • all returned elements can be cast to their DSE counterparts, for example {@link - * TableMetadata} to {@link DseTableMetadata}. - *
- */ -public interface DseKeyspaceMetadata extends KeyspaceMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseRelationMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseRelationMetadata.java deleted file mode 100644 index 55b36cb7fe5..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseRelationMetadata.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata; - -/** - * Specialized table or materialized view metadata for DSE. - * - *

This type exists only for future extensibility; currently, it is identical to {@link - * RelationMetadata}. - * - *

Note that all returned {@link ColumnMetadata} can be cast to {@link DseColumnMetadata}. - */ -public interface DseRelationMetadata extends RelationMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseTableMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseTableMetadata.java deleted file mode 100644 index a140f93bc2e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseTableMetadata.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; - -/** - * Specialized table metadata for DSE. - * - *

Notes: - * - *

    - *
  • this type can always be safely downcast to {@link DseGraphTableMetadata} (the only reason - * the two interfaces are separate is for backward compatibility). - *
  • all returned {@link ColumnMetadata} can be cast to {@link DseColumnMetadata}, and all - * {@link IndexMetadata} to {@link DseIndexMetadata}. - *
- */ -public interface DseTableMetadata extends DseRelationMetadata, TableMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseVertexMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseVertexMetadata.java deleted file mode 100644 index c08a7eb1d60..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseVertexMetadata.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** Vertex metadata, for a table that was created with CREATE TABLE ... WITH VERTEX LABEL. */ -public interface DseVertexMetadata { - - /** The label of the vertex in graph. 
*/ - @NonNull - CqlIdentifier getLabelName(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseViewMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseViewMetadata.java deleted file mode 100644 index 0f68ea7e456..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseViewMetadata.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; - -/** - * Specialized materialized view metadata for DSE. - * - *

This type exists only for future extensibility; currently, it is identical to {@link - * ViewMetadata}. - * - *

Note that all returned {@link ColumnMetadata} can be cast to {@link DseColumnMetadata}. - */ -public interface DseViewMetadata extends DseRelationMetadata, ViewMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseNodeMetric.java b/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseNodeMetric.java deleted file mode 100644 index cf4b4d0aa18..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseNodeMetric.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** See {@code reference.conf} for a description of each metric. 
*/ -public enum DseNodeMetric implements NodeMetric { - GRAPH_MESSAGES("graph-messages"); - - private static final Map BY_PATH = sortByPath(); - - private final String path; - - DseNodeMetric(String path) { - this.path = path; - } - - @Override - @NonNull - public String getPath() { - return path; - } - - @NonNull - public static DseNodeMetric fromPath(@NonNull String path) { - DseNodeMetric metric = BY_PATH.get(path); - if (metric == null) { - throw new IllegalArgumentException("Unknown node metric path " + path); - } - return metric; - } - - private static Map sortByPath() { - ImmutableMap.Builder result = ImmutableMap.builder(); - for (DseNodeMetric value : values()) { - result.put(value.getPath(), value); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseSessionMetric.java b/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseSessionMetric.java deleted file mode 100644 index 79584f3c44a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseSessionMetric.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** See {@code reference.conf} for a description of each metric. */ -public enum DseSessionMetric implements SessionMetric { - CONTINUOUS_CQL_REQUESTS("continuous-cql-requests"), - GRAPH_REQUESTS("graph-requests"), - GRAPH_CLIENT_TIMEOUTS("graph-client-timeouts"), - ; - - private static final Map BY_PATH = sortByPath(); - - private final String path; - - DseSessionMetric(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } - - @NonNull - public static DseSessionMetric fromPath(@NonNull String path) { - DseSessionMetric metric = BY_PATH.get(path); - if (metric == null) { - throw new IllegalArgumentException("Unknown DSE session metric path " + path); - } - return metric; - } - - private static Map sortByPath() { - ImmutableMap.Builder result = ImmutableMap.builder(); - for (DseSessionMetric value : values()) { - result.put(value.getPath(), value); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/servererrors/UnfitClientException.java b/core/src/main/java/com/datastax/dse/driver/api/core/servererrors/UnfitClientException.java deleted file mode 100644 index 8bf4d80699d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/servererrors/UnfitClientException.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A server-side error triggered when DSE can't send asynchronous results back to the client. - * - *

Currently, this is used when the client is unable to keep up with the rate during a continuous - * paging session. - * - *

Note that the protocol specification refers to this error as {@code CLIENT_WRITE_FAILURE}; we - * don't follow that terminology because it would be too misleading (this is not a client error, and - * it doesn't occur while writing data to DSE). - */ -public class UnfitClientException extends CoordinatorException { - - public UnfitClientException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private UnfitClientException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @Override - @NonNull - public UnfitClientException copy() { - return new UnfitClientException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/type/DseDataTypes.java b/core/src/main/java/com/datastax/dse/driver/api/core/type/DseDataTypes.java deleted file mode 100644 index 6003274e09a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/type/DseDataTypes.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.type; - -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataTypes; - -/** Extends {@link DataTypes} to handle DSE-specific types. */ -public class DseDataTypes extends DataTypes { - - public static final CustomType LINE_STRING = - (CustomType) custom("org.apache.cassandra.db.marshal.LineStringType"); - - public static final CustomType POINT = - (CustomType) custom("org.apache.cassandra.db.marshal.PointType"); - - public static final CustomType POLYGON = - (CustomType) custom("org.apache.cassandra.db.marshal.PolygonType"); - - public static final CustomType DATE_RANGE = - (CustomType) custom("org.apache.cassandra.db.marshal.DateRangeType"); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/type/codec/DseTypeCodecs.java b/core/src/main/java/com/datastax/dse/driver/api/core/type/codec/DseTypeCodecs.java deleted file mode 100644 index fb0225970b4..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/type/codec/DseTypeCodecs.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.type.codec; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.data.time.DateRange; -import com.datastax.dse.driver.internal.core.type.codec.geometry.LineStringCodec; -import com.datastax.dse.driver.internal.core.type.codec.geometry.PointCodec; -import com.datastax.dse.driver.internal.core.type.codec.geometry.PolygonCodec; -import com.datastax.dse.driver.internal.core.type.codec.time.DateRangeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; - -/** Extends {@link TypeCodecs} to handle DSE-specific types. */ -public class DseTypeCodecs extends TypeCodecs { - - public static final TypeCodec LINE_STRING = new LineStringCodec(); - - public static final TypeCodec POINT = new PointCodec(); - - public static final TypeCodec POLYGON = new PolygonCodec(); - - public static final TypeCodec DATE_RANGE = new DateRangeCodec(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/DseProtocolFeature.java b/core/src/main/java/com/datastax/dse/driver/internal/core/DseProtocolFeature.java deleted file mode 100644 index 95f245061d2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/DseProtocolFeature.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core; - -import com.datastax.oss.driver.internal.core.ProtocolFeature; - -/** - * Features that are supported by DataStax Enterprise (DSE) protocol versions. - * - * @see com.datastax.dse.driver.api.core.DseProtocolVersion - * @see com.datastax.oss.driver.internal.core.DefaultProtocolFeature - */ -public enum DseProtocolFeature implements ProtocolFeature { - - /** - * The ability to execute continuous paging requests. - * - * @see CASSANDRA-11521 - * @see com.datastax.dse.driver.api.core.cql.continuous.ContinuousSession - */ - CONTINUOUS_PAGING, - ; -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/InsightsClientLifecycleListener.java b/core/src/main/java/com/datastax/dse/driver/internal/core/InsightsClientLifecycleListener.java deleted file mode 100644 index e4dd6f93bf7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/InsightsClientLifecycleListener.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core; - -import static com.datastax.dse.driver.api.core.config.DseDriverOption.MONITOR_REPORTING_ENABLED; - -import com.datastax.dse.driver.internal.core.insights.InsightsClient; -import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.LifecycleListener; - -public class InsightsClientLifecycleListener implements LifecycleListener { - private static final boolean DEFAULT_INSIGHTS_ENABLED = true; - private static final long STATUS_EVENT_DELAY_MILLIS = 300000L; - private final InternalDriverContext context; - private final StackTraceElement[] initCallStackTrace; - private volatile InsightsClient insightsClient; - - public InsightsClientLifecycleListener( - InternalDriverContext context, StackTraceElement[] initCallStackTrace) { - this.context = context; - this.initCallStackTrace = initCallStackTrace; - } - - @Override - public void onSessionReady() { - boolean monitorReportingEnabled = - context - .getConfig() - .getDefaultProfile() - .getBoolean(MONITOR_REPORTING_ENABLED, DEFAULT_INSIGHTS_ENABLED); - - this.insightsClient = - InsightsClient.createInsightsClient( - new InsightsConfiguration( - monitorReportingEnabled, - STATUS_EVENT_DELAY_MILLIS, - context.getNettyOptions().adminEventExecutorGroup().next()), - context, - initCallStackTrace); - insightsClient.sendStartupMessage(); - insightsClient.scheduleStatusMessageSend(); - } - - @Override - 
public void close() { - if (insightsClient != null) { - insightsClient.shutdown(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/AuthUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/AuthUtils.java deleted file mode 100644 index 38f1644bcb7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/AuthUtils.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.auth; - -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import java.util.ArrayList; -import java.util.List; - -public class AuthUtils { - /** - * Utility function that checks for the existence of settings and throws an exception if they - * aren't present - * - * @param config Current working driver configuration - * @param authenticatorName name of authenticator for logging purposes - * @param endPoint the host we are attempting to authenticate to - * @param options a list of DriverOptions to check to see if they are present - */ - public static void validateConfigPresent( - DriverExecutionProfile config, - String authenticatorName, - EndPoint endPoint, - DriverOption... options) { - List missingOptions = new ArrayList<>(); - for (DriverOption option : options) { - - if (!config.isDefined(option)) { - missingOptions.add(option); - } - if (missingOptions.size() > 0) { - String message = - "Missing required configuration options for authenticator " + authenticatorName + ":"; - for (DriverOption missingOption : missingOptions) { - message = message + " " + missingOption.getPath(); - } - throw new AuthenticationException(endPoint, message); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DseGssApiAuthProvider.java b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DseGssApiAuthProvider.java deleted file mode 100644 index 6ef6596a870..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DseGssApiAuthProvider.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.auth; - -import com.datastax.dse.driver.api.core.auth.DseGssApiAuthProviderBase; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -/** - * {@link AuthProvider} that provides GSSAPI authenticator instances for clients to connect to DSE - * clusters secured with {@code DseAuthenticator}. - * - *

To activate this provider an {@code auth-provider} section must be included in the driver - * configuration, for example: - * - *

- * dse-java-driver {
- *  auth-provider {
- *      class = com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider
- *      login-configuration {
- *          principal = "user principal here ex cassandra@DATASTAX.COM"
- *          useKeyTab = "true"
- *          refreshKrb5Config = "true"
- *          keyTab = "Path to keytab file here"
- *      }
- *   }
- * }
- * 
- * - *

Kerberos Authentication

- * - * Keytab and ticket cache settings are specified using a standard JAAS configuration file. The - * location of the file can be set using the java.security.auth.login.config system - * property or by adding a login.config.url.n entry in the java.security - * properties file. Alternatively a login-configuration section can be included in the driver - * configuration. - * - *

See the following documents for further details: - * - *

    - *
  1. JAAS - * Login Configuration File; - *
  2. Krb5LoginModule - * options; - *
  3. JAAS - * Authentication Tutorial for more on JAAS in general. - *
- * - *

Authentication using ticket cache

- * - * Run kinit to obtain a ticket and populate the cache before connecting. JAAS config: - * - *
- * DseClient {
- *   com.sun.security.auth.module.Krb5LoginModule required
- *     useTicketCache=true
- *     renewTGT=true;
- * };
- * 
- * - *

Authentication using a keytab file

- * - * To enable authentication using a keytab file, specify its location on disk. If your keytab - * contains more than one principal key, you should also specify which one to select. This - * information can also be specified in the driver config, under the login-configuration section. - * - *
- * DseClient {
- *     com.sun.security.auth.module.Krb5LoginModule required
- *       useKeyTab=true
- *       keyTab="/path/to/file.keytab"
- *       principal="user@MYDOMAIN.COM";
- * };
- * 
- * - *

Specifying SASL protocol name

- * - * The SASL protocol name used by this auth provider defaults to " - * {@value #DEFAULT_SASL_SERVICE_NAME}". - * - *

Important: the SASL protocol name should match the username of the Kerberos - * service principal used by the DSE server. This information is specified in the dse.yaml file by - * the {@code service_principal} option under the kerberos_options - * section, and may vary from one DSE installation to another – especially if you installed - * DSE with an automated package installer. - * - *

For example, if your dse.yaml file contains the following: - * - *

{@code
- * kerberos_options:
- *     ...
- *     service_principal: cassandra/my.host.com@MY.REALM.COM
- * }
- * - * The correct SASL protocol name to use when authenticating against this DSE server is "{@code - * cassandra}". - * - *

Should you need to change the SASL protocol name, use one of the methods below: - * - *

    - *
  1. Specify the service name in the driver config. - *
    - * dse-java-driver {
    - *   auth-provider {
    - *     class = com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider
    - *     service = "alternate"
    - *   }
    - * }
    - * 
    - *
  2. Specify the service name with the {@code dse.sasl.service} system property when starting - * your application, e.g. {@code -Ddse.sasl.service=cassandra}. - *
- * - * If a non-null SASL service name is provided to the aforementioned config, that name takes - * precedence over the contents of the {@code dse.sasl.service} system property. - * - *

Should internal sasl properties need to be set such as qop. This can be accomplished by - * including a sasl-properties in the driver config, for example: - * - *

- * dse-java-driver {
- *   auth-provider {
- *     class = com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider
- *     sasl-properties {
- *       javax.security.sasl.qop = "auth-conf"
- *     }
- *   }
- * }
- * 
- */ -@ThreadSafe -public class DseGssApiAuthProvider extends DseGssApiAuthProviderBase { - - private final DriverExecutionProfile config; - - public DseGssApiAuthProvider(DriverContext context) { - super(context.getSessionName()); - - this.config = context.getConfig().getDefaultProfile(); - } - - @NonNull - @Override - protected GssApiOptions getOptions( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { - // A login configuration is always necessary, throw an exception if that option is missing. - AuthUtils.validateConfigPresent( - config, - DseGssApiAuthProvider.class.getName(), - endPoint, - DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION); - - GssApiOptions.Builder optionsBuilder = GssApiOptions.builder(); - - if (config.isDefined(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID)) { - optionsBuilder.withAuthorizationId( - config.getString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID)); - } - if (config.isDefined(DseDriverOption.AUTH_PROVIDER_SERVICE)) { - optionsBuilder.withSaslProtocol(config.getString(DseDriverOption.AUTH_PROVIDER_SERVICE)); - } - if (config.isDefined(DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES)) { - for (Map.Entry entry : - config.getStringMap(DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES).entrySet()) { - optionsBuilder.addSaslProperty(entry.getKey(), entry.getValue()); - } - } - Map loginConfigurationMap = - config.getStringMap(DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION); - optionsBuilder.withLoginConfiguration(loginConfigurationMap); - return optionsBuilder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DsePlainTextAuthProvider.java b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DsePlainTextAuthProvider.java deleted file mode 100644 index 6cf82aef03e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DsePlainTextAuthProvider.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.auth; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; -import net.jcip.annotations.ThreadSafe; - -/** - * @deprecated The driver's default plain text providers now support both Apache Cassandra and DSE. - * This type was preserved for backward compatibility, but {@link PlainTextAuthProvider} should - * be used instead. - */ -@ThreadSafe -@Deprecated -public class DsePlainTextAuthProvider extends PlainTextAuthProvider { - - public DsePlainTextAuthProvider(DriverContext context) { - super(context); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/DseConversions.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/DseConversions.java deleted file mode 100644 index 15aab143150..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/DseConversions.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.servererrors.UnfitClientException; -import com.datastax.dse.protocol.internal.DseProtocolConstants; -import com.datastax.dse.protocol.internal.request.query.ContinuousPagingOptions; -import com.datastax.dse.protocol.internal.request.query.DseQueryOptions; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import 
com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Execute; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -public class DseConversions { - - public static Message toContinuousPagingMessage( - Statement statement, DriverExecutionProfile config, InternalDriverContext context) { - ConsistencyLevelRegistry consistencyLevelRegistry = context.getConsistencyLevelRegistry(); - ConsistencyLevel consistency = statement.getConsistencyLevel(); - int consistencyCode = - (consistency == null) - ? consistencyLevelRegistry.nameToCode( - config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - : consistency.getProtocolCode(); - int pageSize = config.getInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE); - boolean pageSizeInBytes = config.getBoolean(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES); - int maxPages = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES); - int maxPagesPerSecond = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND); - int maxEnqueuedPages = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); - ContinuousPagingOptions options = - new ContinuousPagingOptions(maxPages, maxPagesPerSecond, maxEnqueuedPages); - ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel(); - int serialConsistencyCode = - (serialConsistency == null) - ? 
consistencyLevelRegistry.nameToCode( - config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - : serialConsistency.getProtocolCode(); - long timestamp = statement.getQueryTimestamp(); - if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { - timestamp = context.getTimestampGenerator().next(); - } - CodecRegistry codecRegistry = context.getCodecRegistry(); - ProtocolVersion protocolVersion = context.getProtocolVersion(); - ProtocolVersionRegistry protocolVersionRegistry = context.getProtocolVersionRegistry(); - CqlIdentifier keyspace = statement.getKeyspace(); - if (statement instanceof SimpleStatement) { - SimpleStatement simpleStatement = (SimpleStatement) statement; - List positionalValues = simpleStatement.getPositionalValues(); - Map namedValues = simpleStatement.getNamedValues(); - if (!positionalValues.isEmpty() && !namedValues.isEmpty()) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (keyspace != null - && !protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { - throw new IllegalArgumentException( - "Can't use per-request keyspace with protocol " + protocolVersion); - } - DseQueryOptions queryOptions = - new DseQueryOptions( - consistencyCode, - Conversions.encode(positionalValues, codecRegistry, protocolVersion), - Conversions.encode(namedValues, codecRegistry, protocolVersion), - false, - pageSize, - statement.getPagingState(), - serialConsistencyCode, - timestamp, - (keyspace == null) ? 
null : keyspace.asInternal(), - pageSizeInBytes, - options); - return new Query(simpleStatement.getQuery(), queryOptions); - } else if (statement instanceof BoundStatement) { - BoundStatement boundStatement = (BoundStatement) statement; - if (!protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.UNSET_BOUND_VALUES)) { - Conversions.ensureAllSet(boundStatement); - } - boolean skipMetadata = - boundStatement.getPreparedStatement().getResultSetDefinitions().size() > 0; - DseQueryOptions queryOptions = - new DseQueryOptions( - consistencyCode, - boundStatement.getValues(), - Collections.emptyMap(), - skipMetadata, - pageSize, - statement.getPagingState(), - serialConsistencyCode, - timestamp, - null, - pageSizeInBytes, - options); - PreparedStatement preparedStatement = boundStatement.getPreparedStatement(); - ByteBuffer id = preparedStatement.getId(); - ByteBuffer resultMetadataId = preparedStatement.getResultMetadataId(); - return new Execute( - Bytes.getArray(id), - (resultMetadataId == null) ? 
null : Bytes.getArray(resultMetadataId), - queryOptions); - } else { - throw new IllegalArgumentException( - "Unsupported statement type: " + statement.getClass().getName()); - } - } - - public static CoordinatorException toThrowable( - Node node, Error errorMessage, InternalDriverContext context) { - switch (errorMessage.code) { - case DseProtocolConstants.ErrorCode.CLIENT_WRITE_FAILURE: - return new UnfitClientException(node, errorMessage.message); - default: - return Conversions.toThrowable(node, errorMessage, context); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestAsyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestAsyncProcessor.java deleted file mode 100644 index 8a098bf2895..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestAsyncProcessor.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ContinuousCqlRequestAsyncProcessor - implements RequestProcessor, CompletionStage> { - - public static final GenericType> - CONTINUOUS_RESULT_ASYNC = new GenericType>() {}; - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(CONTINUOUS_RESULT_ASYNC); - } - - @Override - public CompletionStage process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new ContinuousCqlRequestHandler(request, session, context, sessionLogPrefix).handle(); - } - - @Override - public CompletionStage newFailure(RuntimeException error) { - return CompletableFutures.failedFuture(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandler.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandler.java deleted file mode 100644 index dd308c11854..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandler.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.cql.DseConversions; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.DefaultRow; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.protocol.internal.Message; -import 
com.datastax.oss.protocol.internal.response.result.Rows; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import net.jcip.annotations.ThreadSafe; - -/** - * Handles a request that supports multiple response messages (a.k.a. continuous paging request). - */ -@ThreadSafe -public class ContinuousCqlRequestHandler - extends ContinuousRequestHandlerBase, ContinuousAsyncResultSet> { - - ContinuousCqlRequestHandler( - @NonNull Statement statement, - @NonNull DefaultSession session, - @NonNull InternalDriverContext context, - @NonNull String sessionLogPrefix) { - super( - statement, - session, - context, - sessionLogPrefix, - ContinuousAsyncResultSet.class, - false, - DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, - DseSessionMetric.CONTINUOUS_CQL_REQUESTS, - DefaultNodeMetric.CQL_MESSAGES); - // NOTE that ordering of the following statement matters. - // We should register this request after all fields have been initialized. 
- throttler.register(this); - } - - @NonNull - @Override - protected Duration getGlobalTimeout() { - return Duration.ZERO; - } - - @NonNull - @Override - protected Duration getPageTimeout(@NonNull Statement statement, int pageNumber) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - if (pageNumber == 1) { - return executionProfile.getDuration(DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE); - } else { - return executionProfile.getDuration(DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES); - } - } - - @NonNull - @Override - protected Duration getReviseRequestTimeout(@NonNull Statement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getDuration(DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES); - } - - @Override - protected int getMaxEnqueuedPages(@NonNull Statement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); - } - - @Override - protected int getMaxPages(@NonNull Statement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES); - } - - @NonNull - @Override - protected Message getMessage(@NonNull Statement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return DseConversions.toContinuousPagingMessage(statement, executionProfile, context); - } - - @Override - protected boolean isTracingEnabled(@NonNull Statement statement) { - return false; - } - - @NonNull - @Override - protected Map createPayload(@NonNull Statement statement) { - return statement.getCustomPayload(); - } - - @NonNull - @Override - protected ContinuousAsyncResultSet 
createEmptyResultSet(@NonNull ExecutionInfo executionInfo) { - return DefaultContinuousAsyncResultSet.empty(executionInfo); - } - - @NonNull - @Override - protected DefaultContinuousAsyncResultSet createResultSet( - @NonNull Statement statement, - @NonNull Rows rows, - @NonNull ExecutionInfo executionInfo, - @NonNull ColumnDefinitions columnDefinitions) { - Queue> data = rows.getData(); - CountingIterator iterator = - new CountingIterator(data.size()) { - @Override - protected Row computeNext() { - List rowData = data.poll(); - return (rowData == null) - ? endOfData() - : new DefaultRow(columnDefinitions, rowData, context); - } - }; - DseRowsMetadata metadata = (DseRowsMetadata) rows.getMetadata(); - return new DefaultContinuousAsyncResultSet( - iterator, - columnDefinitions, - metadata.continuousPageNumber, - !metadata.isLastContinuousPage, - executionInfo, - this); - } - - @Override - protected int pageNumber(@NonNull ContinuousAsyncResultSet resultSet) { - return resultSet.pageNumber(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestSyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestSyncProcessor.java deleted file mode 100644 index f151eb7eae2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestSyncProcessor.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ContinuousCqlRequestSyncProcessor - implements RequestProcessor, ContinuousResultSet> { - - public static final GenericType CONTINUOUS_RESULT_SYNC = - GenericType.of(ContinuousResultSet.class); - - private final ContinuousCqlRequestAsyncProcessor asyncProcessor; - - public ContinuousCqlRequestSyncProcessor(ContinuousCqlRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(CONTINUOUS_RESULT_SYNC); - } - - @Override - public ContinuousResultSet process( - Statement request, - DefaultSession session, - InternalDriverContext context, - 
String sessionLogPrefix) { - BlockingOperation.checkNotDriverThread(); - ContinuousAsyncResultSet firstPage = - CompletableFutures.getUninterruptibly( - asyncProcessor.process(request, session, context, sessionLogPrefix)); - return new DefaultContinuousResultSet(firstPage); - } - - @Override - public ContinuousResultSet newFailure(RuntimeException error) { - throw error; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousRequestHandlerBase.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousRequestHandlerBase.java deleted file mode 100644 index 0453022cb6a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousRequestHandlerBase.java +++ /dev/null @@ -1,1645 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.internal.core.DseProtocolFeature; -import com.datastax.dse.driver.internal.core.cql.DseConversions; -import com.datastax.dse.protocol.internal.request.Revise; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import 
com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.DefaultExecutionInfo; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Result; -import 
com.datastax.oss.protocol.internal.response.error.Unprepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.Void; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.handler.codec.EncoderException; -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles a request that supports multiple response messages (a.k.a. continuous paging request). 
- */ -@ThreadSafe -public abstract class ContinuousRequestHandlerBase - implements Throttled { - - private static final Logger LOG = LoggerFactory.getLogger(ContinuousRequestHandlerBase.class); - - protected final String logPrefix; - protected final StatementT initialStatement; - protected final DefaultSession session; - private final CqlIdentifier keyspace; - protected final InternalDriverContext context; - private final Queue queryPlan; - protected final RequestThrottler throttler; - private final boolean protocolBackpressureAvailable; - private final Timer timer; - private final SessionMetricUpdater sessionMetricUpdater; - private final boolean specExecEnabled; - private final SessionMetric clientTimeoutsMetric; - private final SessionMetric continuousRequestsMetric; - private final NodeMetric messagesMetric; - private final List scheduledExecutions; - - // The errors on the nodes that were already tried. - // We don't use a map because nodes can appear multiple times. - protected final List> errors = new CopyOnWriteArrayList<>(); - - /** - * The list of in-flight executions, one per node. Executions may be triggered by speculative - * executions or retries. An execution is added to this list when the write operation completes. - * It is removed from this list when the callback has done reading responses. - */ - private final List inFlightCallbacks = new CopyOnWriteArrayList<>(); - - /** The callback selected to stream results back to the client. */ - private final CompletableFuture chosenCallback = new CompletableFuture<>(); - - /** - * How many speculative executions are currently running (including the initial execution). We - * track this in order to know when to fail the request if all executions have reached the end of - * the query plan. - */ - private final AtomicInteger activeExecutionsCount = new AtomicInteger(0); - - /** - * How many speculative executions have started (excluding the initial execution), whether they - * have completed or not. 
We track this in order to fill execution info objects with this - * information. - */ - protected final AtomicInteger startedSpeculativeExecutionsCount = new AtomicInteger(0); - - // Set when the execution starts, and is never modified after. - private final long startTimeNanos; - private volatile Timeout globalTimeout; - - private final Class resultSetClass; - - public ContinuousRequestHandlerBase( - @NonNull StatementT statement, - @NonNull DefaultSession session, - @NonNull InternalDriverContext context, - @NonNull String sessionLogPrefix, - @NonNull Class resultSetClass, - boolean specExecEnabled, - SessionMetric clientTimeoutsMetric, - SessionMetric continuousRequestsMetric, - NodeMetric messagesMetric) { - this.resultSetClass = resultSetClass; - - ProtocolVersion protocolVersion = context.getProtocolVersion(); - if (!context - .getProtocolVersionRegistry() - .supports(protocolVersion, DseProtocolFeature.CONTINUOUS_PAGING)) { - throw new IllegalStateException( - "Cannot execute continuous paging requests with protocol version " + protocolVersion); - } - this.clientTimeoutsMetric = clientTimeoutsMetric; - this.continuousRequestsMetric = continuousRequestsMetric; - this.messagesMetric = messagesMetric; - this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); - LOG.trace("[{}] Creating new continuous handler for request {}", logPrefix, statement); - this.initialStatement = statement; - this.session = session; - this.keyspace = session.getKeyspace().orElse(null); - this.context = context; - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - this.queryPlan = - statement.getNode() != null - ? 
new SimpleQueryPlan(statement.getNode()) - : context - .getLoadBalancingPolicyWrapper() - .newQueryPlan(statement, executionProfile.getName(), session); - this.timer = context.getNettyOptions().getTimer(); - - this.protocolBackpressureAvailable = - protocolVersion.getCode() >= DseProtocolVersion.DSE_V2.getCode(); - this.throttler = context.getRequestThrottler(); - this.sessionMetricUpdater = session.getMetricUpdater(); - this.startTimeNanos = System.nanoTime(); - this.specExecEnabled = specExecEnabled; - this.scheduledExecutions = this.specExecEnabled ? new CopyOnWriteArrayList<>() : null; - } - - @NonNull - protected abstract Duration getGlobalTimeout(); - - @NonNull - protected abstract Duration getPageTimeout(@NonNull StatementT statement, int pageNumber); - - @NonNull - protected abstract Duration getReviseRequestTimeout(@NonNull StatementT statement); - - protected abstract int getMaxEnqueuedPages(@NonNull StatementT statement); - - protected abstract int getMaxPages(@NonNull StatementT statement); - - @NonNull - protected abstract Message getMessage(@NonNull StatementT statement); - - protected abstract boolean isTracingEnabled(@NonNull StatementT statement); - - @NonNull - protected abstract Map createPayload(@NonNull StatementT statement); - - @NonNull - protected abstract ResultSetT createEmptyResultSet(@NonNull ExecutionInfo executionInfo); - - protected abstract int pageNumber(@NonNull ResultSetT resultSet); - - @NonNull - protected abstract ResultSetT createResultSet( - @NonNull StatementT statement, - @NonNull Rows rows, - @NonNull ExecutionInfo executionInfo, - @NonNull ColumnDefinitions columnDefinitions) - throws IOException; - - // MAIN LIFECYCLE - - @Override - public void onThrottleReady(boolean wasDelayed) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialStatement, context); - if (wasDelayed - // avoid call to nanoTime() if metric is disabled: - && sessionMetricUpdater.isEnabled( - 
DefaultSessionMetric.THROTTLING_DELAY, executionProfile.getName())) { - session - .getMetricUpdater() - .updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - executionProfile.getName(), - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - activeExecutionsCount.incrementAndGet(); - sendRequest(initialStatement, null, 0, 0, specExecEnabled); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialStatement, context); - session - .getMetricUpdater() - .incrementCounter(DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); - abortGlobalRequestOrChosenCallback(error); - } - - private void abortGlobalRequestOrChosenCallback(@NonNull Throwable error) { - if (!chosenCallback.completeExceptionally(error)) { - chosenCallback.thenAccept(callback -> callback.abort(error, false)); - } - } - - public CompletionStage handle() { - globalTimeout = scheduleGlobalTimeout(); - return fetchNextPage(); - } - - /** - * Builds the future that will get returned to the user from the initial execute call or a - * fetchNextPage() on the async API. - */ - public CompletionStage fetchNextPage() { - CompletableFuture result = new CompletableFuture<>(); - - // This is equivalent to - // `chosenCallback.thenCompose(NodeResponseCallback::dequeueOrCreatePending)`, except - // that we need to cancel `result` if `resultSetError` is a CancellationException. 
- chosenCallback.whenComplete( - (callback, callbackError) -> { - if (callbackError != null) { - result.completeExceptionally(callbackError); - } else { - callback - .dequeueOrCreatePending() - .whenComplete( - (resultSet, resultSetError) -> { - if (resultSetError != null) { - result.completeExceptionally(resultSetError); - } else { - result.complete(resultSet); - } - }); - } - }); - - // If the user cancels the future, propagate to our internal components - result.whenComplete( - (rs, t) -> { - if (t instanceof CancellationException) { - cancel(); - } - }); - - return result; - } - - /** - * Sends the initial request to the next available node. - * - * @param node if not null, it will be attempted first before the rest of the query plan. It - * happens only when we retry on the same host. - * @param currentExecutionIndex 0 for the initial execution, 1 for the first speculative one, etc. - * @param retryCount the number of times that the retry policy was invoked for this execution - * already (note that some internal retries don't go through the policy, and therefore don't - * increment this counter) - * @param scheduleSpeculativeExecution whether to schedule the next speculative execution - */ - private void sendRequest( - StatementT statement, - @Nullable Node node, - int currentExecutionIndex, - int retryCount, - boolean scheduleSpeculativeExecution) { - DriverChannel channel = null; - if (node == null || (channel = session.getChannel(node, logPrefix)) == null) { - while ((node = queryPlan.poll()) != null) { - channel = session.getChannel(node, logPrefix); - if (channel != null) { - break; - } else { - recordError(node, new NodeUnavailableException(node)); - } - } - } - if (channel == null) { - // We've reached the end of the query plan without finding any node to write to; abort the - // continuous paging session. 
- if (activeExecutionsCount.decrementAndGet() == 0) { - abortGlobalRequestOrChosenCallback(AllNodesFailedException.fromErrors(errors)); - } - } else if (!chosenCallback.isDone()) { - NodeResponseCallback nodeResponseCallback = - new NodeResponseCallback( - statement, - node, - channel, - currentExecutionIndex, - retryCount, - scheduleSpeculativeExecution, - logPrefix); - inFlightCallbacks.add(nodeResponseCallback); - channel - .write( - getMessage(statement), - isTracingEnabled(statement), - createPayload(statement), - nodeResponseCallback) - .addListener(nodeResponseCallback); - } - } - - private Timeout scheduleGlobalTimeout() { - Duration globalTimeout = getGlobalTimeout(); - if (globalTimeout.toNanos() <= 0) { - return null; - } - LOG.trace("[{}] Scheduling global timeout for pages in {}", logPrefix, globalTimeout); - return timer.newTimeout( - timeout -> - abortGlobalRequestOrChosenCallback( - new DriverTimeoutException("Query timed out after " + globalTimeout)), - globalTimeout.toNanos(), - TimeUnit.NANOSECONDS); - } - - /** - * Cancels the continuous paging request. - * - *

Called from user code, see {@link DefaultContinuousAsyncResultSet#cancel()}, or from a - * driver I/O thread. - */ - public void cancel() { - // If chosenCallback is already set, this is a no-op and the chosen callback will be handled by - // cancelScheduledTasks - chosenCallback.cancel(true); - - cancelScheduledTasks(null); - cancelGlobalTimeout(); - throttler.signalCancel(this); - } - - private void cancelGlobalTimeout() { - if (globalTimeout != null) { - globalTimeout.cancel(); - } - } - - /** - * Cancel all pending and scheduled executions, except the one passed as an argument to the - * method. - * - * @param toIgnore An optional execution to ignore (will not be cancelled). - */ - private void cancelScheduledTasks(@Nullable NodeResponseCallback toIgnore) { - if (scheduledExecutions != null) { - for (Timeout scheduledExecution : scheduledExecutions) { - scheduledExecution.cancel(); - } - } - for (NodeResponseCallback callback : inFlightCallbacks) { - if (toIgnore == null || toIgnore != callback) { - callback.cancel(); - } - } - } - - @VisibleForTesting - int getState() { - try { - return chosenCallback.get().getState(); - } catch (CancellationException e) { - // Happens if the test cancels before the callback was chosen - return NodeResponseCallback.STATE_FAILED; - } catch (InterruptedException | ExecutionException e) { - // We never interrupt or fail chosenCallback (other than canceling) - throw new AssertionError("Unexpected error", e); - } - } - - @VisibleForTesting - CompletableFuture getPendingResult() { - try { - return chosenCallback.get().getPendingResult(); - } catch (Exception e) { - // chosenCallback should always be complete by the time tests call this - throw new AssertionError("Expected callback to be chosen at this point"); - } - } - - private void recordError(@NonNull Node node, @NonNull Throwable error) { - errors.add(new AbstractMap.SimpleEntry<>(node, error)); - } - - /** - * Handles the interaction with a single node in the query plan. 
- * - *

An instance of this class is created each time we (re)try a node. The first callback that - * has something ready to enqueue will be allowed to stream results back to the client; the others - * will be cancelled. - */ - private class NodeResponseCallback - implements ResponseCallback, GenericFutureListener> { - - private final long messageStartTimeNanos = System.nanoTime(); - private final StatementT statement; - private final Node node; - private final DriverChannel channel; - // The identifier of the current execution (0 for the initial execution, 1 for the first - // speculative execution, etc.) - private final int executionIndex; - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt of each execution). - private final String logPrefix; - private final boolean scheduleSpeculativeExecution; - - private final DriverExecutionProfile executionProfile; - - // Coordinates concurrent accesses between the client and I/O threads - private final ReentrantLock lock = new ReentrantLock(); - - // The page queue, storing responses that we have received and have not been consumed by the - // client yet. We instantiate it lazily to avoid unnecessary allocation; this is also used to - // check if the callback ever tried to enqueue something. - @GuardedBy("lock") - private Queue queue; - - // If the client requests a page and we can't serve it immediately (empty queue), then we create - // this future and have the client wait on it. Otherwise this field is null. - @GuardedBy("lock") - private CompletableFuture pendingResult; - - // How many pages were requested. This is the total number of pages requested from the - // beginning. 
- // It will be zero if the protocol does not support numPagesRequested (DSE_V1) - @GuardedBy("lock") - private int numPagesRequested; - - // An integer that represents the state of the continuous paging request: - // - if positive, it is the sequence number of the next expected page; - // - if negative, it is a terminal state, identified by the constants below. - @GuardedBy("lock") - private int state = 1; - - // Whether isLastResponse has returned true already - @GuardedBy("lock") - private boolean sawLastResponse; - - @GuardedBy("lock") - private boolean sentCancelRequest; - - private static final int STATE_FINISHED = -1; - private static final int STATE_FAILED = -2; - - @GuardedBy("lock") - private int streamId = -1; - - // These are set when the first page arrives, and are never modified after. - private volatile ColumnDefinitions columnDefinitions; - - private volatile Timeout pageTimeout; - - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt, 1 for the first retry, etc.). 
- private final int retryCount; - - // SpeculativeExecution node metrics should be executed only for the first page (first - // invocation) - private final AtomicBoolean stopNodeMessageTimerReported = new AtomicBoolean(false); - private final AtomicBoolean nodeErrorReported = new AtomicBoolean(false); - private final AtomicBoolean nodeSuccessReported = new AtomicBoolean(false); - - public NodeResponseCallback( - StatementT statement, - Node node, - DriverChannel channel, - int executionIndex, - int retryCount, - boolean scheduleSpeculativeExecution, - String logPrefix) { - this.statement = statement; - this.node = node; - this.channel = channel; - this.executionIndex = executionIndex; - this.retryCount = retryCount; - this.scheduleSpeculativeExecution = scheduleSpeculativeExecution; - this.logPrefix = logPrefix + "|" + executionIndex; - this.executionProfile = Conversions.resolveExecutionProfile(statement, context); - } - - @Override - public void onStreamIdAssigned(int streamId) { - LOG.trace("[{}] Assigned streamId {} on node {}", logPrefix, streamId, node); - lock.lock(); - try { - this.streamId = streamId; - if (state < 0) { - // This happens if we were cancelled before getting the stream id, we have a request in - // flight that needs to be cancelled - releaseStreamId(); - } - } finally { - lock.unlock(); - } - } - - @Override - public boolean isLastResponse(@NonNull Frame responseFrame) { - lock.lock(); - try { - Message message = responseFrame.message; - boolean isLastResponse; - - if (sentCancelRequest) { - // The only response we accept is the SERVER_ERROR triggered by a successful cancellation. - // Otherwise we risk releasing and reusing the stream id while the cancel request is still - // in flight, and it might end up cancelling an unrelated request. - // Note that there is a chance that the request ends normally right after we send the - // cancel request. In that case this method never returns true and the stream id will - // remain orphaned forever. 
This should be very rare so this is acceptable. - if (message instanceof Error) { - Error error = (Error) message; - isLastResponse = - (error.code == ProtocolConstants.ErrorCode.SERVER_ERROR) - && error.message.contains("Session cancelled by the user"); - } else { - isLastResponse = false; - } - } else if (message instanceof Rows) { - Rows rows = (Rows) message; - DseRowsMetadata metadata = (DseRowsMetadata) rows.getMetadata(); - isLastResponse = metadata.isLastContinuousPage; - } else { - isLastResponse = message instanceof Error; - } - - if (isLastResponse) { - sawLastResponse = true; - } - return isLastResponse; - } finally { - lock.unlock(); - } - } - - /** - * Invoked when the write from {@link #sendRequest} completes. - * - * @param future The future representing the outcome of the write operation. - */ - @Override - public void operationComplete(@NonNull Future future) { - if (!future.isSuccess()) { - Throwable error = future.cause(); - if (error instanceof EncoderException - && error.getCause() instanceof FrameTooLongException) { - trackNodeError(node, error.getCause()); - lock.lock(); - try { - abort(error.getCause(), false); - } finally { - lock.unlock(); - } - } else { - LOG.trace( - "[{}] Failed to send request on {}, trying next node (cause: {})", - logPrefix, - channel, - error); - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter(DefaultNodeMetric.UNSENT_REQUESTS, executionProfile.getName()); - recordError(node, error); - trackNodeError(node, error.getCause()); - sendRequest(statement, null, executionIndex, retryCount, scheduleSpeculativeExecution); - } - } else { - LOG.trace("[{}] Request sent on {}", logPrefix, channel); - if (scheduleSpeculativeExecution - && Conversions.resolveIdempotence(statement, executionProfile)) { - int nextExecution = executionIndex + 1; - // Note that `node` is the first node of the execution, it might not be the "slow" one - // if there were retries, but in practice retries are rare. 
- long nextDelay = - Conversions.resolveSpeculativeExecutionPolicy(context, executionProfile) - .nextExecution(node, keyspace, statement, nextExecution); - if (nextDelay >= 0) { - scheduleSpeculativeExecution(nextExecution, nextDelay); - } else { - LOG.trace( - "[{}] Speculative execution policy returned {}, no next execution", - logPrefix, - nextDelay); - } - } - pageTimeout = schedulePageTimeout(1); - } - } - - private void scheduleSpeculativeExecution(int nextExecutionIndex, long delay) { - LOG.trace( - "[{}] Scheduling speculative execution {} in {} ms", - logPrefix, - nextExecutionIndex, - delay); - try { - scheduledExecutions.add( - timer.newTimeout( - (Timeout timeout) -> { - if (!chosenCallback.isDone()) { - LOG.trace( - "[{}] Starting speculative execution {}", logPrefix, nextExecutionIndex); - activeExecutionsCount.incrementAndGet(); - startedSpeculativeExecutionsCount.incrementAndGet(); - NodeMetricUpdater nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (nodeMetricUpdater.isEnabled( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName())) { - nodeMetricUpdater.incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName()); - } - sendRequest(statement, null, nextExecutionIndex, 0, true); - } - }, - delay, - TimeUnit.MILLISECONDS)); - } catch (IllegalStateException e) { - logTimeoutSchedulingError(e); - } - } - - private Timeout schedulePageTimeout(int expectedPage) { - if (expectedPage < 0) { - return null; - } - Duration timeout = getPageTimeout(statement, expectedPage); - if (timeout.toNanos() <= 0) { - return null; - } - LOG.trace("[{}] Scheduling timeout for page {} in {}", logPrefix, expectedPage, timeout); - return timer.newTimeout( - t -> onPageTimeout(expectedPage), timeout.toNanos(), TimeUnit.NANOSECONDS); - } - - private void onPageTimeout(int expectedPage) { - lock.lock(); - try { - if (state == expectedPage) { - abort( - new DriverTimeoutException( - String.format("Timed out waiting 
for page %d", expectedPage)), - false); - } else { - // Ignore timeout if the request has moved on in the interim. - LOG.trace( - "[{}] Timeout fired for page {} but query already at state {}, skipping", - logPrefix, - expectedPage, - state); - } - } finally { - lock.unlock(); - } - } - - /** - * Invoked when a continuous paging response is received, either a successful or failed one. - * - *

Delegates further processing to appropriate methods: {@link #processResultResponse(Result, - * Frame)} if the response was successful, or {@link #processErrorResponse(Error)} if it wasn't. - * - * @param response the received {@link Frame}. - */ - @Override - public void onResponse(@NonNull Frame response) { - stopNodeMessageTimer(); - cancelTimeout(pageTimeout); - lock.lock(); - try { - if (state < 0) { - LOG.trace("[{}] Got result but the request has been cancelled, ignoring", logPrefix); - return; - } - try { - Message responseMessage = response.message; - if (responseMessage instanceof Result) { - LOG.trace("[{}] Got result", logPrefix); - processResultResponse((Result) responseMessage, response); - } else if (responseMessage instanceof Error) { - LOG.trace("[{}] Got error response", logPrefix); - processErrorResponse((Error) responseMessage); - } else { - IllegalStateException error = - new IllegalStateException("Unexpected response " + responseMessage); - trackNodeError(node, error); - abort(error, false); - } - } catch (Throwable t) { - trackNodeError(node, t); - abort(t, false); - } - } finally { - lock.unlock(); - } - } - - /** - * Invoked when a continuous paging request hits an unexpected error. - * - *

Delegates further processing to to the retry policy ({@link - * #processRetryVerdict(RetryVerdict, Throwable)}. - * - * @param error the error encountered, usually a network problem. - */ - @Override - public void onFailure(@NonNull Throwable error) { - cancelTimeout(pageTimeout); - LOG.trace(String.format("[%s] Request failure", logPrefix), error); - RetryVerdict verdict; - if (!Conversions.resolveIdempotence(statement, executionProfile) - || error instanceof FrameTooLongException) { - verdict = RetryVerdict.RETHROW; - } else { - try { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - verdict = retryPolicy.onRequestAbortedVerdict(statement, error, retryCount); - } catch (Throwable cause) { - abort( - new IllegalStateException("Unexpected error while invoking the retry policy", cause), - false); - return; - } - } - updateErrorMetrics( - ((DefaultNode) node).getMetricUpdater(), - verdict, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED); - lock.lock(); - try { - processRetryVerdict(verdict, error); - } finally { - lock.unlock(); - } - } - - // PROCESSING METHODS - - /** - * Processes a new result response, creating the corresponding {@link ResultSetT} object and - * then enqueuing it or serving it directly to the user if he was waiting for it. - * - * @param result the result to process. It is normally a {@link Rows} object, but may be a - * {@link Void} object if the retry policy decided to ignore an error. - * @param frame the {@link Frame} (used to create the {@link ExecutionInfo} the first time). 
- */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void processResultResponse(@NonNull Result result, @Nullable Frame frame) { - assert lock.isHeldByCurrentThread(); - try { - ExecutionInfo executionInfo = createExecutionInfo(result, frame); - if (result instanceof Rows) { - DseRowsMetadata rowsMetadata = (DseRowsMetadata) ((Rows) result).getMetadata(); - if (columnDefinitions == null) { - // Contrary to ROWS responses from regular queries, - // the first page always includes metadata so we use this - // regardless of whether or not the query was from a prepared statement. - columnDefinitions = Conversions.toColumnDefinitions(rowsMetadata, context); - } - int pageNumber = rowsMetadata.continuousPageNumber; - int currentPage = state; - if (pageNumber != currentPage) { - abort( - new IllegalStateException( - String.format( - "Received page %d but was expecting %d", pageNumber, currentPage)), - false); - } else { - int pageSize = ((Rows) result).getData().size(); - ResultSetT resultSet = - createResultSet(statement, (Rows) result, executionInfo, columnDefinitions); - if (rowsMetadata.isLastContinuousPage) { - LOG.trace("[{}] Received last page ({} - {} rows)", logPrefix, pageNumber, pageSize); - state = STATE_FINISHED; - reenableAutoReadIfNeeded(); - enqueueOrCompletePending(resultSet); - stopGlobalRequestTimer(); - cancelTimeout(globalTimeout); - } else { - LOG.trace("[{}] Received page {} ({} rows)", logPrefix, pageNumber, pageSize); - if (currentPage > 0) { - state = currentPage + 1; - } - enqueueOrCompletePending(resultSet); - } - } - } else { - // Void responses happen only when the retry decision is ignore. 
- assert result instanceof Void; - ResultSetT resultSet = createEmptyResultSet(executionInfo); - LOG.trace( - "[{}] Continuous paging interrupted by retry policy decision to ignore error", - logPrefix); - state = STATE_FINISHED; - reenableAutoReadIfNeeded(); - enqueueOrCompletePending(resultSet); - stopGlobalRequestTimer(); - cancelTimeout(globalTimeout); - } - } catch (Throwable error) { - abort(error, false); - } - } - - /** - * Processes an unsuccessful response. - * - *

Depending on the error, may trigger: - * - *

    - *
  1. a re-prepare cycle, see {@link #processUnprepared(Unprepared)}; - *
  2. an immediate retry on the next host, bypassing the retry policy, if the host was - * bootstrapping; - *
  3. an immediate abortion if the error is unrecoverable; - *
  4. further processing if the error is recoverable, see {@link - * #processRecoverableError(CoordinatorException)} - *
- * - * @param errorMessage the error message received. - */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void processErrorResponse(@NonNull Error errorMessage) { - assert lock.isHeldByCurrentThread(); - if (errorMessage instanceof Unprepared) { - processUnprepared((Unprepared) errorMessage); - } else { - CoordinatorException error = DseConversions.toThrowable(node, errorMessage, context); - if (error instanceof BootstrappingException) { - LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); - recordError(node, error); - trackNodeError(node, error); - sendRequest(statement, null, executionIndex, retryCount, false); - } else if (error instanceof QueryValidationException - || error instanceof FunctionFailureException - || error instanceof ProtocolError - || state > 1) { - // we only process recoverable errors for the first page, - // errors on subsequent pages will always trigger an immediate abortion - LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); - NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); - metricUpdater.incrementCounter( - DefaultNodeMetric.OTHER_ERRORS, executionProfile.getName()); - trackNodeError(node, error); - abort(error, true); - } else { - try { - processRecoverableError(error); - } catch (Throwable cause) { - abort(cause, false); - } - } - } - } - - /** - * Processes a recoverable error. - * - *

In most cases, delegates to the retry policy and its decision, see {@link - * #processRetryVerdict(RetryVerdict, Throwable)}. - * - * @param error the recoverable error. - */ - private void processRecoverableError(@NonNull CoordinatorException error) { - assert lock.isHeldByCurrentThread(); - NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); - RetryVerdict verdict; - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - if (error instanceof ReadTimeoutException) { - ReadTimeoutException readTimeout = (ReadTimeoutException) error; - verdict = - retryPolicy.onReadTimeoutVerdict( - statement, - readTimeout.getConsistencyLevel(), - readTimeout.getBlockFor(), - readTimeout.getReceived(), - readTimeout.wasDataPresent(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT); - } else if (error instanceof WriteTimeoutException) { - WriteTimeoutException writeTimeout = (WriteTimeoutException) error; - if (Conversions.resolveIdempotence(statement, executionProfile)) { - verdict = - retryPolicy.onWriteTimeoutVerdict( - statement, - writeTimeout.getConsistencyLevel(), - writeTimeout.getWriteType(), - writeTimeout.getBlockFor(), - writeTimeout.getReceived(), - retryCount); - } else { - verdict = RetryVerdict.RETHROW; - } - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT); - } else if (error instanceof UnavailableException) { - UnavailableException unavailable = (UnavailableException) error; - verdict = - retryPolicy.onUnavailableVerdict( - statement, - unavailable.getConsistencyLevel(), - unavailable.getRequired(), - unavailable.getAlive(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.UNAVAILABLES, - 
DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE); - } else { - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? retryPolicy.onErrorResponseVerdict(statement, error, retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR); - } - processRetryVerdict(verdict, error); - } - - /** - * Processes an {@link Unprepared} error by re-preparing then retrying on the same host. - * - * @param errorMessage the unprepared error message. - */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void processUnprepared(@NonNull Unprepared errorMessage) { - assert lock.isHeldByCurrentThread(); - ByteBuffer idToReprepare = ByteBuffer.wrap(errorMessage.id); - LOG.trace( - "[{}] Statement {} is not prepared on {}, re-preparing", - logPrefix, - Bytes.toHexString(idToReprepare), - node); - RepreparePayload repreparePayload = session.getRepreparePayloads().get(idToReprepare); - if (repreparePayload == null) { - throw new IllegalStateException( - String.format( - "Tried to execute unprepared query %s but we don't have the data to re-prepare it", - Bytes.toHexString(idToReprepare))); - } - Prepare prepare = repreparePayload.toMessage(); - Duration timeout = executionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT); - ThrottledAdminRequestHandler.prepare( - channel, - true, - prepare, - repreparePayload.customPayload, - timeout, - throttler, - sessionMetricUpdater, - logPrefix) - .start() - .whenComplete( - (repreparedId, exception) -> { - // If we run into an unrecoverable error, surface it to the client instead of - // retrying - Throwable fatalError = null; - if (exception == null) { - if (!repreparedId.equals(idToReprepare)) { - IllegalStateException illegalStateException = - new IllegalStateException( - String.format( - "ID 
mismatch while trying to reprepare (expected %s, got %s). " - + "This prepared statement won't work anymore. " - + "This usually happens when you run a 'USE...' query after " - + "the statement was prepared.", - Bytes.toHexString(idToReprepare), Bytes.toHexString(repreparedId))); - trackNodeError(node, illegalStateException); - fatalError = illegalStateException; - } else { - LOG.trace( - "[{}] Re-prepare successful, retrying on the same node ({})", - logPrefix, - node); - sendRequest(statement, node, executionIndex, retryCount, false); - } - } else { - if (exception instanceof UnexpectedResponseException) { - Message prepareErrorMessage = ((UnexpectedResponseException) exception).message; - if (prepareErrorMessage instanceof Error) { - CoordinatorException prepareError = - DseConversions.toThrowable(node, (Error) prepareErrorMessage, context); - if (prepareError instanceof QueryValidationException - || prepareError instanceof FunctionFailureException - || prepareError instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error on re-prepare, rethrowing", logPrefix); - trackNodeError(node, prepareError); - fatalError = prepareError; - } - } - } else if (exception instanceof RequestThrottlingException) { - trackNodeError(node, exception); - fatalError = exception; - } - if (fatalError == null) { - LOG.trace("[{}] Re-prepare failed, trying next node", logPrefix); - recordError(node, exception); - trackNodeError(node, exception); - sendRequest(statement, null, executionIndex, retryCount, false); - } - } - if (fatalError != null) { - lock.lock(); - try { - abort(fatalError, true); - } finally { - lock.unlock(); - } - } - }); - } - - /** - * Processes the retry decision by triggering a retry, aborting or ignoring; also records the - * failures for further access. - * - * @param verdict the verdict to process. - * @param error the original error. 
- */ - private void processRetryVerdict(@NonNull RetryVerdict verdict, @NonNull Throwable error) { - assert lock.isHeldByCurrentThread(); - LOG.trace("[{}] Processing retry decision {}", logPrefix, verdict); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - recordError(node, error); - trackNodeError(node, error); - sendRequest( - verdict.getRetryRequest(statement), node, executionIndex, retryCount + 1, false); - break; - case RETRY_NEXT: - recordError(node, error); - trackNodeError(node, error); - sendRequest( - verdict.getRetryRequest(statement), null, executionIndex, retryCount + 1, false); - break; - case RETHROW: - trackNodeError(node, error); - abort(error, true); - break; - case IGNORE: - processResultResponse(Void.INSTANCE, null); - break; - } - } - - // PAGE HANDLING - - /** - * Enqueues a response or, if the client was already waiting for it, completes the pending - * future. - * - *

Guarded by {@link #lock}. - * - * @param pageOrError the next page, or an error. - */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void enqueueOrCompletePending(@NonNull Object pageOrError) { - assert lock.isHeldByCurrentThread(); - - if (queue == null) { - // This is the first time this callback tries to stream something back to the client, check - // if it can be selected - if (!chosenCallback.complete(this)) { - if (LOG.isTraceEnabled()) { - LOG.trace( - "[{}] Trying to enqueue {} but another callback was already chosen, aborting", - logPrefix, - asTraceString(pageOrError)); - } - // Discard the data, this callback will be canceled shortly since the chosen callback - // invoked cancelScheduledTasks - return; - } - - queue = new ArrayDeque<>(getMaxEnqueuedPages(statement)); - numPagesRequested = protocolBackpressureAvailable ? getMaxEnqueuedPages(statement) : 0; - cancelScheduledTasks(this); - } - - if (pendingResult != null) { - if (LOG.isTraceEnabled()) { - LOG.trace( - "[{}] Client was waiting on empty queue, completing with {}", - logPrefix, - asTraceString(pageOrError)); - } - CompletableFuture tmp = pendingResult; - // null out pendingResult before completing it because its completion - // may trigger a call to fetchNextPage -> dequeueOrCreatePending, - // which expects pendingResult to be null. 
- pendingResult = null; - completeResultSetFuture(tmp, pageOrError); - } else { - if (LOG.isTraceEnabled()) { - LOG.trace("[{}] Enqueuing {}", logPrefix, asTraceString(pageOrError)); - } - queue.add(pageOrError); - // Backpressure without protocol support: if the queue grows too large, - // disable auto-read so that the channel eventually becomes - // non-writable on the server side (causing it to back off for a while) - if (!protocolBackpressureAvailable - && queue.size() == getMaxEnqueuedPages(statement) - && state > 0) { - LOG.trace( - "[{}] Exceeded {} queued response pages, disabling auto-read", - logPrefix, - queue.size()); - channel.config().setAutoRead(false); - } - } - } - - /** - * Dequeue a response or, if the queue is empty, create the future that will get notified of the - * next response, when it arrives. - * - *

Called from user code, see {@link ContinuousAsyncResultSet#fetchNextPage()}. - * - * @return the next page's future; never null. - */ - @NonNull - public CompletableFuture dequeueOrCreatePending() { - lock.lock(); - try { - // If the client was already waiting for a page, there's no way it can call this method - // again - // (this is guaranteed by our public API because in order to ask for the next page, - // you need the reference to the previous page). - assert pendingResult == null; - - Object head = null; - if (queue != null) { - head = queue.poll(); - if (!protocolBackpressureAvailable - && head != null - && queue.size() == getMaxEnqueuedPages(statement) - 1) { - LOG.trace( - "[{}] Back to {} queued response pages, re-enabling auto-read", - logPrefix, - queue.size()); - channel.config().setAutoRead(true); - } - maybeRequestMore(); - } - - if (head != null) { - if (state == STATE_FAILED && !(head instanceof Throwable)) { - LOG.trace( - "[{}] Client requested next page on cancelled queue, discarding page and returning cancelled future", - logPrefix); - return cancelledResultSetFuture(); - } else { - if (LOG.isTraceEnabled()) { - LOG.trace( - "[{}] Client requested next page on non-empty queue, returning immediate future of {}", - logPrefix, - asTraceString(head)); - } - return immediateResultSetFuture(head); - } - } else { - if (state == STATE_FAILED) { - LOG.trace( - "[{}] Client requested next page on cancelled empty queue, returning cancelled future", - logPrefix); - return cancelledResultSetFuture(); - } else { - LOG.trace( - "[{}] Client requested next page but queue is empty, installing future", logPrefix); - pendingResult = new CompletableFuture<>(); - // Only schedule a timeout if we're past the first page (the first page's timeout is - // handled in sendRequest). - if (state > 1) { - pageTimeout = schedulePageTimeout(state); - // Note: each new timeout is cancelled when the next response arrives, see - // onResponse(Frame). 
- } - return pendingResult; - } - } - } finally { - lock.unlock(); - } - } - - /** - * If the total number of results in the queue and in-flight (requested - received) is less than - * half the queue size, then request more pages, unless the {@link #state} is failed, we're - * still waiting for the first page (so maybe still throttled or in the middle of a retry), or - * we don't support backpressure at the protocol level. - */ - @SuppressWarnings("GuardedBy") - private void maybeRequestMore() { - assert lock.isHeldByCurrentThread(); - if (state < 2 || streamId == -1 || !protocolBackpressureAvailable) { - return; - } - // if we have already requested more than the client needs, then no need to request some more - int maxPages = getMaxPages(statement); - if (maxPages > 0 && numPagesRequested >= maxPages) { - return; - } - // the pages received so far, which is the state minus one - int received = state - 1; - int requested = numPagesRequested; - // the pages that fit in the queue, which is the queue free space minus the requests in flight - int freeSpace = getMaxEnqueuedPages(statement) - queue.size(); - int inFlight = requested - received; - int numPagesFittingInQueue = freeSpace - inFlight; - if (numPagesFittingInQueue > 0 - && numPagesFittingInQueue >= getMaxEnqueuedPages(statement) / 2) { - LOG.trace("[{}] Requesting more {} pages", logPrefix, numPagesFittingInQueue); - numPagesRequested = requested + numPagesFittingInQueue; - sendMorePagesRequest(numPagesFittingInQueue); - } - } - - /** - * Sends a request for more pages (a.k.a. backpressure request). - * - * @param nextPages the number of extra pages to request. 
- */ - @SuppressWarnings("GuardedBy") - private void sendMorePagesRequest(int nextPages) { - assert lock.isHeldByCurrentThread(); - assert channel != null : "expected valid connection in order to request more pages"; - assert protocolBackpressureAvailable; - assert streamId != -1; - - LOG.trace("[{}] Sending request for more pages", logPrefix); - ThrottledAdminRequestHandler.query( - channel, - true, - Revise.requestMoreContinuousPages(streamId, nextPages), - statement.getCustomPayload(), - getReviseRequestTimeout(statement), - throttler, - session.getMetricUpdater(), - logPrefix, - "request " + nextPages + " more pages for id " + streamId) - .start() - .handle( - (result, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, "[{}] Error requesting more pages, aborting.", logPrefix, error); - lock.lock(); - try { - // Set fromServer to false because we want the callback to still cancel the - // session if possible or else the server will wait on a timeout. - abort(error, false); - } finally { - lock.unlock(); - } - } - return null; - }); - } - - /** Cancels the given timeout, if non null. 
*/ - private void cancelTimeout(Timeout timeout) { - if (timeout != null) { - LOG.trace("[{}] Cancelling timeout", logPrefix); - timeout.cancel(); - } - } - - // CANCELLATION - - public void cancel() { - lock.lock(); - try { - if (state < 0) { - return; - } else { - LOG.trace( - "[{}] Cancelling continuous paging session with state {} on node {}", - logPrefix, - state, - node); - state = STATE_FAILED; - if (pendingResult != null) { - pendingResult.cancel(true); - } - releaseStreamId(); - } - } finally { - lock.unlock(); - } - reenableAutoReadIfNeeded(); - } - - @SuppressWarnings("GuardedBy") - private void releaseStreamId() { - assert lock.isHeldByCurrentThread(); - // If we saw the last response already, InFlightHandler will release the id so no need to - // cancel explicitly - if (streamId >= 0 && !sawLastResponse && !channel.closeFuture().isDone()) { - // This orphans the stream id, but it will still be held until we see the last response: - channel.cancel(this); - // This tells the server to stop streaming, and send a terminal response: - sendCancelRequest(); - } - } - - @SuppressWarnings("GuardedBy") - private void sendCancelRequest() { - assert lock.isHeldByCurrentThread(); - LOG.trace("[{}] Sending cancel request", logPrefix); - ThrottledAdminRequestHandler.query( - channel, - true, - Revise.cancelContinuousPaging(streamId), - statement.getCustomPayload(), - getReviseRequestTimeout(statement), - throttler, - session.getMetricUpdater(), - logPrefix, - "cancel request") - .start() - .handle( - (result, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, - "[{}] Error sending cancel request. 
" - + "This is not critical (the request will eventually time out server-side).", - logPrefix, - error); - } else { - LOG.trace("[{}] Continuous paging session cancelled successfully", logPrefix); - } - return null; - }); - sentCancelRequest = true; - } - - // TERMINATION - - private void reenableAutoReadIfNeeded() { - // Make sure we don't leave the channel unreadable - LOG.trace("[{}] Re-enabling auto-read", logPrefix); - if (!protocolBackpressureAvailable) { - channel.config().setAutoRead(true); - } - } - - // ERROR HANDLING - - private void trackNodeError(@NonNull Node node, @NonNull Throwable error) { - if (nodeErrorReported.compareAndSet(false, true)) { - long latencyNanos = System.nanoTime() - this.messageStartTimeNanos; - context - .getRequestTracker() - .onNodeError(this.statement, error, latencyNanos, executionProfile, node, logPrefix); - } - } - - /** - * Aborts the continuous paging session due to an error that can be either from the server or - * the client. - * - * @param error the error that causes the abortion. - * @param fromServer whether the error was triggered by the coordinator or by the driver. - */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void abort(@NonNull Throwable error, boolean fromServer) { - assert lock.isHeldByCurrentThread(); - LOG.trace( - "[{}] Aborting due to {} ({})", - logPrefix, - error.getClass().getSimpleName(), - error.getMessage()); - if (channel == null) { - // This only happens when sending the initial request, if no host was available - // or if the iterator returned by the LBP threw an exception. - // In either case the write was not even attempted, and - // we set the state right now. - enqueueOrCompletePending(error); - state = STATE_FAILED; - } else if (state > 0) { - enqueueOrCompletePending(error); - if (fromServer) { - // We can safely assume the server won't send any more responses, - // so set the state and call release() right now. 
- state = STATE_FAILED; - reenableAutoReadIfNeeded(); - } else { - // attempt to cancel first, i.e. ask server to stop sending responses, - // and only then release. - cancel(); - } - } - stopGlobalRequestTimer(); - cancelTimeout(globalTimeout); - } - - // METRICS - - private void stopNodeMessageTimer() { - if (stopNodeMessageTimerReported.compareAndSet(false, true)) { - ((DefaultNode) node) - .getMetricUpdater() - .updateTimer( - messagesMetric, - executionProfile.getName(), - System.nanoTime() - messageStartTimeNanos, - TimeUnit.NANOSECONDS); - } - } - - private void stopGlobalRequestTimer() { - session - .getMetricUpdater() - .updateTimer( - continuousRequestsMetric, - null, - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - - private void updateErrorMetrics( - @NonNull NodeMetricUpdater metricUpdater, - @NonNull RetryVerdict verdict, - @NonNull DefaultNodeMetric error, - @NonNull DefaultNodeMetric retriesOnError, - @NonNull DefaultNodeMetric ignoresOnError) { - metricUpdater.incrementCounter(error, executionProfile.getName()); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - case RETRY_NEXT: - metricUpdater.incrementCounter(DefaultNodeMetric.RETRIES, executionProfile.getName()); - metricUpdater.incrementCounter(retriesOnError, executionProfile.getName()); - break; - case IGNORE: - metricUpdater.incrementCounter(DefaultNodeMetric.IGNORES, executionProfile.getName()); - metricUpdater.incrementCounter(ignoresOnError, executionProfile.getName()); - break; - case RETHROW: - // nothing do do - } - } - - // UTILITY METHODS - - @NonNull - private CompletableFuture immediateResultSetFuture(@NonNull Object pageOrError) { - CompletableFuture future = new CompletableFuture<>(); - completeResultSetFuture(future, pageOrError); - return future; - } - - @NonNull - private CompletableFuture cancelledResultSetFuture() { - return immediateResultSetFuture( - new CancellationException( - "Can't get more results because the continuous query has failed 
already. " - + "Most likely this is because the query was cancelled")); - } - - private void completeResultSetFuture( - @NonNull CompletableFuture future, @NonNull Object pageOrError) { - long now = System.nanoTime(); - long totalLatencyNanos = now - startTimeNanos; - long nodeLatencyNanos = now - messageStartTimeNanos; - if (resultSetClass.isInstance(pageOrError)) { - if (future.complete(resultSetClass.cast(pageOrError))) { - throttler.signalSuccess(ContinuousRequestHandlerBase.this); - if (nodeSuccessReported.compareAndSet(false, true)) { - context - .getRequestTracker() - .onNodeSuccess(statement, nodeLatencyNanos, executionProfile, node, logPrefix); - } - context - .getRequestTracker() - .onSuccess(statement, totalLatencyNanos, executionProfile, node, logPrefix); - } - } else { - Throwable error = (Throwable) pageOrError; - if (future.completeExceptionally(error)) { - context - .getRequestTracker() - .onError(statement, error, totalLatencyNanos, executionProfile, node, logPrefix); - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(ContinuousRequestHandlerBase.this); - session - .getMetricUpdater() - .incrementCounter(clientTimeoutsMetric, executionProfile.getName()); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(ContinuousRequestHandlerBase.this, error); - } - } - } - } - - @NonNull - private ExecutionInfo createExecutionInfo(@NonNull Result result, @Nullable Frame response) { - ByteBuffer pagingState = - result instanceof Rows ? ((Rows) result).getMetadata().pagingState : null; - return new DefaultExecutionInfo( - statement, - node, - startedSpeculativeExecutionsCount.get(), - executionIndex, - errors, - pagingState, - response, - true, - session, - context, - executionProfile); - } - - private void logTimeoutSchedulingError(IllegalStateException timeoutError) { - // If we're racing with session shutdown, the timer might be stopped already. 
We don't want - // to schedule more executions anyway, so swallow the error. - if (!"cannot be started once stopped".equals(timeoutError.getMessage())) { - Loggers.warnWithException( - LOG, "[{}] Error while scheduling timeout", logPrefix, timeoutError); - } - } - - @NonNull - private String asTraceString(@NonNull Object pageOrError) { - return resultSetClass.isInstance(pageOrError) - ? "page " + pageNumber(resultSetClass.cast(pageOrError)) - : ((Exception) pageOrError).getClass().getSimpleName(); - } - - private int getState() { - lock.lock(); - try { - return state; - } finally { - lock.unlock(); - } - } - - private CompletableFuture getPendingResult() { - lock.lock(); - try { - return pendingResult; - } finally { - lock.unlock(); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSet.java deleted file mode 100644 index 8562fde5905..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSet.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe // wraps a mutable queue -public class DefaultContinuousAsyncResultSet implements ContinuousAsyncResultSet { - - private final Iterable currentPage; - private final ColumnDefinitions columnDefinitions; - private final int pageNumber; - private final boolean hasMorePages; - private final ExecutionInfo executionInfo; - private final ContinuousCqlRequestHandler handler; - private final CountingIterator iterator; - - public DefaultContinuousAsyncResultSet( - CountingIterator iterator, - ColumnDefinitions columnDefinitions, - int pageNumber, - boolean hasMorePages, - ExecutionInfo executionInfo, - ContinuousCqlRequestHandler handler) { - this.columnDefinitions = columnDefinitions; - this.pageNumber = pageNumber; - this.hasMorePages = hasMorePages; - this.executionInfo = executionInfo; - this.handler = handler; - this.iterator = iterator; - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return columnDefinitions; - } - - @Override - public boolean wasApplied() { - // always return true for non-conditional updates - return true; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @Override - public int 
pageNumber() { - return pageNumber; - } - - @Override - public boolean hasMorePages() { - return hasMorePages; - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public int remaining() { - return iterator.remaining(); - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - if (!hasMorePages()) { - throw new IllegalStateException( - "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); - } - return handler.fetchNextPage(); - } - - @Override - public void cancel() { - handler.cancel(); - } - - public static ContinuousAsyncResultSet empty(ExecutionInfo executionInfo) { - - return new ContinuousAsyncResultSet() { - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return EmptyColumnDefinitions.INSTANCE; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - public Iterable currentPage() { - return Collections.emptyList(); - } - - @Override - public int remaining() { - return 0; - } - - @Override - public boolean hasMorePages() { - return false; - } - - @Override - public int pageNumber() { - return 1; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() - throws IllegalStateException { - throw new IllegalStateException( - "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); - } - - @Override - public void cancel() { - // noop - } - - @Override - public boolean wasApplied() { - // always true - return true; - } - }; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSet.java deleted file mode 100644 index 929400bc7a6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSet.java +++ 
/dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import net.jcip.annotations.NotThreadSafe; - -/** - * This class is roughly equivalent to {@link - * com.datastax.oss.driver.internal.core.cql.MultiPageResultSet}, except that {@link - * RowIterator#maybeMoveToNextPage()} needs to check for cancellation before fetching the next page. 
- */ -@NotThreadSafe -public class DefaultContinuousResultSet implements ContinuousResultSet { - - private final RowIterator iterator; - private final List executionInfos = new ArrayList<>(); - private final ColumnDefinitions columnDefinitions; - - public DefaultContinuousResultSet(ContinuousAsyncResultSet firstPage) { - iterator = new RowIterator(firstPage); - columnDefinitions = firstPage.getColumnDefinitions(); - executionInfos.add(firstPage.getExecutionInfo()); - } - - @Override - public void cancel() { - iterator.cancel(); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return columnDefinitions; - } - - @NonNull - @Override - public List getExecutionInfos() { - return executionInfos; - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - @Override - public boolean isFullyFetched() { - return iterator.isFullyFetched(); - } - - @Override - public int getAvailableWithoutFetching() { - return iterator.remaining(); - } - - @Override - public boolean wasApplied() { - return iterator.wasApplied(); - } - - private class RowIterator extends CountingIterator { - private ContinuousAsyncResultSet currentPage; - private Iterator currentRows; - private boolean cancelled = false; - - private RowIterator(ContinuousAsyncResultSet firstPage) { - super(firstPage.remaining()); - currentPage = firstPage; - currentRows = firstPage.currentPage().iterator(); - } - - @Override - protected Row computeNext() { - maybeMoveToNextPage(); - return currentRows.hasNext() ? 
currentRows.next() : endOfData(); - } - - private void maybeMoveToNextPage() { - if (!cancelled && !currentRows.hasNext() && currentPage.hasMorePages()) { - BlockingOperation.checkNotDriverThread(); - ContinuousAsyncResultSet nextPage = - CompletableFutures.getUninterruptibly(currentPage.fetchNextPage()); - currentPage = nextPage; - remaining += currentPage.remaining(); - currentRows = nextPage.currentPage().iterator(); - executionInfos.add(nextPage.getExecutionInfo()); - } - } - - private boolean isFullyFetched() { - return !currentPage.hasMorePages(); - } - - private boolean wasApplied() { - return currentPage.wasApplied(); - } - - private void cancel() { - currentPage.cancel(); - cancelled = true; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessor.java deleted file mode 100644 index afe0e864181..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessor.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous.reactive; - -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.cql.reactive.FailedReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ContinuousCqlRequestReactiveProcessor - implements RequestProcessor, ContinuousReactiveResultSet> { - - public static final GenericType CONTINUOUS_REACTIVE_RESULT_SET = - GenericType.of(ContinuousReactiveResultSet.class); - - private final ContinuousCqlRequestAsyncProcessor asyncProcessor; - - public ContinuousCqlRequestReactiveProcessor(ContinuousCqlRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(CONTINUOUS_REACTIVE_RESULT_SET); - } - - @Override - public ContinuousReactiveResultSet process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new DefaultContinuousReactiveResultSet( - () -> asyncProcessor.process(request, session, context, sessionLogPrefix)); - } - - @Override - public ContinuousReactiveResultSet newFailure(RuntimeException error) { - return new FailedReactiveResultSet(error); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/DefaultContinuousReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/DefaultContinuousReactiveResultSet.java deleted file mode 100644 index b3f301edea6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/DefaultContinuousReactiveResultSet.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous.reactive; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; -import com.datastax.dse.driver.internal.core.cql.reactive.ReactiveResultSetBase; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultContinuousReactiveResultSet - extends ReactiveResultSetBase implements ContinuousReactiveResultSet { - - public DefaultContinuousReactiveResultSet( - Callable> firstPage) { - super(firstPage); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessor.java deleted file mode 100644 index 3539c2e698c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessor.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlRequestReactiveProcessor - implements RequestProcessor, ReactiveResultSet> { - - public static final GenericType REACTIVE_RESULT_SET = - GenericType.of(ReactiveResultSet.class); - - private final CqlRequestAsyncProcessor asyncProcessor; - - public CqlRequestReactiveProcessor(CqlRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(REACTIVE_RESULT_SET); - } - - @Override - public ReactiveResultSet process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new DefaultReactiveResultSet( - () -> asyncProcessor.process(request, session, context, sessionLogPrefix)); - } - - @Override - public ReactiveResultSet newFailure(RuntimeException error) { - return new FailedReactiveResultSet(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSet.java deleted file mode 100644 index 33b6dc02f48..00000000000 --- 
a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSet.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultReactiveResultSet extends ReactiveResultSetBase { - - public DefaultReactiveResultSet(Callable> firstPage) { - super(firstPage); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveRow.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveRow.java deleted file mode 100644 index ca3b93e7f6b..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveRow.java +++ /dev/null @@ -1,580 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -class 
DefaultReactiveRow implements ReactiveRow { - - private final Row row; - private final ExecutionInfo executionInfo; - - DefaultReactiveRow(@NonNull Row row, @NonNull ExecutionInfo executionInfo) { - this.row = row; - this.executionInfo = executionInfo; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return row.getColumnDefinitions(); - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return row.getBytesUnsafe(i); - } - - @Override - public boolean isNull(int i) { - return row.isNull(i); - } - - @Override - public T get(int i, TypeCodec codec) { - return row.get(i, codec); - } - - @Override - public T get(int i, GenericType targetType) { - return row.get(i, targetType); - } - - @Override - public T get(int i, Class targetClass) { - return row.get(i, targetClass); - } - - @Override - public Object getObject(int i) { - return row.getObject(i); - } - - @Override - public boolean getBoolean(int i) { - return row.getBoolean(i); - } - - @Override - public byte getByte(int i) { - return row.getByte(i); - } - - @Override - public double getDouble(int i) { - return row.getDouble(i); - } - - @Override - public float getFloat(int i) { - return row.getFloat(i); - } - - @Override - public int getInt(int i) { - return row.getInt(i); - } - - @Override - public long getLong(int i) { - return row.getLong(i); - } - - @Override - public short getShort(int i) { - return row.getShort(i); - } - - @Override - public Instant getInstant(int i) { - return row.getInstant(i); - } - - @Override - public LocalDate getLocalDate(int i) { - return row.getLocalDate(i); - } - - @Override - public LocalTime getLocalTime(int i) { - return row.getLocalTime(i); - } - - @Override - public ByteBuffer getByteBuffer(int i) { - return row.getByteBuffer(i); - } - - @Override - public String getString(int i) { - return row.getString(i); - } - - @Override - public 
BigInteger getBigInteger(int i) { - return row.getBigInteger(i); - } - - @Override - public BigDecimal getBigDecimal(int i) { - return row.getBigDecimal(i); - } - - @Override - public UUID getUuid(int i) { - return row.getUuid(i); - } - - @Override - public InetAddress getInetAddress(int i) { - return row.getInetAddress(i); - } - - @Override - public CqlDuration getCqlDuration(int i) { - return row.getCqlDuration(i); - } - - @Override - public Token getToken(int i) { - return row.getToken(i); - } - - @Override - public List getList(int i, @NonNull Class elementsClass) { - return row.getList(i, elementsClass); - } - - @Override - public Set getSet(int i, @NonNull Class elementsClass) { - return row.getSet(i, elementsClass); - } - - @Override - public Map getMap(int i, @NonNull Class keyClass, @NonNull Class valueClass) { - return row.getMap(i, keyClass, valueClass); - } - - @Override - public UdtValue getUdtValue(int i) { - return row.getUdtValue(i); - } - - @Override - public TupleValue getTupleValue(int i) { - return row.getTupleValue(i); - } - - @Override - public int size() { - return row.size(); - } - - @NonNull - @Override - public DataType getType(int i) { - return row.getType(i); - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return row.codecRegistry(); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return row.protocolVersion(); - } - - @Override - public ByteBuffer getBytesUnsafe(@NonNull String name) { - return row.getBytesUnsafe(name); - } - - @Override - public boolean isNull(@NonNull String name) { - return row.isNull(name); - } - - @Override - public T get(@NonNull String name, @NonNull TypeCodec codec) { - return row.get(name, codec); - } - - @Override - public T get(@NonNull String name, @NonNull GenericType targetType) { - return row.get(name, targetType); - } - - @Override - public T get(@NonNull String name, @NonNull Class targetClass) { - return row.get(name, targetClass); - } - - @Override - 
public Object getObject(@NonNull String name) { - return row.getObject(name); - } - - @Override - public boolean getBoolean(@NonNull String name) { - return row.getBoolean(name); - } - - @Override - public byte getByte(@NonNull String name) { - return row.getByte(name); - } - - @Override - public double getDouble(@NonNull String name) { - return row.getDouble(name); - } - - @Override - public float getFloat(@NonNull String name) { - return row.getFloat(name); - } - - @Override - public int getInt(@NonNull String name) { - return row.getInt(name); - } - - @Override - public long getLong(@NonNull String name) { - return row.getLong(name); - } - - @Override - public short getShort(@NonNull String name) { - return row.getShort(name); - } - - @Override - public Instant getInstant(@NonNull String name) { - return row.getInstant(name); - } - - @Override - public LocalDate getLocalDate(@NonNull String name) { - return row.getLocalDate(name); - } - - @Override - public LocalTime getLocalTime(@NonNull String name) { - return row.getLocalTime(name); - } - - @Override - public ByteBuffer getByteBuffer(@NonNull String name) { - return row.getByteBuffer(name); - } - - @Override - public String getString(@NonNull String name) { - return row.getString(name); - } - - @Override - public BigInteger getBigInteger(@NonNull String name) { - return row.getBigInteger(name); - } - - @Override - public BigDecimal getBigDecimal(@NonNull String name) { - return row.getBigDecimal(name); - } - - @Override - public UUID getUuid(@NonNull String name) { - return row.getUuid(name); - } - - @Override - public InetAddress getInetAddress(@NonNull String name) { - return row.getInetAddress(name); - } - - @Override - public CqlDuration getCqlDuration(@NonNull String name) { - return row.getCqlDuration(name); - } - - @Override - public Token getToken(@NonNull String name) { - return row.getToken(name); - } - - @Override - public List getList(@NonNull String name, @NonNull Class elementsClass) { - return 
row.getList(name, elementsClass); - } - - @Override - public Set getSet(@NonNull String name, @NonNull Class elementsClass) { - return row.getSet(name, elementsClass); - } - - @Override - public Map getMap( - @NonNull String name, @NonNull Class keyClass, @NonNull Class valueClass) { - return row.getMap(name, keyClass, valueClass); - } - - @Override - public UdtValue getUdtValue(@NonNull String name) { - return row.getUdtValue(name); - } - - @Override - public TupleValue getTupleValue(@NonNull String name) { - return row.getTupleValue(name); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return row.allIndicesOf(name); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return row.firstIndexOf(name); - } - - @NonNull - @Override - public DataType getType(@NonNull String name) { - return row.getType(name); - } - - @Override - public ByteBuffer getBytesUnsafe(@NonNull CqlIdentifier id) { - return row.getBytesUnsafe(id); - } - - @Override - public boolean isNull(@NonNull CqlIdentifier id) { - return row.isNull(id); - } - - @Override - public T get(@NonNull CqlIdentifier id, @NonNull TypeCodec codec) { - return row.get(id, codec); - } - - @Override - public T get(@NonNull CqlIdentifier id, @NonNull GenericType targetType) { - return row.get(id, targetType); - } - - @Override - public T get(@NonNull CqlIdentifier id, @NonNull Class targetClass) { - return row.get(id, targetClass); - } - - @Override - public Object getObject(@NonNull CqlIdentifier id) { - return row.getObject(id); - } - - @Override - public boolean getBoolean(@NonNull CqlIdentifier id) { - return row.getBoolean(id); - } - - @Override - public byte getByte(@NonNull CqlIdentifier id) { - return row.getByte(id); - } - - @Override - public double getDouble(@NonNull CqlIdentifier id) { - return row.getDouble(id); - } - - @Override - public float getFloat(@NonNull CqlIdentifier id) { - return row.getFloat(id); - } - - @Override - public int getInt(@NonNull 
CqlIdentifier id) { - return row.getInt(id); - } - - @Override - public long getLong(@NonNull CqlIdentifier id) { - return row.getLong(id); - } - - @Override - public short getShort(@NonNull CqlIdentifier id) { - return row.getShort(id); - } - - @Override - public Instant getInstant(@NonNull CqlIdentifier id) { - return row.getInstant(id); - } - - @Override - public LocalDate getLocalDate(@NonNull CqlIdentifier id) { - return row.getLocalDate(id); - } - - @Override - public LocalTime getLocalTime(@NonNull CqlIdentifier id) { - return row.getLocalTime(id); - } - - @Override - public ByteBuffer getByteBuffer(@NonNull CqlIdentifier id) { - return row.getByteBuffer(id); - } - - @Override - public String getString(@NonNull CqlIdentifier id) { - return row.getString(id); - } - - @Override - public BigInteger getBigInteger(@NonNull CqlIdentifier id) { - return row.getBigInteger(id); - } - - @Override - public BigDecimal getBigDecimal(@NonNull CqlIdentifier id) { - return row.getBigDecimal(id); - } - - @Override - public UUID getUuid(@NonNull CqlIdentifier id) { - return row.getUuid(id); - } - - @Override - public InetAddress getInetAddress(@NonNull CqlIdentifier id) { - return row.getInetAddress(id); - } - - @Override - public CqlDuration getCqlDuration(@NonNull CqlIdentifier id) { - return row.getCqlDuration(id); - } - - @Override - public Token getToken(@NonNull CqlIdentifier id) { - return row.getToken(id); - } - - @Override - public List getList(@NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return row.getList(id, elementsClass); - } - - @Override - public Set getSet(@NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return row.getSet(id, elementsClass); - } - - @Override - public Map getMap( - @NonNull CqlIdentifier id, @NonNull Class keyClass, @NonNull Class valueClass) { - return row.getMap(id, keyClass, valueClass); - } - - @Override - public UdtValue getUdtValue(@NonNull CqlIdentifier id) { - return row.getUdtValue(id); - } - - @Override - 
public TupleValue getTupleValue(@NonNull CqlIdentifier id) { - return row.getTupleValue(id); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return row.allIndicesOf(id); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return row.firstIndexOf(id); - } - - @NonNull - @Override - public DataType getType(@NonNull CqlIdentifier id) { - return row.getType(id); - } - - @Override - public boolean isDetached() { - return row.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - row.attach(attachmentPoint); - } - - @Override - public String toString() { - return "DefaultReactiveRow{row=" + row + ", executionInfo=" + executionInfo + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/EmptySubscription.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/EmptySubscription.java deleted file mode 100644 index f760ecc395e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/EmptySubscription.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import org.reactivestreams.Subscription; - -public class EmptySubscription implements Subscription { - - public static final EmptySubscription INSTANCE = new EmptySubscription(); - - private EmptySubscription() {} - - @Override - public void request(long n) {} - - @Override - public void cancel() {} -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedPublisher.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedPublisher.java deleted file mode 100644 index 638434bb2d0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedPublisher.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import java.util.Objects; -import org.reactivestreams.Publisher; -import org.reactivestreams.Subscriber; - -/** - * A {@link Publisher} that immediately signals the error passed at instantiation to all its - * subscribers. 
- */ -public class FailedPublisher implements Publisher { - - protected final Throwable error; - - public FailedPublisher(Throwable error) { - this.error = error; - } - - @Override - public void subscribe(Subscriber subscriber) { - Objects.requireNonNull(subscriber, "Subscriber cannot be null"); - // Per rule 1.9, we need to call onSubscribe before any other signal. Pass a dummy - // subscription since we know it will never be used. - subscriber.onSubscribe(EmptySubscription.INSTANCE); - // Signal the error to the subscriber right away. This is safe to do because per rule 2.10, - // a Subscriber MUST be prepared to receive an onError signal without a preceding - // Subscription.request(long n) call. - // Also, per rule 2.13: onError MUST return normally except when any provided parameter - // is null (which is not the case here); so we don't need care about catching errors here. - subscriber.onError(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedReactiveResultSet.java deleted file mode 100644 index 31c34d649aa..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedReactiveResultSet.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.internal.core.cql.continuous.reactive.ContinuousCqlRequestReactiveProcessor; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.reactivestreams.Publisher; - -/** - * A {@link ReactiveResultSet} that immediately signals the error passed at instantiation to all its - * subscribers. 
- * - * @see CqlRequestReactiveProcessor#newFailure(java.lang.RuntimeException) - * @see ContinuousCqlRequestReactiveProcessor#newFailure(java.lang.RuntimeException) - */ -public class FailedReactiveResultSet extends FailedPublisher - implements ReactiveResultSet, ContinuousReactiveResultSet { - - public FailedReactiveResultSet(Throwable error) { - super(error); - } - - @NonNull - @Override - public Publisher getColumnDefinitions() { - return new FailedPublisher<>(error); - } - - @NonNull - @Override - public Publisher getExecutionInfos() { - return new FailedPublisher<>(error); - } - - @NonNull - @Override - public Publisher wasApplied() { - return new FailedPublisher<>(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveOperators.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveOperators.java deleted file mode 100644 index f058149f570..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveOperators.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.concurrent.atomic.AtomicLong; - -public final class ReactiveOperators { - - /** - * Atomically adds the given value to the given AtomicLong, bound to Long.MAX_VALUE. - * - * @param current the current value. - * @param toAdd the delta to add. - */ - public static void addCap(@NonNull AtomicLong current, long toAdd) { - long r, u; - do { - r = current.get(); - if (r == Long.MAX_VALUE) { - return; - } - u = r + toAdd; - if (u < 0L) { - u = Long.MAX_VALUE; - } - } while (!current.compareAndSet(r, u)); - } - - /** - * Atomically subtracts the given value from the given AtomicLong, bound to 0. - * - * @param current the current value. - * @param toSub the delta to subtract. - */ - public static void subCap(@NonNull AtomicLong current, long toSub) { - long r, u; - do { - r = current.get(); - if (r == 0 || r == Long.MAX_VALUE) { - return; - } - u = Math.max(r - toSub, 0); - } while (!current.compareAndSet(r, u)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetBase.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetBase.java deleted file mode 100644 index 5ba00e22298..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetBase.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; -import org.reactivestreams.Publisher; -import org.reactivestreams.Subscriber; - -@ThreadSafe -public abstract class ReactiveResultSetBase> - implements ReactiveResultSet { - - private final Callable> firstPage; - - private final AtomicBoolean alreadySubscribed = new AtomicBoolean(false); - - private final SimpleUnicastProcessor columnDefinitionsPublisher = - new SimpleUnicastProcessor<>(); - - private final SimpleUnicastProcessor executionInfosPublisher = - new SimpleUnicastProcessor<>(); - - private final SimpleUnicastProcessor wasAppliedPublisher = - new SimpleUnicastProcessor<>(); - - protected ReactiveResultSetBase(Callable> firstPage) { - this.firstPage = firstPage; - } - - @Override - public void subscribe(@NonNull Subscriber subscriber) { - // As per rule 1.9, we need to throw an NPE if subscriber is null - Objects.requireNonNull(subscriber, "Subscriber cannot 
be null"); - // As per rule 1.11, this publisher is allowed to support only one subscriber. - if (alreadySubscribed.compareAndSet(false, true)) { - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - subscriber, columnDefinitionsPublisher, executionInfosPublisher, wasAppliedPublisher); - try { - subscriber.onSubscribe(subscription); - // must be done after onSubscribe - subscription.start(firstPage); - } catch (Throwable t) { - // As per rule 2.13: In the case that this rule is violated, - // any associated Subscription to the Subscriber MUST be considered as - // cancelled, and the caller MUST raise this error condition in a fashion - // that is adequate for the runtime environment. - subscription.doOnError( - new IllegalStateException( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", - t)); - } - } else { - subscriber.onSubscribe(EmptySubscription.INSTANCE); - subscriber.onError( - new IllegalStateException("This publisher does not support multiple subscriptions")); - } - // As per 2.13, this method must return normally (i.e. not throw) - } - - @NonNull - @Override - public Publisher getColumnDefinitions() { - return columnDefinitionsPublisher; - } - - @NonNull - @Override - public Publisher getExecutionInfos() { - return executionInfosPublisher; - } - - @NonNull - @Override - public Publisher wasApplied() { - return wasAppliedPublisher; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscription.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscription.java deleted file mode 100644 index 500a291e9d2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscription.java +++ /dev/null @@ -1,493 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.internal.core.util.concurrent.BoundedConcurrentQueue; -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Iterator; -import java.util.Objects; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import net.jcip.annotations.ThreadSafe; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A single-subscriber 
subscription that executes one single query and emits all the returned rows. - * - *

This class can handle both continuous and non-continuous result sets. - */ -@ThreadSafe -public class ReactiveResultSetSubscription> - implements Subscription { - - private static final Logger LOG = LoggerFactory.getLogger(ReactiveResultSetSubscription.class); - - private static final int MAX_ENQUEUED_PAGES = 4; - - /** Tracks the number of items requested by the subscriber. */ - private final AtomicLong requested = new AtomicLong(0); - - /** The pages received so far, with a maximum of MAX_ENQUEUED_PAGES elements. */ - private final BoundedConcurrentQueue> pages = - new BoundedConcurrentQueue<>(MAX_ENQUEUED_PAGES); - - /** - * Used to signal that a thread is currently draining, i.e., emitting items to the subscriber. - * When it is zero, that means there is no ongoing emission. This mechanism effectively serializes - * access to the drain() method, and also keeps track of missed attempts to enter it, since each - * thread that attempts to drain will increment this counter. - * - * @see #drain() - */ - private final AtomicInteger draining = new AtomicInteger(0); - - /** - * Waited upon by the driver and completed when the subscriber requests its first item. - * - *

Used to hold off emitting results until the subscriber issues its first request for items. - * Since this future is only completed from {@link #request(long)}, this effectively conditions - * the enqueueing of the first page to the reception of the subscriber's first request. - * - *

This mechanism avoids sending terminal signals before a request is made when the stream is - * empty. Note that as per 2.9, "a Subscriber MUST be prepared to receive an onComplete signal - * with or without a preceding Subscription.request(long n) call." However, the TCK considers it - * as unfair behavior. - * - * @see #start(Callable) - * @see #request(long) - */ - private final CompletableFuture firstSubscriberRequestArrived = new CompletableFuture<>(); - - /** non-final because it has to be de-referenced, see {@link #clear()}. */ - private volatile Subscriber mainSubscriber; - - private volatile Subscriber columnDefinitionsSubscriber; - - private volatile Subscriber executionInfosSubscriber; - - private volatile Subscriber wasAppliedSubscriber; - - /** - * Set to true when the subscription is cancelled, which happens when an error is encountered, - * when the result set is fully consumed and the subscription terminates, or when the subscriber - * manually calls {@link #cancel()}. - */ - private volatile boolean cancelled = false; - - ReactiveResultSetSubscription( - @NonNull Subscriber mainSubscriber, - @NonNull Subscriber columnDefinitionsSubscriber, - @NonNull Subscriber executionInfosSubscriber, - @NonNull Subscriber wasAppliedSubscriber) { - this.mainSubscriber = mainSubscriber; - this.columnDefinitionsSubscriber = columnDefinitionsSubscriber; - this.executionInfosSubscriber = executionInfosSubscriber; - this.wasAppliedSubscriber = wasAppliedSubscriber; - } - - /** - * Starts the query execution. - * - *

Must be called immediately after creating the subscription, but after {@link - * Subscriber#onSubscribe(Subscription)}. - * - * @param firstPage The future that, when complete, will produce the first page. - */ - void start(@NonNull Callable> firstPage) { - firstSubscriberRequestArrived.thenAccept( - (aVoid) -> fetchNextPageAndEnqueue(new Page<>(firstPage), true)); - } - - @Override - public void request(long n) { - // As per 3.6: after the Subscription is cancelled, additional - // calls to request() MUST be NOPs. - if (!cancelled) { - if (n < 1) { - // Validate request as per rule 3.9 - doOnError( - new IllegalArgumentException( - mainSubscriber - + " violated the Reactive Streams rule 3.9 by requesting a non-positive number of elements.")); - } else { - // As per rule 3.17, when demand overflows Long.MAX_VALUE - // it can be treated as "effectively unbounded" - ReactiveOperators.addCap(requested, n); - // Set the first future to true if not done yet. - // This will make the first page of results ready for consumption, - // see start(). - // As per 2.7 it is the subscriber's responsibility to provide - // external synchronization when calling request(), - // so the check-then-act idiom below is good enough - // (and besides, complete() is idempotent). - if (!firstSubscriberRequestArrived.isDone()) { - firstSubscriberRequestArrived.complete(null); - } - drain(); - } - } - } - - @Override - public void cancel() { - // As per 3.5: Subscription.cancel() MUST respect the responsiveness of - // its caller by returning in a timely manner, MUST be idempotent and - // MUST be thread-safe. - if (!cancelled) { - cancelled = true; - if (draining.getAndIncrement() == 0) { - // If nobody is draining, clear now; - // otherwise, the draining thread will notice - // that the cancelled flag was set - // and will clear for us. - clear(); - } - } - } - - /** - * Attempts to drain available items, i.e. emit them to the subscriber. - * - *

Access to this method is serialized by the field {@link #draining}: only one thread at a - * time can drain, but threads that attempt to drain while other thread is already draining - * increment that field; the draining thread, before finishing its work, checks for such failed - * attempts and triggers another round of draining if that was the case. - * - *

The loop is interrupted when 1) the requested amount has been met or 2) when there are no - * more items readily available or 3) the subscription has been cancelled. - * - *

The loop also checks for stream exhaustion and emits a terminal {@code onComplete} signal in - * this case. - * - *

This method may run on a driver IO thread when invoked from {@link - * #fetchNextPageAndEnqueue(Page, boolean)}, or on a subscriber thread, when invoked from {@link - * #request(long)}. - */ - @SuppressWarnings("ConditionalBreakInInfiniteLoop") - private void drain() { - // As per 3.4: this method SHOULD respect the responsiveness - // of its caller by returning in a timely manner. - // We accomplish this by a wait-free implementation. - if (draining.getAndIncrement() != 0) { - // Someone else is already draining, so do nothing, - // the other thread will notice that we attempted to drain. - // This also allows to abide by rule 3.3 and avoid - // cycles such as request() -> onNext() -> request() etc. - return; - } - int missed = 1; - // Note: when termination is detected inside this loop, - // we MUST call clear() manually. - for (; ; ) { - // The requested number of items at this point - long r = requested.get(); - // The number of items emitted thus far - long emitted = 0L; - while (emitted != r) { - if (cancelled) { - clear(); - return; - } - Object result; - try { - result = tryNext(); - } catch (Throwable t) { - doOnError(t); - clear(); - return; - } - if (result == null) { - break; - } - if (result instanceof Throwable) { - doOnError((Throwable) result); - clear(); - return; - } - doOnNext((ReactiveRow) result); - emitted++; - } - if (isExhausted()) { - doOnComplete(); - clear(); - return; - } - if (cancelled) { - clear(); - return; - } - if (emitted != 0) { - // if any item was emitted, adjust the requested field - ReactiveOperators.subCap(requested, emitted); - } - // if another thread tried to call drain() while we were busy, - // then we should do another drain round. - missed = draining.addAndGet(-missed); - if (missed == 0) { - break; - } - } - } - - /** - * Tries to return the next item, if one is readily available, and returns {@code null} otherwise. - * - *

Cannot run concurrently due to the {@link #draining} field. - */ - @Nullable - private Object tryNext() { - Page current = pages.peek(); - if (current != null) { - if (current.hasMoreRows()) { - return current.nextRow(); - } else if (current.hasMorePages()) { - // Discard current page as it is consumed. - // Don't discard the last page though as we need it - // to test isExhausted(). It will be GC'ed when a terminal signal - // is issued anyway, so that's no big deal. - if (pages.poll() == null) { - throw new AssertionError("Queue is empty, this should not happen"); - } - // if the next page is readily available, - // serve its first row now, no need to wait - // for the next drain. - return tryNext(); - } - } - // No items available right now. - return null; - } - - /** - * Returns {@code true} when the entire stream has been consumed and no more items can be emitted. - * When that is the case, a terminal signal is sent. - * - *

Cannot run concurrently due to the draining field. - */ - private boolean isExhausted() { - Page current = pages.peek(); - // Note: current can only be null when: - // 1) we are waiting for the first page and it hasn't arrived yet; - // 2) we just discarded the current page, but the next page hasn't arrived yet. - // In any case, a null here means it is not the last page, since the last page - // stays in the queue until the very end of the operation. - return current != null && !current.hasMoreRows() && !current.hasMorePages(); - } - - /** - * Runs on a subscriber thread initially, see {@link #start(Callable)}. Subsequent executions run - * on the thread that completes the pair of futures [current.fetchNextPage, pages.offer] and - * enqueues. This can be a driver IO thread or a subscriber thread; in both cases, cannot run - * concurrently due to the fact that one can only fetch the next page when the current one is - * arrived and enqueued. - */ - private void fetchNextPageAndEnqueue(@NonNull Page current, boolean firstPage) { - current - .fetchNextPage() - // as soon as the response arrives, - // create the new page - .handle( - (rs, t) -> { - Page page; - if (t == null) { - page = toPage(rs); - executionInfosSubscriber.onNext(rs.getExecutionInfo()); - if (!page.hasMorePages()) { - executionInfosSubscriber.onComplete(); - } - if (firstPage) { - columnDefinitionsSubscriber.onNext(rs.getColumnDefinitions()); - columnDefinitionsSubscriber.onComplete(); - // Avoid calling wasApplied on empty pages as some implementations may throw - // IllegalStateException; if the page is empty, this wasn't a CAS query, in which - // case, as per the method's contract, wasApplied should be true. 
- boolean wasApplied = rs.remaining() == 0 || rs.wasApplied(); - wasAppliedSubscriber.onNext(wasApplied); - wasAppliedSubscriber.onComplete(); - } - } else { - // Unwrap CompletionExceptions created by combined futures - if (t instanceof CompletionException) { - t = t.getCause(); - } - page = toErrorPage(t); - executionInfosSubscriber.onError(t); - if (firstPage) { - columnDefinitionsSubscriber.onError(t); - wasAppliedSubscriber.onError(t); - } - } - return page; - }) - .thenCompose(pages::offer) - .thenAccept( - page -> { - if (page.hasMorePages() && !cancelled) { - // preemptively fetch the next page, if available - fetchNextPageAndEnqueue(page, false); - } - drain(); - }); - } - - private void doOnNext(@NonNull ReactiveRow result) { - try { - mainSubscriber.onNext(result); - } catch (Throwable t) { - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onNext.", - t); - cancel(); - } - } - - private void doOnComplete() { - try { - // Then we signal onComplete as per rules 1.2 and 1.5 - mainSubscriber.onComplete(); - } catch (Throwable t) { - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onComplete.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - // package-private because it can be invoked by the publisher if the subscription handshake - // process fails. - void doOnError(@NonNull Throwable error) { - try { - // Then we signal the error downstream, as per rules 1.2 and 1.4. - mainSubscriber.onError(error); - } catch (Throwable t) { - t.addSuppressed(error); - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onError.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - private void clear() { - // We don't need these pages anymore and should not hold references - // to them. 
- pages.clear(); - // As per 3.13, Subscription.cancel() MUST request the Publisher to - // eventually drop any references to the corresponding subscriber. - // Our own publishers do not keep references to this subscription, - // but downstream processors might do so, which is why we need to - // defensively clear the subscriber reference when we are done. - mainSubscriber = null; - columnDefinitionsSubscriber = null; - executionInfosSubscriber = null; - wasAppliedSubscriber = null; - } - - /** - * Converts the received result object into a {@link Page}. - * - * @param rs the result object to convert. - * @return a new page. - */ - @NonNull - private Page toPage(@NonNull ResultSetT rs) { - ExecutionInfo executionInfo = rs.getExecutionInfo(); - Iterator results = - Iterators.transform( - rs.currentPage().iterator(), - row -> new DefaultReactiveRow(Objects.requireNonNull(row), executionInfo)); - return new Page<>(results, rs.hasMorePages() ? rs::fetchNextPage : null); - } - - /** Converts the given error into a {@link Page}, containing the error as its only element. */ - @NonNull - private Page toErrorPage(@NonNull Throwable t) { - return new Page<>(Iterators.singletonIterator(t), null); - } - - /** - * A page object comprises an iterator over the page's results, and a future pointing to the next - * page (or {@code null}, if it's the last page). - */ - static class Page> { - - @NonNull final Iterator iterator; - - // A pointer to the next page, or null if this is the last page. 
- @Nullable final Callable> nextPage; - - /** called only from start() */ - Page(@NonNull Callable> nextPage) { - this.iterator = Collections.emptyIterator(); - this.nextPage = nextPage; - } - - Page(@NonNull Iterator iterator, @Nullable Callable> nextPage) { - this.iterator = iterator; - this.nextPage = nextPage; - } - - boolean hasMorePages() { - return nextPage != null; - } - - @NonNull - CompletionStage fetchNextPage() { - try { - return Objects.requireNonNull(nextPage).call(); - } catch (Exception e) { - // This is a synchronous failure in the driver. - // It can happen in rare cases when the driver throws an exception instead of returning a - // failed future; e.g. if someone tries to execute a continuous paging request but the - // protocol version in use does not support it. - // We treat it as a failed future. - return CompletableFutures.failedFuture(e); - } - } - - boolean hasMoreRows() { - return iterator.hasNext(); - } - - @NonNull - Object nextRow() { - return iterator.next(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessor.java deleted file mode 100644 index 845cbe2349b..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessor.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.Queue; -import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import org.reactivestreams.Processor; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A simple {@link Processor} that receives items form an upstream publisher, stores them in an - * internal queue, then serves them to one single downstream subscriber. It does not support - * multiple subscriptions. - * - *

Implementation note: this class is intended to serve as the common implementation for all - * secondary publishers exposed by the driver's reactive API, and in particular, for publishers of - * query metadata objects. Since such publishers are not critical, and usually only publish a - * handful of items, this implementation favors simplicity over efficiency (in particular, it uses - * an unbounded linked queue, but in practice there is no risk that this queue could grow - * uncontrollably). - * - * @param The type of elements received and emitted by this processor. - */ -public class SimpleUnicastProcessor - implements Processor, Subscription { - - private static final Logger LOG = LoggerFactory.getLogger(SimpleUnicastProcessor.class); - - private static final Object ON_COMPLETE = new Object(); - - private final Queue queue = new ConcurrentLinkedDeque<>(); - - private final AtomicBoolean once = new AtomicBoolean(false); - - private final AtomicInteger draining = new AtomicInteger(0); - - private final AtomicLong requested = new AtomicLong(0); - - private volatile Subscriber subscriber; - - private volatile boolean cancelled; - - @Override - public void subscribe(Subscriber subscriber) { - // As per rule 1.9, we need to throw an NPE if subscriber is null - Objects.requireNonNull(subscriber, "Subscriber cannot be null"); - // As per rule 1.11, this publisher supports only one subscriber. - if (once.compareAndSet(false, true)) { - this.subscriber = subscriber; - try { - subscriber.onSubscribe(this); - } catch (Throwable t) { - // As per rule 2.13: In the case that this rule is violated, - // any associated Subscription to the Subscriber MUST be considered as - // cancelled, and the caller MUST raise this error condition in a fashion - // that is adequate for the runtime environment. 
- doOnError( - new IllegalStateException( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", - t)); - } - } else { - subscriber.onSubscribe(EmptySubscription.INSTANCE); - subscriber.onError( - new IllegalStateException("This publisher does not support multiple subscriptions")); - } - // As per 2.13, this method must return normally (i.e. not throw) - } - - @Override - public void onSubscribe(Subscription s) { - // no-op - } - - @Override - public void onNext(ElementT value) { - if (!cancelled) { - queue.offer(value); - drain(); - } - } - - @Override - public void onError(Throwable error) { - if (!cancelled) { - queue.offer(error); - drain(); - } - } - - @Override - public void onComplete() { - if (!cancelled) { - queue.offer(ON_COMPLETE); - drain(); - } - } - - @Override - public void request(long n) { - // As per 3.6: after the Subscription is cancelled, additional - // calls to request() MUST be NOPs. - if (!cancelled) { - if (n < 1) { - // Validate request as per rule 3.9 - doOnError( - new IllegalArgumentException( - subscriber - + " violated the Reactive Streams rule 3.9 by requesting a non-positive number of elements.")); - } else { - // As per rule 3.17, when demand overflows Long.MAX_VALUE - // it can be treated as "effectively unbounded" - ReactiveOperators.addCap(requested, n); - drain(); - } - } - } - - @Override - public void cancel() { - // As per 3.5: Subscription.cancel() MUST respect the responsiveness of - // its caller by returning in a timely manner, MUST be idempotent and - // MUST be thread-safe. - if (!cancelled) { - cancelled = true; - if (draining.getAndIncrement() == 0) { - // If nobody is draining, clear now; - // otherwise, the draining thread will notice - // that the cancelled flag was set - // and will clear for us. 
- clear(); - } - } - } - - @SuppressWarnings("ConditionalBreakInInfiniteLoop") - private void drain() { - if (draining.getAndIncrement() != 0) { - return; - } - int missed = 1; - for (; ; ) { - // Note: when termination is detected inside this loop, - // we MUST call clear() manually. - long requested = this.requested.get(); - long emitted = 0L; - while (requested != emitted) { - if (cancelled) { - clear(); - return; - } - Object t = queue.poll(); - if (t == null) { - break; - } - if (t instanceof Throwable) { - Throwable error = (Throwable) t; - doOnError(error); - clear(); - return; - } else if (t == ON_COMPLETE) { - doOnComplete(); - clear(); - return; - } else { - @SuppressWarnings("unchecked") - ElementT item = (ElementT) t; - doOnNext(item); - emitted++; - } - } - if (cancelled) { - clear(); - return; - } - if (emitted != 0) { - // if any item was emitted, adjust the requested field - ReactiveOperators.subCap(this.requested, emitted); - } - // if another thread tried to call drain() while we were busy, - // then we should do another drain round. - missed = draining.addAndGet(-missed); - if (missed == 0) { - break; - } - } - } - - private void doOnNext(@NonNull ElementT result) { - try { - subscriber.onNext(result); - } catch (Throwable t) { - LOG.error( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onNext.", - t); - cancel(); - } - } - - private void doOnComplete() { - try { - // Then we signal onComplete as per rules 1.2 and 1.5 - subscriber.onComplete(); - } catch (Throwable t) { - LOG.error( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onComplete.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - private void doOnError(@NonNull Throwable error) { - try { - // Then we signal the error downstream, as per rules 1.2 and 1.4. 
- subscriber.onError(error); - } catch (Throwable t) { - t.addSuppressed(error); - LOG.error( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onError.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - private void clear() { - // We don't need the elements anymore and should not hold references - // to them. - queue.clear(); - // As per 3.13, Subscription.cancel() MUST request the Publisher to - // eventually drop any references to the corresponding subscriber. - // Our own publishers do not keep references to this subscription, - // but downstream processors might do so, which is why we need to - // defensively clear the subscriber reference when we are done. - subscriber = null; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultGeometry.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultGeometry.java deleted file mode 100644 index 885d9bd48b7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultGeometry.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.esri.core.geometry.GeometryException; -import com.esri.core.geometry.SpatialReference; -import com.esri.core.geometry.ogc.OGCGeometry; -import com.esri.core.geometry.ogc.OGCLineString; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.nio.ByteBuffer; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class DefaultGeometry implements Geometry, Serializable { - - private static final long serialVersionUID = 1L; - - /** - * Default spatial reference for Well Known Text / Well Known Binary. - * - *

4326 is the EPSG identifier of the World Geodetic System (WGS) in - * its later revision, WGS 84. - */ - public static final SpatialReference SPATIAL_REFERENCE_4326 = SpatialReference.create(4326); - - @NonNull - public static T fromOgcWellKnownText( - @NonNull String source, @NonNull Class klass) { - OGCGeometry geometry; - try { - geometry = OGCGeometry.fromText(source); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException(e.getMessage()); - } - validateType(geometry, klass); - return klass.cast(geometry); - } - - @NonNull - public static T fromOgcWellKnownBinary( - @NonNull ByteBuffer source, @NonNull Class klass) { - OGCGeometry geometry; - try { - geometry = OGCGeometry.fromBinary(source); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException(e.getMessage()); - } - validateType(geometry, klass); - return klass.cast(geometry); - } - - @NonNull - public static T fromOgcGeoJson( - @NonNull String source, @NonNull Class klass) { - OGCGeometry geometry; - try { - geometry = OGCGeometry.fromGeoJson(source); - } catch (Exception e) { - throw new IllegalArgumentException(e.getMessage()); - } - validateType(geometry, klass); - return klass.cast(geometry); - } - - private static void validateType(OGCGeometry geometry, Class klass) { - if (!geometry.getClass().equals(klass)) { - throw new IllegalArgumentException( - String.format( - "%s is not of type %s", geometry.getClass().getSimpleName(), klass.getSimpleName())); - } - } - - private final OGCGeometry ogcGeometry; - - protected DefaultGeometry(@NonNull OGCGeometry ogcGeometry) { - this.ogcGeometry = ogcGeometry; - Preconditions.checkNotNull(ogcGeometry); - validateOgcGeometry(ogcGeometry); - } - - private static void validateOgcGeometry(OGCGeometry geometry) { - try { - if (geometry.is3D()) { - throw new IllegalArgumentException(String.format("'%s' is not 2D", geometry.asText())); - } - if (!geometry.isSimple()) { - throw new IllegalArgumentException( - String.format( - 
"'%s' is not simple. Points and edges cannot self-intersect.", geometry.asText())); - } - } catch (GeometryException e) { - throw new IllegalArgumentException("Invalid geometry" + e.getMessage()); - } - } - - @NonNull - public static ImmutableList getPoints(@NonNull OGCLineString lineString) { - ImmutableList.Builder builder = ImmutableList.builder(); - for (int i = 0; i < lineString.numPoints(); i++) { - builder.add(new DefaultPoint(lineString.pointN(i))); - } - return builder.build(); - } - - protected static com.esri.core.geometry.Point toEsri(Point p) { - return new com.esri.core.geometry.Point(p.X(), p.Y()); - } - - @NonNull - public OGCGeometry getOgcGeometry() { - return ogcGeometry; - } - - @NonNull - public com.esri.core.geometry.Geometry getEsriGeometry() { - return ogcGeometry.getEsriGeometry(); - } - - @NonNull - @Override - public String asWellKnownText() { - return ogcGeometry.asText(); - } - - @NonNull - @Override - public ByteBuffer asWellKnownBinary() { - return WkbUtil.asLittleEndianBinary(ogcGeometry); - } - - @NonNull - @Override - public String asGeoJson() { - return ogcGeometry.asGeoJson(); - } - - @Override - public boolean contains(@NonNull Geometry other) { - Preconditions.checkNotNull(other); - if (other instanceof DefaultGeometry) { - DefaultGeometry defautlOther = (DefaultGeometry) other; - return getOgcGeometry().contains(defautlOther.getOgcGeometry()); - } - return false; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof DefaultGeometry)) { - return false; - } - DefaultGeometry that = (DefaultGeometry) o; - return this.getOgcGeometry().equals(that.getOgcGeometry()); - } - - @Override - public int hashCode() { - // OGCGeometry subclasses do not overwrite Object.hashCode() - // while com.esri.core.geometry.Geometry subclasses usually do, - // so use these instead; this is consistent with equals - // because OGCGeometry.equals() actually compare between - // 
com.esri.core.geometry.Geometry objects - return getEsriGeometry().hashCode(); - } - - // Should never be called since we serialize a proxy (see subclasses) - @SuppressWarnings("UnusedVariable") - private void readObject(ObjectInputStream stream) throws InvalidObjectException { - throw new InvalidObjectException("Proxy required"); - } - - @Override - public String toString() { - return asWellKnownText(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineString.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineString.java deleted file mode 100644 index 1cf64bb366d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineString.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.esri.core.geometry.Polyline; -import com.esri.core.geometry.ogc.OGCLineString; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultLineString extends DefaultGeometry implements LineString { - - private static final long serialVersionUID = 1280189361978382248L; - - private static OGCLineString fromPoints(Point p1, Point p2, Point... pn) { - Polyline polyline = new Polyline(toEsri(p1), toEsri(p2)); - for (Point p : pn) { - polyline.lineTo(toEsri(p)); - } - return new OGCLineString(polyline, 0, DefaultGeometry.SPATIAL_REFERENCE_4326); - } - - private final List points; - - public DefaultLineString(@NonNull Point p1, @NonNull Point p2, @NonNull Point... pn) { - super(fromPoints(p1, p2, pn)); - this.points = ImmutableList.builder().add(p1).add(p2).add(pn).build(); - } - - public DefaultLineString(@NonNull OGCLineString lineString) { - super(lineString); - this.points = getPoints(lineString); - } - - @NonNull - @Override - public List getPoints() { - return points; - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData a single byte array containing the Well-Known Binary representation. 
- */ - private Object writeReplace() { - return new WkbSerializationProxy(this.asWellKnownBinary()); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPoint.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPoint.java deleted file mode 100644 index c9540b10d8a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPoint.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.esri.core.geometry.ogc.OGCPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultPoint extends DefaultGeometry implements Point { - - private static final long serialVersionUID = -8337622213980781285L; - - public DefaultPoint(double x, double y) { - this( - new OGCPoint( - new com.esri.core.geometry.Point(x, y), DefaultGeometry.SPATIAL_REFERENCE_4326)); - } - - public DefaultPoint(@NonNull OGCPoint point) { - super(point); - } - - @NonNull - @Override - public OGCPoint getOgcGeometry() { - return (OGCPoint) super.getOgcGeometry(); - } - - @Override - public double X() { - return getOgcGeometry().X(); - } - - @Override - public double Y() { - return getOgcGeometry().Y(); - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData a single byte array containing the Well-Known Binary representation. - */ - private Object writeReplace() { - return new WkbSerializationProxy(this.asWellKnownBinary()); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygon.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygon.java deleted file mode 100644 index 27d375d42b1..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygon.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.esri.core.geometry.Operator; -import com.esri.core.geometry.OperatorFactoryLocal; -import com.esri.core.geometry.OperatorSimplifyOGC; -import com.esri.core.geometry.ogc.OGCPolygon; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultPolygon extends DefaultGeometry implements Polygon { - - private static final long serialVersionUID = 3694196802962890314L; - - private final List exteriorRing; - private final List> interiorRings; - - public DefaultPolygon( - @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... 
pn) { - super(fromPoints(p1, p2, p3, pn)); - this.exteriorRing = ImmutableList.builder().add(p1).add(p2).add(p3).add(pn).build(); - this.interiorRings = Collections.emptyList(); - } - - public DefaultPolygon(@NonNull OGCPolygon polygon) { - super(polygon); - if (polygon.isEmpty()) { - this.exteriorRing = ImmutableList.of(); - } else { - this.exteriorRing = getPoints(polygon.exteriorRing()); - } - - ImmutableList.Builder> builder = ImmutableList.builder(); - for (int i = 0; i < polygon.numInteriorRing(); i++) { - builder.add(getPoints(polygon.interiorRingN(i))); - } - this.interiorRings = builder.build(); - } - - @NonNull - @Override - public List getExteriorRing() { - return exteriorRing; - } - - @NonNull - @Override - public List> getInteriorRings() { - return interiorRings; - } - - private static OGCPolygon fromPoints(Point p1, Point p2, Point p3, Point... pn) { - com.esri.core.geometry.Polygon polygon = new com.esri.core.geometry.Polygon(); - addPath(polygon, p1, p2, p3, pn); - return new OGCPolygon(simplify(polygon), DefaultGeometry.SPATIAL_REFERENCE_4326); - } - - private static void addPath( - com.esri.core.geometry.Polygon polygon, Point p1, Point p2, Point p3, Point[] pn) { - - polygon.startPath(toEsri(p1)); - polygon.lineTo(toEsri(p2)); - polygon.lineTo(toEsri(p3)); - for (Point p : pn) { - polygon.lineTo(toEsri(p)); - } - } - - private static com.esri.core.geometry.Polygon simplify(com.esri.core.geometry.Polygon polygon) { - OperatorSimplifyOGC op = - (OperatorSimplifyOGC) - OperatorFactoryLocal.getInstance().getOperator(Operator.Type.SimplifyOGC); - return (com.esri.core.geometry.Polygon) - op.execute(polygon, DefaultGeometry.SPATIAL_REFERENCE_4326, true, null); - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData a single byte array containing the Well-Known Binary representation. 
- */ - private Object writeReplace() { - return new WkbSerializationProxy(this.asWellKnownBinary()); - } - - public static class Builder implements Polygon.Builder { - private final com.esri.core.geometry.Polygon polygon = new com.esri.core.geometry.Polygon(); - - @NonNull - @Override - public Builder addRing( - @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... pn) { - addPath(polygon, p1, p2, p3, pn); - return this; - } - - /** - * Builds the polygon. - * - * @return the polygon. - */ - @NonNull - @Override - public Polygon build() { - return new DefaultPolygon( - new OGCPolygon(simplify(polygon), DefaultGeometry.SPATIAL_REFERENCE_4326)); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/Distance.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/Distance.java deleted file mode 100644 index 518f6aa1346..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/Distance.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static java.util.regex.Pattern.CASE_INSENSITIVE; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.graph.predicates.Geo; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.esri.core.geometry.MultiPath; -import com.esri.core.geometry.ogc.OGCGeometry; -import com.esri.core.geometry.ogc.OGCPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import net.jcip.annotations.Immutable; - -/** - * The driver-side representation of DSE's {@code Geo.distance}. - * - *

This is a circle in a two-dimensional XY plane represented by its center point and radius. It - * is used as a search criteria to determine whether or not another geospatial object lies within a - * circular area. - * - *

Note that this shape has no equivalent in the OGC and GeoJSON standards: as a consequence, - * {@link #asWellKnownText()} returns a custom format, and {@link #getOgcGeometry()}, {@link - * #asWellKnownBinary()}, and {@link #asGeoJson()} throw {@link UnsupportedOperationException}. - * - *

Unlike other geo types, this class is never exposed directly to driver clients: it is used - * internally by {@linkplain Geo#inside(Point, double) geo predicates}, but cannot be a column type, - * nor appear in CQL or graph results. Therefore it doesn't have a public-facing interface, nor a - * built-in codec. - */ -@Immutable -public class Distance extends DefaultGeometry { - - private static final Pattern WKT_PATTERN = - Pattern.compile( - "distance *\\( *\\( *([\\d\\.-]+) *([\\d+\\.-]+) *\\) *([\\d+\\.-]+) *\\)", - CASE_INSENSITIVE); - - /** - * Creates a distance from its Well-known - * Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. - * @return the point represented by the WKT. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - * @see Distance#asWellKnownText() - */ - @NonNull - public static Distance fromWellKnownText(@NonNull String source) { - Matcher matcher = WKT_PATTERN.matcher(source.trim()); - if (matcher.matches() && matcher.groupCount() == 3) { - try { - return new Distance( - new DefaultPoint( - Double.parseDouble(matcher.group(1)), Double.parseDouble(matcher.group(2))), - Double.parseDouble(matcher.group(3))); - } catch (NumberFormatException var3) { - throw new IllegalArgumentException(String.format("Unable to parse %s", source)); - } - } else { - throw new IllegalArgumentException(String.format("Unable to parse %s", source)); - } - } - - private final DefaultPoint center; - - private final double radius; - - /** - * Creates a new distance with the given center and radius. - * - * @param center The center point. - * @param radius The radius of the circle representing distance. 
- */ - public Distance(@NonNull Point center, double radius) { - super(((DefaultPoint) center).getOgcGeometry()); - Preconditions.checkNotNull(center); - Preconditions.checkArgument(radius >= 0.0D, "Radius must be >= 0 (got %s)", radius); - this.center = ((DefaultPoint) center); - this.radius = radius; - } - - /** @return The center point of the circle representing this distance. */ - @NonNull - public Point getCenter() { - return center; - } - - /** @return The radius of the circle representing this distance. */ - public double getRadius() { - return radius; - } - - /** - * Returns a Well-known Text (WKT) - * representation of this geospatial type. - * - *

Since there is no Well-known Text specification for Distance, this returns a custom format - * of: DISTANCE((center.x center.y) radius) - * - * @return a Well-known Text representation of this object. - */ - @NonNull - @Override - public String asWellKnownText() { - return String.format("DISTANCE((%s %s) %s)", this.center.X(), this.center.Y(), this.radius); - } - - /** - * The distance type has no equivalent in the OGC standard: this method throws an {@link - * UnsupportedOperationException}. - */ - @NonNull - @Override - public OGCGeometry getOgcGeometry() { - throw new UnsupportedOperationException(); - } - - /** - * The distance type has no equivalent in the OGC standard: this method throws an {@link - * UnsupportedOperationException}. - */ - @NonNull - @Override - public ByteBuffer asWellKnownBinary() { - throw new UnsupportedOperationException(); - } - - /** - * The distance type has no equivalent in the GeoJSON standard: this method throws an {@link - * UnsupportedOperationException}. - */ - @Override - @NonNull - public String asGeoJson() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Distance) { - Distance that = (Distance) other; - return Objects.equals(this.center, that.center) && this.radius == that.radius; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(center, radius); - } - - @SuppressWarnings("SimplifiableConditionalExpression") - @Override - public boolean contains(@NonNull Geometry geometry) { - return geometry instanceof Distance - ? this.containsDistance((Distance) geometry) - : geometry instanceof Point - ? this.containsPoint((Point) geometry) - : geometry instanceof LineString - ? this.containsLineString((LineString) geometry) - : geometry instanceof Polygon ? 
this.containsPolygon((Polygon) geometry) : false; - } - - private boolean containsDistance(Distance distance) { - return this.center.getOgcGeometry().distance(distance.center.getOgcGeometry()) + distance.radius - <= this.radius; - } - - private boolean containsPoint(Point point) { - return this.containsOGCPoint(((DefaultPoint) point).getOgcGeometry()); - } - - private boolean containsLineString(LineString lineString) { - MultiPath multiPath = - (MultiPath) ((DefaultLineString) lineString).getOgcGeometry().getEsriGeometry(); - return containsMultiPath(multiPath); - } - - private boolean containsPolygon(Polygon polygon) { - MultiPath multiPath = - (com.esri.core.geometry.Polygon) - ((DefaultPolygon) polygon).getOgcGeometry().getEsriGeometry(); - return containsMultiPath(multiPath); - } - - private boolean containsMultiPath(MultiPath multiPath) { - int numPoints = multiPath.getPointCount(); - for (int i = 0; i < numPoints; ++i) { - OGCPoint point = new OGCPoint(multiPath.getPoint(i), DefaultGeometry.SPATIAL_REFERENCE_4326); - if (!this.containsOGCPoint(point)) { - return false; - } - } - return true; - } - - private boolean containsOGCPoint(OGCPoint point) { - return this.center.getOgcGeometry().distance(point) <= this.radius; - } - - /** - * This object gets replaced by an internal proxy for serialization. 
- * - * @serialData Point (wkb) for center followed by double for radius - */ - private Object writeReplace() { - return new DistanceSerializationProxy(this); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceSerializationProxy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceSerializationProxy.java deleted file mode 100644 index 515af121980..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceSerializationProxy.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import java.io.Serializable; - -/** - * A thin wrapper around {@link Distance}, that gets substituted during the serialization / - * deserialization process. This allows {@link Distance} to be immutable and reference centers' OGC - * counterpart. 
- */ -public class DistanceSerializationProxy implements Serializable { - - private static final long serialVersionUID = 1L; - - private final Point center; - private final double radius; - - public DistanceSerializationProxy(Distance distance) { - this.center = distance.getCenter(); - this.radius = distance.getRadius(); - } - - private Object readResolve() { - return new Distance(center, radius); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbSerializationProxy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbSerializationProxy.java deleted file mode 100644 index 92c0f6de2d5..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbSerializationProxy.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import net.jcip.annotations.Immutable; - -/** - * A thin wrapper around a Well-Known Binary byte sequence, that gets substituted for {@link - * DefaultGeometry} instances during the serialization / deserialization process. This allows - * immutable geometry classes. - */ -@Immutable -class WkbSerializationProxy implements Serializable { - - private static final long serialVersionUID = 1L; - - private final byte[] wkb; - - WkbSerializationProxy(ByteBuffer wkb) { - this.wkb = Bytes.getArray(wkb); - } - - private Object readResolve() { - ByteBuffer buffer = ByteBuffer.wrap(wkb).order(ByteOrder.nativeOrder()); - int type = buffer.getInt(1); - - if (type == 1) { - return Point.fromWellKnownBinary(buffer); - } else if (type == 2) { - return LineString.fromWellKnownBinary(buffer); - } else if (type == 3) { - return Polygon.fromWellKnownBinary(buffer); - } else { - throw new IllegalArgumentException( - "Unknown geospatial type code in serialized form: " + type); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbUtil.java deleted file mode 100644 index 3f18b32fda2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbUtil.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.esri.core.geometry.Geometry; -import com.esri.core.geometry.Operator; -import com.esri.core.geometry.OperatorExportToWkb; -import com.esri.core.geometry.OperatorFactoryLocal; -import com.esri.core.geometry.WkbExportFlags; -import com.esri.core.geometry.ogc.OGCGeometry; -import com.esri.core.geometry.ogc.OGCLineString; -import com.esri.core.geometry.ogc.OGCPoint; -import com.esri.core.geometry.ogc.OGCPolygon; -import java.lang.reflect.Method; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; - -/** - * Helper class to serialize OGC geometries to Well-Known Binary, forcing the byte order to little - * endian. - * - *

WKB encodes the byte order, so in theory we could send the buffer in any order, even if it is - * different from the server. However DSE server performs an additional validation step server-side: - * it deserializes to Java, serializes back to WKB, and then compares the original buffer to the - * "re-serialized" one. If they don't match, a MarshalException is thrown. So with a client in - * big-endian and a server in little-endian, we would get: - * - *

- * incoming buffer (big endian) --> Java --> reserialized buffer (little endian)
- * 
- * - * Since the two buffers have a different endian-ness, they don't match. - * - *

The ESRI library defaults to the native byte order and doesn't let us change it. Therefore: - * - *

    - *
  • if the native order is little endian (vast majority of cases), this class simply delegates - * to the appropriate public API method; - *
  • if the native order is big endian, it re-implements the serialization code, using - * reflection to get access to a private method. If reflection fails for any reason (updated - * ESRI library, security manager...), a runtime exception will be thrown. - *
- */ -class WkbUtil { - - private static final boolean IS_NATIVE_LITTLE_ENDIAN = - ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN) - && System.getProperty("com.datastax.driver.dse.geometry.FORCE_REFLECTION_WKB") - == null; // only for tests - - static ByteBuffer asLittleEndianBinary(OGCGeometry ogcGeometry) { - if (IS_NATIVE_LITTLE_ENDIAN) { - return ogcGeometry.asBinary(); // the default implementation does what we want - } else { - int exportFlags; - if (ogcGeometry instanceof OGCPoint) { - exportFlags = 0; - } else if (ogcGeometry instanceof OGCLineString) { - exportFlags = WkbExportFlags.wkbExportLineString; - } else if (ogcGeometry instanceof OGCPolygon) { - exportFlags = WkbExportFlags.wkbExportPolygon; - } else { - throw new AssertionError("Unsupported type: " + ogcGeometry.getClass()); - } - - // Copy-pasted from OperatorExportToWkbLocal#execute, except for the flags and order - int size = exportToWKB(exportFlags, ogcGeometry.getEsriGeometry(), null); - ByteBuffer wkbBuffer = ByteBuffer.allocate(size).order(ByteOrder.LITTLE_ENDIAN); - exportToWKB(exportFlags, ogcGeometry.getEsriGeometry(), wkbBuffer); - return wkbBuffer; - } - } - - // Provides reflective access to the private static method OperatorExportToWkbLocal#exportToWKB - private static int exportToWKB(int exportFlags, Geometry geometry, ByteBuffer wkbBuffer) { - assert !IS_NATIVE_LITTLE_ENDIAN; - try { - return (Integer) exportToWKB.invoke(null, exportFlags, geometry, wkbBuffer); - } catch (Exception e) { - throw new RuntimeException( - "Couldn't invoke private method OperatorExportToWkbLocal#exportToWKB", e); - } - } - - private static final Method exportToWKB; - - static { - if (IS_NATIVE_LITTLE_ENDIAN) { - exportToWKB = null; // won't be used - } else { - try { - OperatorExportToWkb op = - (OperatorExportToWkb) - OperatorFactoryLocal.getInstance().getOperator(Operator.Type.ExportToWkb); - exportToWKB = - op.getClass() - .getDeclaredMethod("exportToWKB", int.class, Geometry.class, 
ByteBuffer.class); - exportToWKB.setAccessible(true); - } catch (NoSuchMethodException e) { - throw new RuntimeException( - "Couldn't get access to private method OperatorExportToWkbLocal#exportToWKB", e); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ByteBufUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ByteBufUtil.java deleted file mode 100644 index 333ba6099d3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ByteBufUtil.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import java.nio.ByteBuffer; - -public class ByteBufUtil { - - // Does not move the reader index of the ByteBuf parameter - public static ByteBuffer toByteBuffer(ByteBuf buffer) { - if (buffer.isDirect()) { - return buffer.nioBuffer(); - } - final byte[] bytes = new byte[buffer.readableBytes()]; - buffer.getBytes(buffer.readerIndex(), bytes); - return ByteBuffer.wrap(bytes); - } - - static ByteBuf toByteBuf(ByteBuffer buffer) { - return Unpooled.wrappedBuffer(buffer); - } - - // read a predefined amount of bytes from the netty buffer and move its readerIndex - public static ByteBuffer readBytes(ByteBuf nettyBuf, int size) { - ByteBuffer res = ByteBuffer.allocate(size); - nettyBuf.readBytes(res); - res.flip(); - return res; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/BytecodeGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/BytecodeGraphStatement.java deleted file mode 100644 index b6fe05a987c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/BytecodeGraphStatement.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.Bytecode; - -/** - * A dedicated statement implementation for implicit traversal execution via a {@link - * DseGraphRemoteConnection}. - * - *

This is a simplified version of {@link FluentGraphStatement} that exposes the bytecode - * directly instead of the traversal. - * - *

This class is for internal use only. - */ -public class BytecodeGraphStatement extends GraphStatementBase { - - private final Bytecode bytecode; - - public BytecodeGraphStatement( - Bytecode bytecode, DriverExecutionProfile executionProfile, String executionProfileName) { - this( - bytecode, - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - executionProfile, - executionProfileName, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - private BytecodeGraphStatement( - Bytecode bytecode, - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - super( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - this.bytecode = bytecode; - } - - public Bytecode getBytecode() { - return bytecode; - } - - @Override - protected BytecodeGraphStatement newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - return new BytecodeGraphStatement( - bytecode, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousAsyncGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousAsyncGraphResultSet.java deleted file mode 100644 index 9c7f773c3a2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousAsyncGraphResultSet.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe // wraps a mutable queue -public class ContinuousAsyncGraphResultSet implements AsyncGraphResultSet { - - private final CountingIterator iterator; - private final int pageNumber; - private final boolean hasMorePages; - private final ExecutionInfo executionInfo; - private final ContinuousGraphRequestHandler continuousGraphRequestHandler; - private final Iterable currentPage; - - public ContinuousAsyncGraphResultSet( - ExecutionInfo executionInfo, - Queue data, - int pageNumber, - boolean hasMorePages, - ContinuousGraphRequestHandler continuousGraphRequestHandler, - GraphProtocol graphProtocol) { - - this.iterator = new GraphResultIterator(data, graphProtocol); - this.pageNumber = pageNumber; - this.hasMorePages = hasMorePages; - this.executionInfo = executionInfo; - this.continuousGraphRequestHandler = continuousGraphRequestHandler; - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return GraphExecutionInfoConverter.convert(executionInfo); - } - - @Override - public int remaining() { - return iterator.remaining(); - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public boolean hasMorePages() { - return hasMorePages; - } - - @NonNull - @Override - public CompletionStage 
fetchNextPage() throws IllegalStateException { - if (!hasMorePages()) { - throw new IllegalStateException( - "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); - } - return continuousGraphRequestHandler.fetchNextPage(); - } - - @Override - public void cancel() { - continuousGraphRequestHandler.cancel(); - } - - /** Returns the current page's number. Pages are numbered starting from 1. */ - public int pageNumber() { - return pageNumber; - } - - static AsyncGraphResultSet empty(ExecutionInfo executionInfo) { - - return new AsyncGraphResultSet() { - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return GraphExecutionInfoConverter.convert(executionInfo); - } - - @NonNull - @Override - public Iterable currentPage() { - return Collections.emptyList(); - } - - @Override - public int remaining() { - return 0; - } - - @Override - public boolean hasMorePages() { - return false; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - throw new IllegalStateException( - "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); - } - - @Override - public void cancel() { - // noop - } - }; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandler.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandler.java deleted file mode 100644 index 07d9e4c84a3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandler.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.response.result.Rows; -import 
edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import net.jcip.annotations.ThreadSafe; - -/** - * Handles a Graph request that supports multiple response messages (a.k.a. continuous paging - * request). - */ -@ThreadSafe -public class ContinuousGraphRequestHandler - extends ContinuousRequestHandlerBase, AsyncGraphResultSet> { - - private final GraphBinaryModule graphBinaryModule; - private final GraphSupportChecker graphSupportChecker; - private final Duration globalTimeout; - - ContinuousGraphRequestHandler( - @NonNull GraphStatement statement, - @NonNull DefaultSession session, - @NonNull InternalDriverContext context, - @NonNull String sessionLogPrefix, - @NonNull GraphBinaryModule graphBinaryModule, - @NonNull GraphSupportChecker graphSupportChecker) { - super( - statement, - session, - context, - sessionLogPrefix, - AsyncGraphResultSet.class, - true, - DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, - DseSessionMetric.GRAPH_REQUESTS, - DseNodeMetric.GRAPH_MESSAGES); - this.graphBinaryModule = graphBinaryModule; - this.graphSupportChecker = graphSupportChecker; - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - globalTimeout = - MoreObjects.firstNonNull( - statement.getTimeout(), - executionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)); - // NOTE that ordering of the following statement matters. - // We should register this request after all fields have been initialized. 
- throttler.register(this); - } - - @NonNull - @Override - protected Duration getGlobalTimeout() { - return globalTimeout; - } - - @NonNull - @Override - protected Duration getPageTimeout(@NonNull GraphStatement statement, int pageNumber) { - return Duration.ZERO; - } - - @NonNull - @Override - protected Duration getReviseRequestTimeout(@NonNull GraphStatement statement) { - return Duration.ZERO; - } - - @Override - protected int getMaxEnqueuedPages(@NonNull GraphStatement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); - } - - @Override - protected int getMaxPages(@NonNull GraphStatement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES); - } - - @NonNull - @Override - protected Message getMessage(@NonNull GraphStatement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - GraphProtocol subProtocol = - graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); - return GraphConversions.createContinuousMessageFromGraphStatement( - statement, subProtocol, executionProfile, context, graphBinaryModule); - } - - @Override - protected boolean isTracingEnabled(@NonNull GraphStatement statement) { - return statement.isTracing(); - } - - @NonNull - @Override - protected Map createPayload(@NonNull GraphStatement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - GraphProtocol subProtocol = - graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); - return GraphConversions.createCustomPayload( - statement, subProtocol, executionProfile, context, graphBinaryModule); - } - - @NonNull - @Override - 
protected AsyncGraphResultSet createEmptyResultSet(@NonNull ExecutionInfo executionInfo) { - return ContinuousAsyncGraphResultSet.empty(executionInfo); - } - - @NonNull - @Override - protected ContinuousAsyncGraphResultSet createResultSet( - @NonNull GraphStatement statement, - @NonNull Rows rows, - @NonNull ExecutionInfo executionInfo, - @NonNull ColumnDefinitions columnDefinitions) - throws IOException { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - GraphProtocol subProtocol = - graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); - - Queue graphNodes = new ArrayDeque<>(); - for (List row : rows.getData()) { - if (subProtocol.isGraphBinary()) { - graphNodes.offer(GraphConversions.createGraphBinaryGraphNode(row, this.graphBinaryModule)); - } else { - graphNodes.offer(GraphSONUtils.createGraphNode(row, subProtocol)); - } - } - - DseRowsMetadata metadata = (DseRowsMetadata) rows.getMetadata(); - return new ContinuousAsyncGraphResultSet( - executionInfo, - graphNodes, - metadata.continuousPageNumber, - !metadata.isLastContinuousPage, - this, - subProtocol); - } - - @Override - protected int pageNumber(@NonNull AsyncGraphResultSet resultSet) { - return ((ContinuousAsyncGraphResultSet) resultSet).pageNumber(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/CqlCollectionPredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/CqlCollectionPredicate.java deleted file mode 100644 index 349321da0cf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/CqlCollectionPredicate.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Collection; -import java.util.Map; -import java.util.Objects; -import org.javatuples.Pair; - -/** Predicates that can be used on CQL Collections. */ -public enum CqlCollectionPredicate implements DsePredicate { - contains { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - Preconditions.checkArgument(value instanceof Collection); - return ((Collection) value).contains(condition); - } - }, - - containsKey { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - Preconditions.checkArgument(value instanceof Map); - return ((Map) value).containsKey(condition); - } - }, - - containsValue { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - Preconditions.checkArgument(value instanceof Map); - return ((Map) value).containsValue(condition); - } - }, - - entryEq { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - Preconditions.checkArgument(condition instanceof Pair); - Preconditions.checkArgument(value instanceof Map); - Pair pair = (Pair) condition; - Map map = (Map) value; - return Objects.equals(map.get(pair.getValue0()), pair.getValue1()); - } - }; - - @Override - public 
boolean isValidCondition(Object condition) { - if (condition instanceof Pair) { - Pair pair = (Pair) condition; - return pair.getValue0() != null && pair.getValue1() != null; - } - return condition != null; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultAsyncGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultAsyncGraphResultSet.java deleted file mode 100644 index abc7cc9514e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultAsyncGraphResultSet.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe // wraps a mutable queue -public class DefaultAsyncGraphResultSet implements AsyncGraphResultSet { - - private final ExecutionInfo executionInfo; - private final CountingIterator iterator; - private final Iterable currentPage; - - public DefaultAsyncGraphResultSet( - ExecutionInfo executionInfo, Queue data, GraphProtocol graphProtocol) { - this.executionInfo = executionInfo; - this.iterator = new GraphResultIterator(data, graphProtocol); - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return GraphExecutionInfoConverter.convert(executionInfo); - } - - @Override - public int remaining() { - return iterator.remaining(); - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public boolean hasMorePages() { - return false; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - throw new IllegalStateException( - "No next page. 
Use #hasMorePages before calling this method to avoid this error."); - } - - @Override - public void cancel() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultBatchGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultBatchGraphStatement.java deleted file mode 100644 index e16287c415d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultBatchGraphStatement.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -@Immutable -public class DefaultBatchGraphStatement extends GraphStatementBase - implements BatchGraphStatement { - - private final List traversals; - - public DefaultBatchGraphStatement( - Iterable traversals, - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - super( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - this.traversals = ImmutableList.copyOf(traversals); - } - - @NonNull - @Override - public DefaultBatchGraphStatement addTraversal(@NonNull GraphTraversal newTraversal) { - return new DefaultBatchGraphStatement( - ImmutableList.builder().addAll(traversals).add(newTraversal).build(), - isIdempotent(), - getTimeout(), - getNode(), - getTimestamp(), - getExecutionProfile(), - getExecutionProfileName(), - getCustomPayload(), - getGraphName(), - getTraversalSource(), - getSubProtocol(), - 
getConsistencyLevel(), - getReadConsistencyLevel(), - getWriteConsistencyLevel()); - } - - @NonNull - @Override - public DefaultBatchGraphStatement addTraversals(@NonNull Iterable newTraversals) { - return new DefaultBatchGraphStatement( - ImmutableList.builder().addAll(traversals).addAll(newTraversals).build(), - isIdempotent(), - getTimeout(), - getNode(), - getTimestamp(), - getExecutionProfile(), - getExecutionProfileName(), - getCustomPayload(), - getGraphName(), - getTraversalSource(), - getSubProtocol(), - getConsistencyLevel(), - getReadConsistencyLevel(), - getWriteConsistencyLevel()); - } - - @Override - public int size() { - return this.traversals.size(); - } - - @Override - protected BatchGraphStatement newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - return new DefaultBatchGraphStatement( - traversals, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @NonNull - @Override - public Iterator iterator() { - return this.traversals.iterator(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultDseRemoteConnectionBuilder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultDseRemoteConnectionBuilder.java deleted file mode 100644 index 146e8e17ea2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultDseRemoteConnectionBuilder.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.DseGraphRemoteConnectionBuilder; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection; - -@NotThreadSafe -public class DefaultDseRemoteConnectionBuilder implements DseGraphRemoteConnectionBuilder { - - private final CqlSession session; - private DriverExecutionProfile executionProfile; - private String executionProfileName; - - public DefaultDseRemoteConnectionBuilder(CqlSession session) { - this.session = session; - } - - @Override - public RemoteConnection build() { - return new DseGraphRemoteConnection(session, executionProfile, executionProfileName); - } - - @Override - public DseGraphRemoteConnectionBuilder withExecutionProfile( - DriverExecutionProfile executionProfile) { - this.executionProfile = executionProfile; - return this; - } - - @Override - public DseGraphRemoteConnectionBuilder withExecutionProfileName(String executionProfileName) { - this.executionProfileName = executionProfileName; - return this; - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultFluentGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultFluentGraphStatement.java deleted file mode 100644 index 0f6f1faabbf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultFluentGraphStatement.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -@Immutable -public class DefaultFluentGraphStatement extends GraphStatementBase - implements FluentGraphStatement { - - private final GraphTraversal traversal; - - public DefaultFluentGraphStatement( - GraphTraversal traversal, - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - super( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - this.traversal = traversal; - } - - @Override - protected FluentGraphStatement newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - return new DefaultFluentGraphStatement( - traversal, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - 
customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @NonNull - @Override - public GraphTraversal getTraversal() { - return traversal; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultScriptGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultScriptGraphStatement.java deleted file mode 100644 index 71f79134237..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultScriptGraphStatement.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultScriptGraphStatement extends GraphStatementBase - implements ScriptGraphStatement { - - private final String script; - private final Boolean isSystemQuery; - private final NullAllowingImmutableMap queryParams; - - public DefaultScriptGraphStatement( - String script, - Map queryParams, - Boolean isSystemQuery, - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - super( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - this.script = script; - this.isSystemQuery = isSystemQuery; - this.queryParams = NullAllowingImmutableMap.copyOf(queryParams); - } - - //// Script GraphStatement level options - - @NonNull - @Override - public String getScript() { - return script; - } - - @NonNull - @Override - public ScriptGraphStatement setSystemQuery(@Nullable Boolean newValue) { - return new DefaultScriptGraphStatement( - script, - queryParams, - newValue, - 
isIdempotent(), - getTimeout(), - getNode(), - getTimestamp(), - getExecutionProfile(), - getExecutionProfileName(), - getCustomPayload(), - getGraphName(), - getTraversalSource(), - getSubProtocol(), - getConsistencyLevel(), - getReadConsistencyLevel(), - getWriteConsistencyLevel()); - } - - @Nullable - @Override - public Boolean isSystemQuery() { - return isSystemQuery; - } - - @NonNull - @Override - public Map getQueryParams() { - return this.queryParams; - } - - @NonNull - @Override - public ScriptGraphStatement setQueryParam(@NonNull String name, @Nullable Object value) { - NullAllowingImmutableMap.Builder newQueryParamsBuilder = - NullAllowingImmutableMap.builder(); - for (Map.Entry entry : queryParams.entrySet()) { - if (!entry.getKey().equals(name)) { - newQueryParamsBuilder.put(entry.getKey(), entry.getValue()); - } - } - newQueryParamsBuilder.put(name, value); - return setQueryParams(newQueryParamsBuilder.build()); - } - - @NonNull - @Override - public ScriptGraphStatement removeQueryParam(@NonNull String name) { - if (!queryParams.containsKey(name)) { - return this; - } else { - NullAllowingImmutableMap.Builder newQueryParamsBuilder = - NullAllowingImmutableMap.builder(); - for (Map.Entry entry : queryParams.entrySet()) { - if (!entry.getKey().equals(name)) { - newQueryParamsBuilder.put(entry.getKey(), entry.getValue()); - } - } - return setQueryParams(newQueryParamsBuilder.build()); - } - } - - private ScriptGraphStatement setQueryParams(Map newQueryParams) { - return new DefaultScriptGraphStatement( - script, - newQueryParams, - isSystemQuery, - isIdempotent(), - getTimeout(), - getNode(), - getTimestamp(), - getExecutionProfile(), - getExecutionProfileName(), - getCustomPayload(), - getGraphName(), - getTraversalSource(), - getSubProtocol(), - getConsistencyLevel(), - getReadConsistencyLevel(), - getWriteConsistencyLevel()); - } - - @Override - protected ScriptGraphStatement newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long 
timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - return new DefaultScriptGraphStatement( - script, - queryParams, - isSystemQuery, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Override - public String toString() { - return String.format("ScriptGraphStatement['%s', params: %s]", this.script, this.queryParams); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphRemoteConnection.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphRemoteConnection.java deleted file mode 100644 index a5ec0a1d115..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphRemoteConnection.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import java.util.concurrent.CompletableFuture; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection; -import org.apache.tinkerpop.gremlin.process.remote.traversal.RemoteTraversal; -import org.apache.tinkerpop.gremlin.process.traversal.Bytecode; - -@Immutable -public class DseGraphRemoteConnection implements RemoteConnection { - - private final CqlSession session; - private final DriverExecutionProfile executionProfile; - private final String executionProfileName; - - public DseGraphRemoteConnection( - CqlSession session, DriverExecutionProfile executionProfile, String executionProfileName) { - this.session = session; - this.executionProfile = executionProfile; - this.executionProfileName = executionProfileName; - } - - @Override - public CompletableFuture> submitAsync(Bytecode bytecode) { - return session - .executeAsync(new BytecodeGraphStatement(bytecode, executionProfile, executionProfileName)) - .toCompletableFuture() - .thenApply(DseGraphTraversal::new); - } - - @Override - public void close() throws Exception { - // do not close the DseSession here. - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphTraversal.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphTraversal.java deleted file mode 100644 index e0a5cf2d675..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphTraversal.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import java.util.Iterator; -import java.util.NoSuchElementException; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.remote.traversal.AbstractRemoteTraversal; -import org.apache.tinkerpop.gremlin.process.remote.traversal.DefaultRemoteTraverser; -import org.apache.tinkerpop.gremlin.process.traversal.Traverser; - -@NotThreadSafe -class DseGraphTraversal extends AbstractRemoteTraversal { - - private final Iterator graphNodeIterator; - - public DseGraphTraversal(AsyncGraphResultSet firstPage) { - this.graphNodeIterator = GraphResultSets.toSync(firstPage).iterator(); - } - - @Override - public boolean hasNext() { - return graphNodeIterator.hasNext(); - } - - @Override - public E next() { - return nextTraverser().get(); - } - - @Override - @SuppressWarnings("unchecked") - public Traverser.Admin nextTraverser() { - if (hasNext()) { - GraphNode nextGraphNode = graphNodeIterator.next(); - - // get the Raw object from the ObjectGraphNode, create a new remote Traverser - // with bulk = 1 because bulk is not supported yet. Casting should be ok - // because we have been able to deserialize into the right type. 
- return new DefaultRemoteTraverser<>((E) nextGraphNode.as(Object.class), 1); - } else { - // finished iterating/nothing to iterate. Normal behaviour. - throw new NoSuchElementException(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DsePredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DsePredicate.java deleted file mode 100644 index b5f8c30fd8c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DsePredicate.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.function.BiPredicate; - -/** - * An extension of TinkerPop's {@link BiPredicate} adding simple pre-condition checking methods that - * have to be written in the implementations. 
- */ -public interface DsePredicate extends BiPredicate { - - default void preEvaluate(Object condition) { - Preconditions.checkArgument( - this.isValidCondition(condition), "Invalid condition provided: %s", condition); - } - - boolean isValidCondition(Object condition); -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/EditDistance.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/EditDistance.java deleted file mode 100644 index 5ab836babbf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/EditDistance.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.shaded.guava.common.base.Objects; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.io.Serializable; -import net.jcip.annotations.Immutable; - -/** - * A container for a term and maximum edit distance. - * - *

The context in which this is used determines the semantics of the edit distance. For instance, - * it might indicate single-character edits if used with fuzzy search queries or whole word - * movements if used with phrase proximity queries. - */ -@Immutable -public class EditDistance implements Serializable { - - private static final long serialVersionUID = 1L; - - public static final int DEFAULT_EDIT_DISTANCE = 0; - - public final String query; - public final int distance; - - public EditDistance(String query) { - this(query, DEFAULT_EDIT_DISTANCE); - } - - public EditDistance(String query, int distance) { - Preconditions.checkNotNull(query, "Query cannot be null."); - Preconditions.checkArgument(distance >= 0, "Edit distance cannot be negative."); - this.query = query; - this.distance = distance; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof EditDistance)) { - return false; - } - EditDistance that = (EditDistance) o; - return distance == that.distance && Objects.equal(query, that.query); - } - - @Override - public int hashCode() { - return Objects.hashCode(query, distance); - } - - @Override - public String toString() { - return "EditDistance{" + "query='" + query + '\'' + ", distance=" + distance + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoPredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoPredicate.java deleted file mode 100644 index 39949e97198..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoPredicate.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.graph.predicates.Geo; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; - -/** - * List of predicates for geolocation usage with DseGraph and Search indexes. Should not be accessed - * directly but through the {@link Geo} static methods. - */ -public enum GeoPredicate implements DsePredicate { - - /** Matches values within the distance specified by the condition over a Haversine geometry. 
*/ - inside { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - Preconditions.checkArgument(value instanceof Geometry); - Distance distance = (Distance) condition; - if (value instanceof Point) { - return haversineDistanceInDegrees(distance.getCenter(), (Point) value) - <= distance.getRadius(); - } else if (value instanceof Polygon) { - for (Point point : ((Polygon) value).getExteriorRing()) { - if (haversineDistanceInDegrees(distance.getCenter(), point) > distance.getRadius()) { - return false; - } - } - } else if (value instanceof LineString) { - for (Point point : ((LineString) value).getPoints()) { - if (haversineDistanceInDegrees(distance.getCenter(), point) > distance.getRadius()) { - return false; - } - } - } else { - throw new UnsupportedOperationException( - String.format("Value type '%s' unsupported", value.getClass().getName())); - } - - return true; - } - - @Override - public String toString() { - return "inside"; - } - }, - - /** - * Matches values contained in the geometric entity specified by the condition on a 2D Euclidean - * plane. 
- */ - insideCartesian { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - Preconditions.checkArgument(value instanceof Geometry); - return ((Geometry) condition).contains((Geometry) value); - } - - @Override - public String toString() { - return "insideCartesian"; - } - }; - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - static double haversineDistanceInDegrees(Point p1, Point p2) { - double dLat = Math.toRadians(p2.Y() - p1.Y()); - double dLon = Math.toRadians(p2.X() - p1.X()); - double lat1 = Math.toRadians(p1.Y()); - double lat2 = Math.toRadians(p2.Y()); - - double a = - Math.pow(Math.sin(dLat / 2), 2) - + Math.pow(Math.sin(dLon / 2), 2) * Math.cos(lat1) * Math.cos(lat2); - double c = 2 * Math.asin(Math.sqrt(a)); - return Math.toDegrees(c); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoUtils.java deleted file mode 100644 index 80d55dac69d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoUtils.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -public class GeoUtils { - private static final double DEGREES_TO_RADIANS = Math.PI / 180; - private static final double EARTH_MEAN_RADIUS_KM = 6371.0087714; - private static final double DEG_TO_KM = DEGREES_TO_RADIANS * EARTH_MEAN_RADIUS_KM; - private static final double KM_TO_MILES = 0.621371192; - public static final double KM_TO_DEG = 1 / DEG_TO_KM; - public static final double MILES_TO_KM = 1 / KM_TO_MILES; -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphConversions.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphConversions.java deleted file mode 100644 index c95b26b2e26..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphConversions.java +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static java.nio.charset.StandardCharsets.UTF_8; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.dse.protocol.internal.request.RawBytesQuery; -import com.datastax.dse.protocol.internal.request.query.ContinuousPagingOptions; -import com.datastax.dse.protocol.internal.request.query.DseQueryOptions; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Query; -import 
com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.Traverser; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.BufferFactory; - -/** - * Utility class to move boilerplate out of {@link GraphRequestHandler}. - * - *

We extend {@link Conversions} only for methods that can be directly reused as-is; if something - * needs to be customized, it will be duplicated here instead of making the parent method - * "pluggable". - */ -public class GraphConversions extends Conversions { - - static final String GRAPH_LANG_OPTION_KEY = "graph-language"; - static final String GRAPH_NAME_OPTION_KEY = "graph-name"; - static final String GRAPH_SOURCE_OPTION_KEY = "graph-source"; - static final String GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY = "graph-read-consistency"; - static final String GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY = "graph-write-consistency"; - static final String GRAPH_RESULTS_OPTION_KEY = "graph-results"; - static final String GRAPH_TIMEOUT_OPTION_KEY = "request-timeout"; - static final String GRAPH_BINARY_QUERY_OPTION_KEY = "graph-binary-query"; - - static final String LANGUAGE_GROOVY = "gremlin-groovy"; - static final String LANGUAGE_BYTECODE = "bytecode-json"; - - private static final BufferFactory FACTORY = new DseNettyBufferFactory(); - - @VisibleForTesting static final byte[] EMPTY_STRING_QUERY = "".getBytes(UTF_8); - - public static Message createContinuousMessageFromGraphStatement( - GraphStatement statement, - GraphProtocol subProtocol, - DriverExecutionProfile config, - InternalDriverContext context, - GraphBinaryModule graphBinaryModule) { - - final List encodedQueryParams; - if (!(statement instanceof ScriptGraphStatement) - || ((ScriptGraphStatement) statement).getQueryParams().isEmpty()) { - encodedQueryParams = Collections.emptyList(); - } else { - try { - Map queryParams = ((ScriptGraphStatement) statement).getQueryParams(); - if (subProtocol.isGraphBinary()) { - Buffer graphBinaryParams = graphBinaryModule.serialize(queryParams); - encodedQueryParams = Collections.singletonList(graphBinaryParams.nioBuffer()); - graphBinaryParams.release(); - } else { - encodedQueryParams = - Collections.singletonList( - GraphSONUtils.serializeToByteBuffer(queryParams, 
subProtocol)); - } - } catch (IOException e) { - throw new UncheckedIOException( - "Couldn't serialize parameters for GraphStatement: " + statement, e); - } - } - - int consistencyLevel = - DefaultConsistencyLevel.valueOf(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .getProtocolCode(); - - long timestamp = statement.getTimestamp(); - if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { - timestamp = context.getTimestampGenerator().next(); - } - - int pageSize = config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE); - int maxPages = config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES); - int maxPagesPerSecond = - config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND); - int maxEnqueuedPages = - config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); - ContinuousPagingOptions options = - new ContinuousPagingOptions(maxPages, maxPagesPerSecond, maxEnqueuedPages); - - DseQueryOptions queryOptions = - new DseQueryOptions( - consistencyLevel, - encodedQueryParams, - Collections.emptyMap(), // ignored by the DSE Graph server - true, // also ignored - pageSize, - null, - ProtocolConstants.ConsistencyLevel.LOCAL_SERIAL, // also ignored - timestamp, - null, // also ignored - false, // graph CP does not support sizeInBytes - options); - - if (statement instanceof ScriptGraphStatement) { - return new Query(((ScriptGraphStatement) statement).getScript(), queryOptions); - } else { - return new RawBytesQuery(getQueryBytes(statement, subProtocol), queryOptions); - } - } - - static Message createMessageFromGraphStatement( - GraphStatement statement, - GraphProtocol subProtocol, - DriverExecutionProfile config, - InternalDriverContext context, - GraphBinaryModule graphBinaryModule) { - - final List encodedQueryParams; - if (!(statement instanceof ScriptGraphStatement) - || ((ScriptGraphStatement) statement).getQueryParams().isEmpty()) { - encodedQueryParams = Collections.emptyList(); - } else { - try { - 
Map queryParams = ((ScriptGraphStatement) statement).getQueryParams(); - if (subProtocol.isGraphBinary()) { - Buffer graphBinaryParams = graphBinaryModule.serialize(queryParams); - encodedQueryParams = Collections.singletonList(graphBinaryParams.nioBuffer()); - graphBinaryParams.release(); - } else { - encodedQueryParams = - Collections.singletonList( - GraphSONUtils.serializeToByteBuffer(queryParams, subProtocol)); - } - } catch (IOException e) { - throw new UncheckedIOException( - "Couldn't serialize parameters for GraphStatement: " + statement, e); - } - } - - ConsistencyLevel consistency = statement.getConsistencyLevel(); - int consistencyLevel = - (consistency == null) - ? context - .getConsistencyLevelRegistry() - .nameToCode(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - : consistency.getProtocolCode(); - - long timestamp = statement.getTimestamp(); - if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { - timestamp = context.getTimestampGenerator().next(); - } - - DseQueryOptions queryOptions = - new DseQueryOptions( - consistencyLevel, - encodedQueryParams, - Collections.emptyMap(), // ignored by the DSE Graph server - true, // also ignored - 50, // also ignored - null, // also ignored - ProtocolConstants.ConsistencyLevel.LOCAL_SERIAL, // also ignored - timestamp, - null, // also ignored - false, // also ignored - null // also ignored - ); - - if (statement instanceof ScriptGraphStatement) { - return new Query(((ScriptGraphStatement) statement).getScript(), queryOptions); - } else { - return new RawBytesQuery(getQueryBytes(statement, subProtocol), queryOptions); - } - } - - // This method returns either a Bytecode object, or a List if the statement is a - // BatchGraphStatement - @VisibleForTesting - public static Object bytecodeToSerialize(GraphStatement statement) { - Preconditions.checkArgument( - statement instanceof FluentGraphStatement - || statement instanceof BatchGraphStatement - || statement instanceof BytecodeGraphStatement, - "To 
serialize bytecode the query must be a fluent or batch statement, but was: %s", - statement.getClass()); - - Object toSerialize; - if (statement instanceof FluentGraphStatement) { - toSerialize = ((FluentGraphStatement) statement).getTraversal().asAdmin().getBytecode(); - } else if (statement instanceof BatchGraphStatement) { - // transform the Iterator to List - toSerialize = - ImmutableList.copyOf( - Iterators.transform( - ((BatchGraphStatement) statement).iterator(), - traversal -> traversal.asAdmin().getBytecode())); - } else { - toSerialize = ((BytecodeGraphStatement) statement).getBytecode(); - } - return toSerialize; - } - - private static byte[] getQueryBytes(GraphStatement statement, GraphProtocol graphSubProtocol) { - try { - return graphSubProtocol.isGraphBinary() - // if GraphBinary, the query is encoded in the custom payload, and not in the query field - // see GraphConversions#createCustomPayload() - ? EMPTY_STRING_QUERY - : GraphSONUtils.serializeToBytes(bytecodeToSerialize(statement), graphSubProtocol); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - public static Map createCustomPayload( - GraphStatement statement, - GraphProtocol subProtocol, - DriverExecutionProfile config, - InternalDriverContext context, - GraphBinaryModule graphBinaryModule) { - - ProtocolVersion protocolVersion = context.getProtocolVersion(); - - NullAllowingImmutableMap.Builder payload = - NullAllowingImmutableMap.builder(); - Map statementOptions = statement.getCustomPayload(); - payload.putAll(statementOptions); - - final String graphLanguage; - - // Don't override anything that's already provided at the statement level - if (!statementOptions.containsKey(GRAPH_LANG_OPTION_KEY)) { - graphLanguage = - statement instanceof ScriptGraphStatement ? 
LANGUAGE_GROOVY : LANGUAGE_BYTECODE; - payload.put(GRAPH_LANG_OPTION_KEY, TypeCodecs.TEXT.encode(graphLanguage, protocolVersion)); - } else { - graphLanguage = - TypeCodecs.TEXT.decode(statementOptions.get(GRAPH_LANG_OPTION_KEY), protocolVersion); - Preconditions.checkNotNull( - graphLanguage, "A null value was set for the graph-language custom payload key."); - } - - if (!isSystemQuery(statement, config)) { - if (!statementOptions.containsKey(GRAPH_NAME_OPTION_KEY)) { - String graphName = statement.getGraphName(); - if (graphName == null) { - graphName = config.getString(DseDriverOption.GRAPH_NAME, null); - } - if (graphName != null) { - payload.put(GRAPH_NAME_OPTION_KEY, TypeCodecs.TEXT.encode(graphName, protocolVersion)); - } - } - if (!statementOptions.containsKey(GRAPH_SOURCE_OPTION_KEY)) { - String traversalSource = statement.getTraversalSource(); - if (traversalSource == null) { - traversalSource = config.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null); - } - if (traversalSource != null) { - payload.put( - GRAPH_SOURCE_OPTION_KEY, TypeCodecs.TEXT.encode(traversalSource, protocolVersion)); - } - } - } - - // the payload allows null entry values so doing a get directly here and checking for null - final ByteBuffer payloadInitialProtocol = statementOptions.get(GRAPH_RESULTS_OPTION_KEY); - if (payloadInitialProtocol == null) { - Preconditions.checkNotNull(subProtocol); - payload.put( - GRAPH_RESULTS_OPTION_KEY, - TypeCodecs.TEXT.encode(subProtocol.toInternalCode(), protocolVersion)); - } else { - subProtocol = - GraphProtocol.fromString(TypeCodecs.TEXT.decode(payloadInitialProtocol, protocolVersion)); - } - - if (subProtocol.isGraphBinary() && graphLanguage.equals(LANGUAGE_BYTECODE)) { - Object bytecodeQuery = bytecodeToSerialize(statement); - try { - Buffer bytecodeByteBuf = graphBinaryModule.serialize(bytecodeQuery); - payload.put(GRAPH_BINARY_QUERY_OPTION_KEY, bytecodeByteBuf.nioBuffer()); - bytecodeByteBuf.release(); - } catch (IOException e) { - 
throw new UncheckedIOException(e); - } - } - - if (!statementOptions.containsKey(GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY)) { - ConsistencyLevel readCl = statement.getReadConsistencyLevel(); - String readClString = - readCl != null - ? readCl.name() - : config.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null); - if (readClString != null) { - payload.put( - GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY, - TypeCodecs.TEXT.encode(readClString, protocolVersion)); - } - } - - if (!statementOptions.containsKey(GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY)) { - ConsistencyLevel writeCl = statement.getWriteConsistencyLevel(); - String writeClString = - writeCl != null - ? writeCl.name() - : config.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null); - if (writeClString != null) { - payload.put( - GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY, - TypeCodecs.TEXT.encode(writeClString, protocolVersion)); - } - } - - if (!statementOptions.containsKey(GRAPH_TIMEOUT_OPTION_KEY)) { - Duration timeout = statement.getTimeout(); - if (timeout == null) { - timeout = config.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO); - } - if (timeout != null && !timeout.isZero()) { - payload.put( - GRAPH_TIMEOUT_OPTION_KEY, - TypeCodecs.BIGINT.encode(timeout.toMillis(), protocolVersion)); - } - } - return payload.build(); - } - - private static boolean isSystemQuery(GraphStatement statement, DriverExecutionProfile config) { - if (statement instanceof ScriptGraphStatement) { - Boolean statementValue = ((ScriptGraphStatement) statement).isSystemQuery(); - if (statementValue != null) { - return statementValue; - } - } - return config.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false); - } - - public static GraphNode createGraphBinaryGraphNode( - List data, GraphBinaryModule graphBinaryModule) throws IOException { - // there should be only one column in the given row - Preconditions.checkArgument(data.size() == 1, "Invalid row given to deserialize"); - - Buffer toDeserialize = 
FACTORY.wrap(data.get(0)); - Object deserializedObject = graphBinaryModule.deserialize(toDeserialize); - toDeserialize.release(); - assert deserializedObject instanceof Traverser - : "Graph protocol error. Received object should be a Traverser but it is not."; - return new ObjectGraphNode(deserializedObject); - } - - public static Duration resolveGraphRequestTimeout( - GraphStatement statement, InternalDriverContext context) { - DriverExecutionProfile executionProfile = resolveExecutionProfile(statement, context); - return statement.getTimeout() != null - ? statement.getTimeout() - : executionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT); - } - - public static GraphProtocol resolveGraphSubProtocol( - GraphStatement statement, - GraphSupportChecker graphSupportChecker, - InternalDriverContext context) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverter.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverter.java deleted file mode 100644 index b6472f690d3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverter.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.UUID; -import java.util.concurrent.CompletionStage; - -/** - * Handles conversions from / to GraphExecutionInfo and ExecutionInfo since GraphExecutionInfo has - * been deprecated by JAVA-2556. - */ -public class GraphExecutionInfoConverter { - - /** - * Called exclusively from default methods in API interfaces {@link - * com.datastax.dse.driver.api.core.graph.GraphResultSet} and {@link - * com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet}. Graph result set implementations - * do not use this method but rather the other one below. 
- */ - @SuppressWarnings("deprecation") - public static ExecutionInfo convert( - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo graphExecutionInfo) { - return new ExecutionInfo() { - - @NonNull - @Override - public Request getRequest() { - return graphExecutionInfo.getStatement(); - } - - @NonNull - @Override - public Statement getStatement() { - throw new ClassCastException("GraphStatement cannot be cast to Statement"); - } - - @Nullable - @Override - public Node getCoordinator() { - return graphExecutionInfo.getCoordinator(); - } - - @Override - public int getSpeculativeExecutionCount() { - return graphExecutionInfo.getSpeculativeExecutionCount(); - } - - @Override - public int getSuccessfulExecutionIndex() { - return graphExecutionInfo.getSuccessfulExecutionIndex(); - } - - @NonNull - @Override - public List> getErrors() { - return graphExecutionInfo.getErrors(); - } - - @Nullable - @Override - public ByteBuffer getPagingState() { - return null; - } - - @NonNull - @Override - public List getWarnings() { - return graphExecutionInfo.getWarnings(); - } - - @NonNull - @Override - public Map getIncomingPayload() { - return graphExecutionInfo.getIncomingPayload(); - } - - @Override - public boolean isSchemaInAgreement() { - return true; - } - - @Nullable - @Override - public UUID getTracingId() { - return null; - } - - @NonNull - @Override - public CompletionStage getQueryTraceAsync() { - return CompletableFutures.failedFuture( - new IllegalStateException("Tracing was disabled for this request")); - } - - @Override - public int getResponseSizeInBytes() { - return -1; - } - - @Override - public int getCompressedResponseSizeInBytes() { - return -1; - } - }; - } - - /** - * Called from graph result set implementations, to convert the original {@link ExecutionInfo} - * produced by request handlers into the (deprecated) type GraphExecutionInfo. 
- */ - @SuppressWarnings("deprecation") - public static com.datastax.dse.driver.api.core.graph.GraphExecutionInfo convert( - ExecutionInfo executionInfo) { - return new com.datastax.dse.driver.api.core.graph.GraphExecutionInfo() { - - @Override - public GraphStatement getStatement() { - return (GraphStatement) executionInfo.getRequest(); - } - - @Override - public Node getCoordinator() { - return executionInfo.getCoordinator(); - } - - @Override - public int getSpeculativeExecutionCount() { - return executionInfo.getSpeculativeExecutionCount(); - } - - @Override - public int getSuccessfulExecutionIndex() { - return executionInfo.getSuccessfulExecutionIndex(); - } - - @Override - public List> getErrors() { - return executionInfo.getErrors(); - } - - @Override - public List getWarnings() { - return executionInfo.getWarnings(); - } - - @Override - public Map getIncomingPayload() { - return executionInfo.getIncomingPayload(); - } - }; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphProtocol.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphProtocol.java deleted file mode 100644 index 6b7a9f4c430..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphProtocol.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -public enum GraphProtocol { - GRAPHSON_1_0("graphson-1.0"), - GRAPHSON_2_0("graphson-2.0"), - GRAPH_BINARY_1_0("graph-binary-1.0"), - ; - - private static final Map BY_CODE; - - static { - Map tmp = new HashMap<>(); - for (GraphProtocol value : values()) { - tmp.put(value.stringRepresentation, value); - } - BY_CODE = Collections.unmodifiableMap(tmp); - } - - private final String stringRepresentation; - - GraphProtocol(String stringRepresentation) { - this.stringRepresentation = stringRepresentation; - } - - @NonNull - public String toInternalCode() { - return stringRepresentation; - } - - @NonNull - public static GraphProtocol fromString(@Nullable String stringRepresentation) { - if (stringRepresentation == null || !BY_CODE.containsKey(stringRepresentation)) { - StringBuilder sb = - new StringBuilder( - String.format( - "Graph protocol used [\"%s\"] unknown. 
Possible values are: [ \"%s\"", - stringRepresentation, GraphProtocol.values()[0].toInternalCode())); - for (int i = 1; i < GraphProtocol.values().length; i++) { - sb.append(String.format(", \"%s\"", GraphProtocol.values()[i].toInternalCode())); - } - sb.append("]"); - throw new IllegalArgumentException(sb.toString()); - } - return BY_CODE.get(stringRepresentation); - } - - public boolean isGraphBinary() { - return this == GRAPH_BINARY_1_0; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestAsyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestAsyncProcessor.java deleted file mode 100644 index 050b03c66f4..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestAsyncProcessor.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; - -@ThreadSafe -public class GraphRequestAsyncProcessor - implements RequestProcessor, CompletionStage> { - - private final GraphBinaryModule graphBinaryModule; - private final GraphSupportChecker graphSupportChecker; - - public GraphRequestAsyncProcessor( - DefaultDriverContext context, GraphSupportChecker graphSupportChecker) { - TypeSerializerRegistry typeSerializerRegistry = - GraphBinaryModule.createDseTypeSerializerRegistry(context); - this.graphBinaryModule = - new GraphBinaryModule( - new GraphBinaryReader(typeSerializerRegistry), - new GraphBinaryWriter(typeSerializerRegistry)); - this.graphSupportChecker = graphSupportChecker; - } - - 
@NonNull - public GraphBinaryModule getGraphBinaryModule() { - return graphBinaryModule; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return (request instanceof ScriptGraphStatement - || request instanceof FluentGraphStatement - || request instanceof BatchGraphStatement - || request instanceof BytecodeGraphStatement) - && resultType.equals(GraphStatement.ASYNC); - } - - @Override - public CompletionStage process( - GraphStatement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - if (graphSupportChecker.isPagingEnabled(request, context)) { - return new ContinuousGraphRequestHandler( - request, - session, - context, - sessionLogPrefix, - getGraphBinaryModule(), - graphSupportChecker) - .handle(); - } else { - return new GraphRequestHandler( - request, - session, - context, - sessionLogPrefix, - getGraphBinaryModule(), - graphSupportChecker) - .handle(); - } - } - - @Override - public CompletionStage newFailure(RuntimeException error) { - return CompletableFutures.failedFuture(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandler.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandler.java deleted file mode 100644 index 5c9ceb00df2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandler.java +++ /dev/null @@ -1,871 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import 
com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.DefaultExecutionInfo; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.driver.internal.core.tracker.RequestLogger; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.Void; -import edu.umd.cs.findbugs.annotations.NonNull; -import 
io.netty.handler.codec.EncoderException; -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class GraphRequestHandler implements Throttled { - - private static final Logger LOG = LoggerFactory.getLogger(GraphRequestHandler.class); - - private static final long NANOTIME_NOT_MEASURED_YET = -1; - private static final int NO_SUCCESSFUL_EXECUTION = -1; - - private final long startTimeNanos; - private final String logPrefix; - private final GraphStatement initialStatement; - private final DefaultSession session; - private final InternalDriverContext context; - protected final CompletableFuture result; - private final Timer timer; - - /** - * How many speculative executions are currently running (including the initial execution). We - * track this in order to know when to fail the request if all executions have reached the end of - * the query plan. - */ - private final AtomicInteger activeExecutionsCount; - - /** - * How many speculative executions have started (excluding the initial execution), whether they - * have completed or not. We track this in order to fill {@link - * ExecutionInfo#getSpeculativeExecutionCount()}. 
- */ - private final AtomicInteger startedSpeculativeExecutionsCount; - - private final Timeout scheduledTimeout; - private final List scheduledExecutions; - private final List inFlightCallbacks; - private final RequestThrottler throttler; - private final RequestTracker requestTracker; - private final SessionMetricUpdater sessionMetricUpdater; - private final GraphBinaryModule graphBinaryModule; - private final GraphSupportChecker graphSupportChecker; - - // The errors on the nodes that were already tried (lazily initialized on the first error). - // We don't use a map because nodes can appear multiple times. - private volatile List> errors; - - GraphRequestHandler( - @NonNull GraphStatement statement, - @NonNull DefaultSession dseSession, - @NonNull InternalDriverContext context, - @NonNull String sessionLogPrefix, - @NonNull GraphBinaryModule graphBinaryModule, - @NonNull GraphSupportChecker graphSupportChecker) { - this.startTimeNanos = System.nanoTime(); - this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); - LOG.trace("[{}] Creating new Graph request handler for request {}", logPrefix, statement); - this.initialStatement = statement; - this.session = dseSession; - this.context = context; - this.graphSupportChecker = graphSupportChecker; - this.result = new CompletableFuture<>(); - this.result.exceptionally( - t -> { - try { - if (t instanceof CancellationException) { - cancelScheduledTasks(); - context.getRequestThrottler().signalCancel(this); - } - } catch (Throwable t2) { - Loggers.warnWithException(LOG, "[{}] Uncaught exception", logPrefix, t2); - } - return null; - }); - this.graphBinaryModule = graphBinaryModule; - this.timer = context.getNettyOptions().getTimer(); - - this.activeExecutionsCount = new AtomicInteger(1); - this.startedSpeculativeExecutionsCount = new AtomicInteger(0); - this.scheduledExecutions = new CopyOnWriteArrayList<>(); - this.inFlightCallbacks = new CopyOnWriteArrayList<>(); - - this.requestTracker = 
context.getRequestTracker(); - this.sessionMetricUpdater = session.getMetricUpdater(); - - Duration timeout = GraphConversions.resolveGraphRequestTimeout(statement, context); - this.scheduledTimeout = scheduleTimeout(timeout); - - this.throttler = context.getRequestThrottler(); - this.throttler.register(this); - } - - @Override - public void onThrottleReady(boolean wasDelayed) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialStatement, context); - if (wasDelayed - // avoid call to nanoTime() if metric is disabled: - && sessionMetricUpdater.isEnabled( - DefaultSessionMetric.THROTTLING_DELAY, executionProfile.getName())) { - sessionMetricUpdater.updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - executionProfile.getName(), - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - Queue queryPlan = - initialStatement.getNode() != null - ? new SimpleQueryPlan(initialStatement.getNode()) - : context - .getLoadBalancingPolicyWrapper() - .newQueryPlan(initialStatement, executionProfile.getName(), session); - sendRequest(initialStatement, null, queryPlan, 0, 0, true); - } - - public CompletionStage handle() { - return result; - } - - private Timeout scheduleTimeout(Duration timeoutDuration) { - if (timeoutDuration != null && timeoutDuration.toNanos() > 0) { - try { - return this.timer.newTimeout( - (Timeout timeout1) -> - setFinalError( - initialStatement, - new DriverTimeoutException("Query timed out after " + timeoutDuration), - null, - NO_SUCCESSFUL_EXECUTION), - timeoutDuration.toNanos(), - TimeUnit.NANOSECONDS); - } catch (IllegalStateException e) { - // If we raced with session shutdown the timer might be closed already, rethrow with a more - // explicit message - result.completeExceptionally( - "cannot be started once stopped".equals(e.getMessage()) - ? new IllegalStateException("Session is closed") - : e); - } - } - return null; - } - - /** - * Sends the request to the next available node. 
- * - * @param retriedNode if not null, it will be attempted first before the rest of the query plan. - * @param queryPlan the list of nodes to try (shared with all other executions) - * @param currentExecutionIndex 0 for the initial execution, 1 for the first speculative one, etc. - * @param retryCount the number of times that the retry policy was invoked for this execution - * already (note that some internal retries don't go through the policy, and therefore don't - * increment this counter) - * @param scheduleNextExecution whether to schedule the next speculative execution - */ - private void sendRequest( - GraphStatement statement, - Node retriedNode, - Queue queryPlan, - int currentExecutionIndex, - int retryCount, - boolean scheduleNextExecution) { - if (result.isDone()) { - return; - } - Node node = retriedNode; - DriverChannel channel = null; - if (node == null || (channel = session.getChannel(node, logPrefix)) == null) { - while (!result.isDone() && (node = queryPlan.poll()) != null) { - channel = session.getChannel(node, logPrefix); - if (channel != null) { - break; - } else { - recordError(node, new NodeUnavailableException(node)); - } - } - } - if (channel == null) { - // We've reached the end of the query plan without finding any node to write to - if (!result.isDone() && activeExecutionsCount.decrementAndGet() == 0) { - // We're the last execution so fail the result - setFinalError( - statement, - AllNodesFailedException.fromErrors(this.errors), - null, - NO_SUCCESSFUL_EXECUTION); - } - } else { - NodeResponseCallback nodeResponseCallback = - new NodeResponseCallback( - statement, - node, - queryPlan, - channel, - currentExecutionIndex, - retryCount, - scheduleNextExecution, - logPrefix); - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - GraphProtocol graphSubProtocol = - GraphConversions.resolveGraphSubProtocol(statement, graphSupportChecker, context); - Message message = - 
GraphConversions.createMessageFromGraphStatement( - statement, graphSubProtocol, executionProfile, context, graphBinaryModule); - Map customPayload = - GraphConversions.createCustomPayload( - statement, graphSubProtocol, executionProfile, context, graphBinaryModule); - channel - .write(message, statement.isTracing(), customPayload, nodeResponseCallback) - .addListener(nodeResponseCallback); - } - } - - private void recordError(Node node, Throwable error) { - // Use a local variable to do only a single single volatile read in the nominal case - List> errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - synchronized (GraphRequestHandler.this) { - errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - this.errors = errorsSnapshot = new CopyOnWriteArrayList<>(); - } - } - } - errorsSnapshot.add(new AbstractMap.SimpleEntry<>(node, error)); - } - - private void cancelScheduledTasks() { - if (this.scheduledTimeout != null) { - this.scheduledTimeout.cancel(); - } - if (scheduledExecutions != null) { - for (Timeout scheduledExecution : scheduledExecutions) { - scheduledExecution.cancel(); - } - } - for (NodeResponseCallback callback : inFlightCallbacks) { - callback.cancel(); - } - } - - private void setFinalResult( - Result resultMessage, Frame responseFrame, NodeResponseCallback callback) { - try { - ExecutionInfo executionInfo = buildExecutionInfo(callback, responseFrame); - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(callback.statement, context); - GraphProtocol subProtocol = - GraphConversions.resolveGraphSubProtocol( - callback.statement, graphSupportChecker, context); - Queue graphNodes = new ArrayDeque<>(); - for (List row : ((Rows) resultMessage).getData()) { - if (subProtocol.isGraphBinary()) { - graphNodes.offer( - GraphConversions.createGraphBinaryGraphNode( - row, GraphRequestHandler.this.graphBinaryModule)); - } else { - graphNodes.offer(GraphSONUtils.createGraphNode(row, subProtocol)); - } - } - - 
DefaultAsyncGraphResultSet resultSet = - new DefaultAsyncGraphResultSet(executionInfo, graphNodes, subProtocol); - if (result.complete(resultSet)) { - cancelScheduledTasks(); - throttler.signalSuccess(this); - - // Only call nanoTime() if we're actually going to use it - long completionTimeNanos = NANOTIME_NOT_MEASURED_YET, - totalLatencyNanos = NANOTIME_NOT_MEASURED_YET; - if (!(requestTracker instanceof NoopRequestTracker)) { - completionTimeNanos = System.nanoTime(); - totalLatencyNanos = completionTimeNanos - startTimeNanos; - long nodeLatencyNanos = completionTimeNanos - callback.nodeStartTimeNanos; - requestTracker.onNodeSuccess( - callback.statement, nodeLatencyNanos, executionProfile, callback.node, logPrefix); - requestTracker.onSuccess( - callback.statement, totalLatencyNanos, executionProfile, callback.node, logPrefix); - } - if (sessionMetricUpdater.isEnabled( - DseSessionMetric.GRAPH_REQUESTS, executionProfile.getName())) { - if (completionTimeNanos == NANOTIME_NOT_MEASURED_YET) { - completionTimeNanos = System.nanoTime(); - totalLatencyNanos = completionTimeNanos - startTimeNanos; - } - sessionMetricUpdater.updateTimer( - DseSessionMetric.GRAPH_REQUESTS, - executionProfile.getName(), - totalLatencyNanos, - TimeUnit.NANOSECONDS); - } - } - // log the warnings if they have NOT been disabled - if (!executionInfo.getWarnings().isEmpty() - && executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOG_WARNINGS) - && LOG.isWarnEnabled()) { - logServerWarnings(callback.statement, executionInfo.getWarnings()); - } - } catch (Throwable error) { - setFinalError(callback.statement, error, callback.node, NO_SUCCESSFUL_EXECUTION); - } - } - - private void logServerWarnings(GraphStatement statement, List warnings) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - // use the RequestLogFormatter to format the query - StringBuilder statementString = new StringBuilder(); - context - .getRequestLogFormatter() - 
.appendRequest( - statement, - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH), - executionProfile.getBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_SHOW_VALUES), - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUES), - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH), - statementString); - // log each warning separately - warnings.forEach( - (warning) -> - LOG.warn("Query '{}' generated server side warning(s): {}", statementString, warning)); - } - - private ExecutionInfo buildExecutionInfo(NodeResponseCallback callback, Frame responseFrame) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(callback.statement, context); - return new DefaultExecutionInfo( - callback.statement, - callback.node, - startedSpeculativeExecutionsCount.get(), - callback.execution, - errors, - null, - responseFrame, - true, - session, - context, - executionProfile); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialStatement, context); - sessionMetricUpdater.incrementCounter( - DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); - setFinalError(initialStatement, error, null, NO_SUCCESSFUL_EXECUTION); - } - - private void setFinalError( - GraphStatement statement, Throwable error, Node node, int execution) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - if (error instanceof DriverException) { - ((DriverException) error) - .setExecutionInfo( - new DefaultExecutionInfo( - statement, - node, - startedSpeculativeExecutionsCount.get(), - execution, - errors, - null, 
- null, - true, - session, - context, - executionProfile)); - } - if (result.completeExceptionally(error)) { - cancelScheduledTasks(); - if (!(requestTracker instanceof NoopRequestTracker)) { - long latencyNanos = System.nanoTime() - startTimeNanos; - requestTracker.onError(statement, error, latencyNanos, executionProfile, node, logPrefix); - } - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(this); - sessionMetricUpdater.incrementCounter( - DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, executionProfile.getName()); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(this, error); - } - } - } - - /** - * Handles the interaction with a single node in the query plan. - * - *

An instance of this class is created each time we (re)try a node. - */ - private class NodeResponseCallback - implements ResponseCallback, GenericFutureListener> { - - private final long nodeStartTimeNanos = System.nanoTime(); - private final GraphStatement statement; - private final Node node; - private final Queue queryPlan; - private final DriverChannel channel; - // The identifier of the current execution (0 for the initial execution, 1 for the first - // speculative execution, etc.) - private final int execution; - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt of each execution). - private final int retryCount; - private final boolean scheduleNextExecution; - private final String logPrefix; - private final DriverExecutionProfile executionProfile; - - private NodeResponseCallback( - GraphStatement statement, - Node node, - Queue queryPlan, - DriverChannel channel, - int execution, - int retryCount, - boolean scheduleNextExecution, - String logPrefix) { - this.statement = statement; - this.node = node; - this.queryPlan = queryPlan; - this.channel = channel; - this.execution = execution; - this.retryCount = retryCount; - this.scheduleNextExecution = scheduleNextExecution; - this.logPrefix = logPrefix + "|" + execution; - this.executionProfile = Conversions.resolveExecutionProfile(statement, context); - } - - // this gets invoked once the write completes. 
- @Override - public void operationComplete(Future future) { - if (!future.isSuccess()) { - Throwable error = future.cause(); - if (error instanceof EncoderException - && error.getCause() instanceof FrameTooLongException) { - trackNodeError(node, error.getCause(), NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error.getCause(), node, execution); - } else { - LOG.trace( - "[{}] Failed to send request on {}, trying next node (cause: {})", - logPrefix, - channel, - error); - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter(DefaultNodeMetric.UNSENT_REQUESTS, executionProfile.getName()); - sendRequest( - statement, - null, - queryPlan, - execution, - retryCount, - scheduleNextExecution); // try next node - } - } else { - LOG.trace("[{}] Request sent on {}", logPrefix, channel); - if (result.isDone()) { - // If the handler completed since the last time we checked, cancel directly because we - // don't know if cancelScheduledTasks() has run yet - cancel(); - } else { - inFlightCallbacks.add(this); - if (scheduleNextExecution - && Conversions.resolveIdempotence(statement, executionProfile)) { - int nextExecution = execution + 1; - long nextDelay; - try { - nextDelay = - Conversions.resolveSpeculativeExecutionPolicy(context, executionProfile) - .nextExecution(node, null, statement, nextExecution); - } catch (Throwable cause) { - // This is a bug in the policy, but not fatal since we have at least one other - // execution already running. Don't fail the whole request. 
- LOG.error( - "[{}] Unexpected error while invoking the speculative execution policy", - logPrefix, - cause); - return; - } - if (nextDelay >= 0) { - scheduleSpeculativeExecution(nextExecution, nextDelay); - } else { - LOG.trace( - "[{}] Speculative execution policy returned {}, no next execution", - logPrefix, - nextDelay); - } - } - } - } - } - - private void scheduleSpeculativeExecution(int index, long delay) { - LOG.trace("[{}] Scheduling speculative execution {} in {} ms", logPrefix, index, delay); - try { - scheduledExecutions.add( - timer.newTimeout( - (Timeout timeout1) -> { - if (!result.isDone()) { - LOG.trace( - "[{}] Starting speculative execution {}", - GraphRequestHandler.this.logPrefix, - index); - activeExecutionsCount.incrementAndGet(); - startedSpeculativeExecutionsCount.incrementAndGet(); - // Note that `node` is the first node of the execution, it might not be the - // "slow" one if there were retries, but in practice retries are rare. - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName()); - sendRequest(statement, null, queryPlan, index, 0, true); - } - }, - delay, - TimeUnit.MILLISECONDS)); - } catch (IllegalStateException e) { - // If we're racing with session shutdown, the timer might be stopped already. We don't want - // to schedule more executions anyway, so swallow the error. 
- if (!"cannot be started once stopped".equals(e.getMessage())) { - Loggers.warnWithException( - LOG, "[{}] Error while scheduling speculative execution", logPrefix, e); - } - } - } - - @Override - public void onResponse(Frame responseFrame) { - long nodeResponseTimeNanos = NANOTIME_NOT_MEASURED_YET; - NodeMetricUpdater nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (nodeMetricUpdater.isEnabled(DseNodeMetric.GRAPH_MESSAGES, executionProfile.getName())) { - nodeResponseTimeNanos = System.nanoTime(); - long nodeLatency = System.nanoTime() - nodeStartTimeNanos; - nodeMetricUpdater.updateTimer( - DseNodeMetric.GRAPH_MESSAGES, - executionProfile.getName(), - nodeLatency, - TimeUnit.NANOSECONDS); - } - inFlightCallbacks.remove(this); - if (result.isDone()) { - return; - } - try { - Message responseMessage = responseFrame.message; - if (responseMessage instanceof Result) { - LOG.trace("[{}] Got result, completing", logPrefix); - setFinalResult((Result) responseMessage, responseFrame, this); - } else if (responseMessage instanceof Error) { - LOG.trace("[{}] Got error response, processing", logPrefix); - processErrorResponse((Error) responseMessage); - } else { - trackNodeError( - node, - new IllegalStateException("Unexpected response " + responseMessage), - nodeResponseTimeNanos); - setFinalError( - statement, - new IllegalStateException("Unexpected response " + responseMessage), - node, - execution); - } - } catch (Throwable t) { - trackNodeError(node, t, nodeResponseTimeNanos); - setFinalError(statement, t, node, execution); - } - } - - private void processErrorResponse(Error errorMessage) { - CoordinatorException error = Conversions.toThrowable(node, errorMessage, context); - NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (error instanceof BootstrappingException) { - LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); - recordError(node, error); - trackNodeError(node, error, 
NANOTIME_NOT_MEASURED_YET); - sendRequest(statement, null, queryPlan, execution, retryCount, false); - } else if (error instanceof QueryValidationException - || error instanceof FunctionFailureException - || error instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); - metricUpdater.incrementCounter(DefaultNodeMetric.OTHER_ERRORS, executionProfile.getName()); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error, node, execution); - } else { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - RetryVerdict verdict; - if (error instanceof ReadTimeoutException) { - ReadTimeoutException readTimeout = (ReadTimeoutException) error; - verdict = - retryPolicy.onReadTimeoutVerdict( - statement, - readTimeout.getConsistencyLevel(), - readTimeout.getBlockFor(), - readTimeout.getReceived(), - readTimeout.wasDataPresent(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT); - } else if (error instanceof WriteTimeoutException) { - WriteTimeoutException writeTimeout = (WriteTimeoutException) error; - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? 
retryPolicy.onWriteTimeoutVerdict( - statement, - writeTimeout.getConsistencyLevel(), - writeTimeout.getWriteType(), - writeTimeout.getBlockFor(), - writeTimeout.getReceived(), - retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT); - } else if (error instanceof UnavailableException) { - UnavailableException unavailable = (UnavailableException) error; - verdict = - retryPolicy.onUnavailableVerdict( - statement, - unavailable.getConsistencyLevel(), - unavailable.getRequired(), - unavailable.getAlive(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.UNAVAILABLES, - DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE); - } else { - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? retryPolicy.onErrorResponseVerdict(statement, error, retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR); - } - processRetryVerdict(verdict, error); - } - } - - private void processRetryVerdict(RetryVerdict verdict, Throwable error) { - LOG.trace("[{}] Processing retry decision {}", logPrefix, verdict); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest( - verdict.getRetryRequest(statement), - node, - queryPlan, - execution, - retryCount + 1, - false); - break; - case RETRY_NEXT: - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest( - verdict.getRetryRequest(statement), - null, - queryPlan, - execution, - retryCount + 1, - false); - break; - case RETHROW: - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - 
setFinalError(statement, error, node, execution); - break; - case IGNORE: - setFinalResult(Void.INSTANCE, null, this); - break; - } - } - - private void updateErrorMetrics( - NodeMetricUpdater metricUpdater, - RetryVerdict verdict, - DefaultNodeMetric error, - DefaultNodeMetric retriesOnError, - DefaultNodeMetric ignoresOnError) { - metricUpdater.incrementCounter(error, executionProfile.getName()); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - case RETRY_NEXT: - metricUpdater.incrementCounter(DefaultNodeMetric.RETRIES, executionProfile.getName()); - metricUpdater.incrementCounter(retriesOnError, executionProfile.getName()); - break; - case IGNORE: - metricUpdater.incrementCounter(DefaultNodeMetric.IGNORES, executionProfile.getName()); - metricUpdater.incrementCounter(ignoresOnError, executionProfile.getName()); - break; - case RETHROW: - // nothing do do - } - } - - @Override - public void onFailure(Throwable error) { - inFlightCallbacks.remove(this); - if (result.isDone()) { - return; - } - LOG.trace("[{}] Request failure, processing: {}", logPrefix, error); - RetryVerdict verdict; - if (!Conversions.resolveIdempotence(statement, executionProfile) - || error instanceof FrameTooLongException) { - verdict = RetryVerdict.RETHROW; - } else { - try { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - verdict = retryPolicy.onRequestAbortedVerdict(statement, error, retryCount); - } catch (Throwable cause) { - setFinalError( - statement, - new IllegalStateException("Unexpected error while invoking the retry policy", cause), - node, - NO_SUCCESSFUL_EXECUTION); - return; - } - } - processRetryVerdict(verdict, error); - updateErrorMetrics( - ((DefaultNode) node).getMetricUpdater(), - verdict, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED); - } - - void cancel() { - try { - if (!channel.closeFuture().isDone()) { - this.channel.cancel(this); - } - } catch 
(Throwable t) { - Loggers.warnWithException(LOG, "[{}] Error cancelling", logPrefix, t); - } - } - - /** - * @param nodeResponseTimeNanos the time we received the response, if it's already been - * measured. If {@link #NANOTIME_NOT_MEASURED_YET}, it hasn't and we need to measure it now - * (this is to avoid unnecessary calls to System.nanoTime) - */ - private void trackNodeError(Node node, Throwable error, long nodeResponseTimeNanos) { - if (requestTracker instanceof NoopRequestTracker) { - return; - } - if (nodeResponseTimeNanos == NANOTIME_NOT_MEASURED_YET) { - nodeResponseTimeNanos = System.nanoTime(); - } - long latencyNanos = nodeResponseTimeNanos - this.nodeStartTimeNanos; - requestTracker.onNodeError(statement, error, latencyNanos, executionProfile, node, logPrefix); - } - - @Override - public String toString() { - return logPrefix; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestSyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestSyncProcessor.java deleted file mode 100644 index bc2381482a8..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestSyncProcessor.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class GraphRequestSyncProcessor - implements RequestProcessor, GraphResultSet> { - - private final GraphRequestAsyncProcessor asyncProcessor; - - public GraphRequestSyncProcessor(GraphRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return (request instanceof ScriptGraphStatement - || request instanceof FluentGraphStatement - || request instanceof BatchGraphStatement - || request instanceof BytecodeGraphStatement) - && resultType.equals(GraphStatement.SYNC); - } - - @Override - public GraphResultSet process( - GraphStatement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - BlockingOperation.checkNotDriverThread(); - AsyncGraphResultSet firstPage = - 
CompletableFutures.getUninterruptibly( - asyncProcessor.process(request, session, context, sessionLogPrefix)); - return GraphResultSets.toSync(firstPage); - } - - @Override - public GraphResultSet newFailure(RuntimeException error) { - throw error; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultIterator.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultIterator.java deleted file mode 100644 index 7e9043affec..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultIterator.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Queue; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.traversal.Traverser; - -@NotThreadSafe // wraps a mutable queue -class GraphResultIterator extends CountingIterator { - - private final Queue data; - private final GraphProtocol graphProtocol; - - // Sometimes a traversal can yield the same result multiple times consecutively. To avoid - // duplicating the data, DSE graph sends it only once with a counter indicating how many times - // it's repeated. - private long repeat = 0; - private GraphNode lastGraphNode = null; - - GraphResultIterator(Queue data, GraphProtocol graphProtocol) { - super(data.size()); - this.data = data; - this.graphProtocol = graphProtocol; - } - - @Override - protected GraphNode computeNext() { - if (repeat > 1) { - repeat -= 1; - // Note that we don't make a defensive copy, we assume the client won't mutate the node - return lastGraphNode; - } - - GraphNode container = data.poll(); - if (container == null) { - return endOfData(); - } - - if (graphProtocol.isGraphBinary()) { - // results are contained in a Traverser object and not a Map if the protocol - // is GraphBinary - Preconditions.checkState( - container.as(Object.class) instanceof Traverser, - "Graph protocol error. 
Received object should be a Traverser but it is not."); - Traverser t = container.as(Traverser.class); - this.repeat = t.bulk(); - this.lastGraphNode = new ObjectGraphNode(t.get()); - return lastGraphNode; - } else { - // The repeat counter is called "bulk" in the JSON payload - GraphNode b = container.getByKey("bulk"); - if (b != null) { - this.repeat = b.asLong(); - } - - lastGraphNode = container.getByKey("result"); - return lastGraphNode; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultSets.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultSets.java deleted file mode 100644 index fb21f857cfa..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultSets.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; - -public class GraphResultSets { - - public static GraphResultSet toSync(AsyncGraphResultSet firstPage) { - if (firstPage.hasMorePages()) { - return new MultiPageGraphResultSet(firstPage); - } else { - return new SinglePageGraphResultSet(firstPage); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON1SerdeTP.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON1SerdeTP.java deleted file mode 100644 index f880bca3764..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON1SerdeTP.java +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.net.InetAddresses; -import java.io.IOException; -import java.net.Inet4Address; -import java.net.Inet6Address; -import java.net.InetAddress; -import java.util.List; -import java.util.Map; -import org.apache.tinkerpop.shaded.jackson.core.JsonGenerator; -import org.apache.tinkerpop.shaded.jackson.core.JsonParseException; -import org.apache.tinkerpop.shaded.jackson.core.JsonParser; -import org.apache.tinkerpop.shaded.jackson.core.Version; -import org.apache.tinkerpop.shaded.jackson.databind.DeserializationContext; -import org.apache.tinkerpop.shaded.jackson.databind.JsonDeserializer; -import org.apache.tinkerpop.shaded.jackson.databind.JsonSerializer; -import org.apache.tinkerpop.shaded.jackson.databind.SerializerProvider; -import org.apache.tinkerpop.shaded.jackson.databind.deser.std.StdDeserializer; -import org.apache.tinkerpop.shaded.jackson.databind.module.SimpleModule; -import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdSerializer; - -public class GraphSON1SerdeTP { - - //////////////////////// DESERIALIZERS //////////////////////// - - /** - * Default deserializer used by the driver for {@link InetAddress} instances. The actual subclass - * returned by this deserializer depends on the type of address: {@link Inet4Address IPV4} or - * {@link Inet6Address IPV6}. 
- */ - static class DefaultInetAddressDeserializer extends StdDeserializer { - - private static final long serialVersionUID = 1L; - - private final Class inetClass; - - DefaultInetAddressDeserializer(Class inetClass) { - super(inetClass); - this.inetClass = inetClass; - } - - @Override - public T deserialize(JsonParser parser, DeserializationContext ctx) throws IOException { - String ip = parser.readValueAs(String.class); - try { - InetAddress inet = InetAddresses.forString(ip); - return inetClass.cast(inet); - } catch (ClassCastException e) { - throw new JsonParseException( - parser, - String.format("Inet address cannot be cast to %s: %s", inetClass.getSimpleName(), ip), - e); - } catch (IllegalArgumentException e) { - throw new JsonParseException(parser, String.format("Expected inet address, got %s", ip), e); - } - } - } - - /** - * Default deserializer used by the driver for geospatial types. It deserializes such types into - * {@link Geometry} instances. The actual subclass depends on the type being deserialized. - */ - static class DefaultGeometryDeserializer extends StdDeserializer { - - private static final long serialVersionUID = 1L; - - private final Class geometryClass; - - DefaultGeometryDeserializer(Class geometryClass) { - super(geometryClass); - this.geometryClass = geometryClass; - } - - @Override - public T deserialize(JsonParser parser, DeserializationContext ctx) throws IOException { - String wkt = parser.readValueAs(String.class); - Geometry geometry; - if (wkt.startsWith("POINT")) geometry = Point.fromWellKnownText(wkt); - else if (wkt.startsWith("LINESTRING")) geometry = LineString.fromWellKnownText(wkt); - else if (wkt.startsWith("POLYGON")) geometry = Polygon.fromWellKnownText(wkt); - else throw new JsonParseException(parser, "Unknown geometry type: " + wkt); - return geometryClass.cast(geometry); - } - } - - /** Base class for serializing the {@code java.time.*} types to ISO-8061 formats. 
*/ - abstract static class AbstractJavaTimeSerializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - AbstractJavaTimeSerializer(final Class clazz) { - super(clazz); - } - - @Override - public void serialize( - final T value, final JsonGenerator gen, final SerializerProvider serializerProvider) - throws IOException { - gen.writeString(value.toString()); - } - } - - /** Base class for deserializing the {@code java.time.*} types from ISO-8061 formats. */ - abstract static class AbstractJavaTimeJacksonDeserializer extends StdDeserializer { - - private static final long serialVersionUID = 1L; - - AbstractJavaTimeJacksonDeserializer(final Class clazz) { - super(clazz); - } - - abstract T parse(final String val); - - @Override - public T deserialize( - final JsonParser jsonParser, final DeserializationContext deserializationContext) - throws IOException { - return parse(jsonParser.getText()); - } - } - - static final class DurationJacksonSerializer - extends AbstractJavaTimeSerializer { - - private static final long serialVersionUID = 1L; - - DurationJacksonSerializer() { - super(java.time.Duration.class); - } - } - - static final class DurationJacksonDeserializer - extends AbstractJavaTimeJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - DurationJacksonDeserializer() { - super(java.time.Duration.class); - } - - @Override - public java.time.Duration parse(final String val) { - return java.time.Duration.parse(val); - } - } - - static final class InstantJacksonSerializer - extends AbstractJavaTimeSerializer { - - private static final long serialVersionUID = 1L; - - InstantJacksonSerializer() { - super(java.time.Instant.class); - } - } - - static final class InstantJacksonDeserializer - extends AbstractJavaTimeJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - InstantJacksonDeserializer() { - super(java.time.Instant.class); - } - - @Override - public java.time.Instant parse(final 
String val) { - return java.time.Instant.parse(val); - } - } - - static final class LocalDateJacksonSerializer - extends AbstractJavaTimeSerializer { - - private static final long serialVersionUID = 1L; - - LocalDateJacksonSerializer() { - super(java.time.LocalDate.class); - } - } - - static final class LocalDateJacksonDeserializer - extends AbstractJavaTimeJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - LocalDateJacksonDeserializer() { - super(java.time.LocalDate.class); - } - - @Override - public java.time.LocalDate parse(final String val) { - return java.time.LocalDate.parse(val); - } - } - - static final class LocalTimeJacksonSerializer - extends AbstractJavaTimeSerializer { - - private static final long serialVersionUID = 1L; - - LocalTimeJacksonSerializer() { - super(java.time.LocalTime.class); - } - } - - static final class LocalTimeJacksonDeserializer - extends AbstractJavaTimeJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - LocalTimeJacksonDeserializer() { - super(java.time.LocalTime.class); - } - - @Override - public java.time.LocalTime parse(final String val) { - return java.time.LocalTime.parse(val); - } - } - - //////////////////////// SERIALIZERS //////////////////////// - - /** Default serializer used by the driver for {@link LegacyGraphNode} instances. */ - static class DefaultGraphNodeSerializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - DefaultGraphNodeSerializer() { - super(LegacyGraphNode.class); - } - - @Override - public void serialize( - LegacyGraphNode value, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) - throws IOException { - jsonGenerator.writeTree(value.getDelegate()); - } - } - - /** - * Default serializer used by the driver for geospatial types. It serializes {@link Geometry} - * instances into their Well-Known Text (WKT) equivalent. 
- */ - static class DefaultGeometrySerializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - DefaultGeometrySerializer() { - super(Geometry.class); - } - - @Override - public void serialize( - Geometry value, JsonGenerator jsonGenerator, SerializerProvider serializers) - throws IOException { - jsonGenerator.writeString(value.asWellKnownText()); - } - } - - /** The default Jackson module used by DSE Graph. */ - static class GraphSON1DefaultModule extends SimpleModule { - - private static final long serialVersionUID = 1L; - - GraphSON1DefaultModule(String name, Version version) { - super(name, version, createDeserializers(), createSerializers()); - } - - private static Map, JsonDeserializer> createDeserializers() { - - return ImmutableMap., JsonDeserializer>builder() - - // Inet (there is no built-in deserializer for InetAddress and subclasses) - .put(InetAddress.class, new DefaultInetAddressDeserializer<>(InetAddress.class)) - .put(Inet4Address.class, new DefaultInetAddressDeserializer<>(Inet4Address.class)) - .put(Inet6Address.class, new DefaultInetAddressDeserializer<>(Inet6Address.class)) - - // Geospatial types - .put(Geometry.class, new DefaultGeometryDeserializer<>(Geometry.class)) - .put(Point.class, new DefaultGeometryDeserializer<>(Point.class)) - .put(LineString.class, new DefaultGeometryDeserializer<>(LineString.class)) - .put(Polygon.class, new DefaultGeometryDeserializer<>(Polygon.class)) - .build(); - } - - private static List> createSerializers() { - return ImmutableList.>builder() - .add(new DefaultGraphNodeSerializer()) - .add(new DefaultGeometrySerializer()) - .build(); - } - } - - /** Serializers and deserializers for JSR 310 {@code java.time.*}. 
*/ - static class GraphSON1JavaTimeModule extends SimpleModule { - - private static final long serialVersionUID = 1L; - - GraphSON1JavaTimeModule(String name, Version version) { - super(name, version, createDeserializers(), createSerializers()); - } - - private static Map, JsonDeserializer> createDeserializers() { - - return ImmutableMap., JsonDeserializer>builder() - .put(java.time.Duration.class, new DurationJacksonDeserializer()) - .put(java.time.Instant.class, new InstantJacksonDeserializer()) - .put(java.time.LocalDate.class, new LocalDateJacksonDeserializer()) - .put(java.time.LocalTime.class, new LocalTimeJacksonDeserializer()) - .build(); - } - - private static List> createSerializers() { - return ImmutableList.>builder() - .add(new DurationJacksonSerializer()) - .add(new InstantJacksonSerializer()) - .add(new LocalDateJacksonSerializer()) - .add(new LocalTimeJacksonSerializer()) - .build(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON2SerdeTP.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON2SerdeTP.java deleted file mode 100644 index d79afc71822..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON2SerdeTP.java +++ /dev/null @@ -1,430 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.graph.predicates.Geo; -import com.datastax.dse.driver.api.core.graph.predicates.Search; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.apache.tinkerpop.gremlin.process.traversal.util.AndP; -import org.apache.tinkerpop.gremlin.process.traversal.util.ConnectiveP; -import org.apache.tinkerpop.gremlin.process.traversal.util.OrP; -import org.apache.tinkerpop.gremlin.structure.io.graphson.AbstractObjectDeserializer; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONTokens; -import org.apache.tinkerpop.gremlin.structure.io.graphson.TinkerPopJacksonModule; -import org.apache.tinkerpop.shaded.jackson.core.JsonGenerator; -import org.apache.tinkerpop.shaded.jackson.core.JsonParser; -import org.apache.tinkerpop.shaded.jackson.databind.DeserializationContext; -import org.apache.tinkerpop.shaded.jackson.databind.SerializerProvider; -import org.apache.tinkerpop.shaded.jackson.databind.deser.std.StdDeserializer; -import 
org.apache.tinkerpop.shaded.jackson.databind.jsontype.TypeSerializer; -import org.apache.tinkerpop.shaded.jackson.databind.module.SimpleModule; -import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdScalarSerializer; -import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdSerializer; - -public class GraphSON2SerdeTP { - - /** - * A Jackson Module to use for TinkerPop serialization/deserialization. It extends {@link - * org.apache.tinkerpop.gremlin.structure.io.graphson.TinkerPopJacksonModule} because of the - * specific typing format used in GraphSON. - */ - public static class DseGraphModule extends TinkerPopJacksonModule { - - private static final long serialVersionUID = 1L; - - public DseGraphModule() { - super("dse-driver-2.0"); - addSerializer(DefaultPoint.class, new PointGeometrySerializer()); - addSerializer(DefaultLineString.class, new LineStringGeometrySerializer()); - addSerializer(DefaultPolygon.class, new PolygonGeometrySerializer()); - addSerializer(Distance.class, new DistanceGeometrySerializer()); - // override TinkerPop's P predicates because of DSE's Search and Geo predicates - addSerializer(P.class, new DsePJacksonSerializer()); - addSerializer(EditDistance.class, new EditDistanceSerializer()); - - addDeserializer(DefaultLineString.class, new LineStringGeometryDeserializer()); - addDeserializer(DefaultPoint.class, new PointGeometryDeserializer()); - addDeserializer(DefaultPolygon.class, new PolygonGeometryDeserializer()); - addDeserializer(Distance.class, new DistanceGeometryDeserializer()); - // override TinkerPop's P predicates because of DSE's Search and Geo predicates - addDeserializer(P.class, new DsePJacksonDeserializer()); - } - - @SuppressWarnings("rawtypes") - @Override - public Map getTypeDefinitions() { - Map definitions = new HashMap<>(); - definitions.put(DefaultLineString.class, "LineString"); - definitions.put(DefaultPoint.class, "Point"); - definitions.put(DefaultPolygon.class, "Polygon"); - 
definitions.put(byte[].class, "Blob"); - definitions.put(Distance.class, "Distance"); - definitions.put(P.class, "P"); - return definitions; - } - - @Override - public String getTypeNamespace() { - return "dse"; - } - - abstract static class AbstractGeometryJacksonDeserializer - extends StdDeserializer { - - private static final long serialVersionUID = 1L; - - AbstractGeometryJacksonDeserializer(final Class clazz) { - super(clazz); - } - - public abstract T parse(final String val); - - @Override - public T deserialize( - final JsonParser jsonParser, final DeserializationContext deserializationContext) - throws IOException { - return parse(jsonParser.getText()); - } - } - - abstract static class AbstractGeometryJacksonSerializer - extends StdScalarSerializer { - - private static final long serialVersionUID = 1L; - - AbstractGeometryJacksonSerializer(final Class clazz) { - super(clazz); - } - - @Override - public void serialize( - final T value, final JsonGenerator gen, final SerializerProvider serializerProvider) - throws IOException { - gen.writeString(value.asWellKnownText()); - } - } - - public static class LineStringGeometrySerializer - extends AbstractGeometryJacksonSerializer { - - private static final long serialVersionUID = 1L; - - LineStringGeometrySerializer() { - super(LineString.class); - } - } - - public static class LineStringGeometryDeserializer - extends AbstractGeometryJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - LineStringGeometryDeserializer() { - super(DefaultLineString.class); - } - - @Override - public DefaultLineString parse(final String val) { - return (DefaultLineString) LineString.fromWellKnownText(val); - } - } - - public static class PolygonGeometrySerializer - extends AbstractGeometryJacksonSerializer { - - private static final long serialVersionUID = 1L; - - PolygonGeometrySerializer() { - super(Polygon.class); - } - } - - public static class PolygonGeometryDeserializer - extends 
AbstractGeometryJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - PolygonGeometryDeserializer() { - super(DefaultPolygon.class); - } - - @Override - public DefaultPolygon parse(final String val) { - return (DefaultPolygon) Polygon.fromWellKnownText(val); - } - } - - public static class PointGeometrySerializer extends AbstractGeometryJacksonSerializer { - - private static final long serialVersionUID = 1L; - - PointGeometrySerializer() { - super(Point.class); - } - } - - public static class PointGeometryDeserializer - extends AbstractGeometryJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - PointGeometryDeserializer() { - super(DefaultPoint.class); - } - - @Override - public DefaultPoint parse(final String val) { - return (DefaultPoint) Point.fromWellKnownText(val); - } - } - - public static class DistanceGeometrySerializer - extends AbstractGeometryJacksonSerializer { - - private static final long serialVersionUID = 1L; - - DistanceGeometrySerializer() { - super(Distance.class); - } - } - - public static class DistanceGeometryDeserializer - extends AbstractGeometryJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - DistanceGeometryDeserializer() { - super(Distance.class); - } - - @Override - public Distance parse(final String val) { - return Distance.fromWellKnownText(val); - } - } - - @SuppressWarnings("rawtypes") - static final class DsePJacksonSerializer extends StdScalarSerializer

{ - - private static final long serialVersionUID = 1L; - - DsePJacksonSerializer() { - super(P.class); - } - - @Override - public void serialize( - final P p, final JsonGenerator jsonGenerator, final SerializerProvider serializerProvider) - throws IOException { - jsonGenerator.writeStartObject(); - jsonGenerator.writeStringField("predicateType", getPredicateType(p)); - jsonGenerator.writeStringField( - GraphSONTokens.PREDICATE, - p instanceof ConnectiveP - ? p instanceof AndP ? GraphSONTokens.AND : GraphSONTokens.OR - : p.getBiPredicate().toString()); - if (p instanceof ConnectiveP) { - jsonGenerator.writeArrayFieldStart(GraphSONTokens.VALUE); - for (final P predicate : ((ConnectiveP) p).getPredicates()) { - jsonGenerator.writeObject(predicate); - } - jsonGenerator.writeEndArray(); - } else { - if (p.getValue() instanceof Collection) { - jsonGenerator.writeArrayFieldStart(GraphSONTokens.VALUE); - for (final Object object : (Collection) p.getValue()) { - jsonGenerator.writeObject(object); - } - jsonGenerator.writeEndArray(); - } else { - jsonGenerator.writeObjectField(GraphSONTokens.VALUE, p.getValue()); - } - } - jsonGenerator.writeEndObject(); - } - - private String getPredicateType(P p) { - if (p.getBiPredicate() instanceof SearchPredicate) { - return Search.class.getSimpleName(); - } else if (p.getBiPredicate() instanceof GeoPredicate) { - return Geo.class.getSimpleName(); - } else { - return P.class.getSimpleName(); - } - } - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - static final class DsePJacksonDeserializer extends AbstractObjectDeserializer

{ - - private static final long serialVersionUID = 1L; - - DsePJacksonDeserializer() { - super(P.class); - } - - @Override - public P createObject(final Map data) { - final String predicate = (String) data.get(GraphSONTokens.PREDICATE); - final String predicateType = (String) data.get("predicateType"); - final Object value = data.get(GraphSONTokens.VALUE); - if (predicate.equals(GraphSONTokens.AND) || predicate.equals(GraphSONTokens.OR)) { - return predicate.equals(GraphSONTokens.AND) - ? new AndP((List

) value) - : new OrP((List

) value); - } else { - try { - if (value instanceof Collection) { - if (predicate.equals("between")) { - return P.between(((List) value).get(0), ((List) value).get(1)); - } else if (predicateType.equals(P.class.getSimpleName()) - && predicate.equals("inside")) { - return P.between(((List) value).get(0), ((List) value).get(1)); - } else if (predicate.equals("outside")) { - return P.outside(((List) value).get(0), ((List) value).get(1)); - } else if (predicate.equals("within")) { - return P.within((Collection) value); - } else if (predicate.equals("without")) { - return P.without((Collection) value); - } else { - return (P) - P.class.getMethod(predicate, Collection.class).invoke(null, (Collection) value); - } - } else { - if (predicate.equals(SearchPredicate.prefix.name())) { - return Search.prefix((String) value); - } else if (predicate.equals(SearchPredicate.tokenPrefix.name())) { - return Search.tokenPrefix((String) value); - } else if (predicate.equals(SearchPredicate.regex.name())) { - return Search.regex((String) value); - } else if (predicate.equals(SearchPredicate.tokenRegex.name())) { - return Search.tokenRegex((String) value); - } else if (predicate.equals(SearchPredicate.token.name())) { - return Search.token((String) value); - } else if (predicate.equals(SearchPredicate.fuzzy.name())) { - Map arguments = (Map) value; - return Search.fuzzy( - (String) arguments.get("query"), (int) arguments.get("distance")); - } else if (predicate.equals(SearchPredicate.tokenFuzzy.name())) { - Map arguments = (Map) value; - return Search.tokenFuzzy( - (String) arguments.get("query"), (int) arguments.get("distance")); - } else if (predicate.equals(SearchPredicate.phrase.name())) { - Map arguments = (Map) value; - return Search.phrase( - (String) arguments.get("query"), (int) arguments.get("distance")); - } else if (predicateType.equals(Geo.class.getSimpleName()) - && predicate.equals(GeoPredicate.inside.name())) { - return Geo.inside( - ((Distance) value).getCenter(), - 
((Distance) value).getRadius(), - Geo.Unit.DEGREES); - } else if (predicateType.equals(Geo.class.getSimpleName()) - && predicate.equals(GeoPredicate.insideCartesian.name())) { - return Geo.inside(((Distance) value).getCenter(), ((Distance) value).getRadius()); - } else { - return (P) P.class.getMethod(predicate, Object.class).invoke(null, value); - } - } - } catch (final Exception e) { - throw new IllegalStateException(e.getMessage(), e); - } - } - } - } - - public static class EditDistanceSerializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - EditDistanceSerializer() { - super(EditDistance.class); - } - - @Override - public void serialize( - EditDistance editDistance, JsonGenerator generator, SerializerProvider provider) - throws IOException { - generator.writeObject( - ImmutableMap.of("query", editDistance.query, "distance", editDistance.distance)); - } - - @Override - public void serializeWithType( - EditDistance editDistance, - JsonGenerator generator, - SerializerProvider provider, - TypeSerializer serializer) - throws IOException { - serialize(editDistance, generator, provider); - } - } - } - - public static class DriverObjectsModule extends SimpleModule { - - private static final long serialVersionUID = 1L; - - public DriverObjectsModule() { - super("datastax-driver-module"); - addSerializer(ObjectGraphNode.class, new ObjectGraphNodeGraphSON2Serializer()); - } - - static final class ObjectGraphNodeGraphSON2Serializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - protected ObjectGraphNodeGraphSON2Serializer() { - super(ObjectGraphNode.class); - } - - @Override - public void serialize( - ObjectGraphNode objectGraphNode, - JsonGenerator jsonGenerator, - SerializerProvider serializerProvider) - throws IOException { - jsonGenerator.writeObject(objectGraphNode.as(Object.class)); - } - - @Override - public void serializeWithType( - ObjectGraphNode objectGraphNode, - JsonGenerator jsonGenerator, 
- SerializerProvider serializerProvider, - TypeSerializer typeSerializer) - throws IOException { - serialize(objectGraphNode, jsonGenerator, serializerProvider); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSONUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSONUtils.java deleted file mode 100644 index 02b35f7ee36..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSONUtils.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.shaded.guava.common.base.Suppliers; -import com.datastax.oss.driver.shaded.guava.common.base.Throwables; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheBuilder; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheLoader; -import com.datastax.oss.driver.shaded.guava.common.cache.LoadingCache; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.function.Supplier; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONMapper; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONReader; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONVersion; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONXModuleV2d0; -import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV2d0; -import org.apache.tinkerpop.shaded.jackson.core.Version; -import org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper; - -public class GraphSONUtils { - - private static final LoadingCache OBJECT_MAPPERS = - CacheBuilder.newBuilder() - .build( - new CacheLoader() { - @Override - public ObjectMapper load(@NonNull GraphProtocol graphSubProtocol) throws Exception { - switch (graphSubProtocol) { - case GRAPHSON_1_0: - com.datastax.oss.driver.api.core.Version driverVersion = - CqlSession.OSS_DRIVER_COORDINATES.getVersion(); - Version driverJacksonVersion = - new Version( - driverVersion.getMajor(), - driverVersion.getMinor(), - driverVersion.getPatch(), - driverVersion.getPreReleaseLabels() != null - && driverVersion.getPreReleaseLabels().contains("SNAPSHOT") - ? 
"SNAPSHOT" - : null, - "com.datastax.dse", - "dse-java-driver-core"); - - ObjectMapper mapper = - GraphSONMapper.build() - .version(GraphSONVersion.V1_0) - .create() - .createMapper(); - mapper.registerModule( - new GraphSON1SerdeTP.GraphSON1DefaultModule( - "graph-graphson1default", driverJacksonVersion)); - mapper.registerModule( - new GraphSON1SerdeTP.GraphSON1JavaTimeModule( - "graph-graphson1javatime", driverJacksonVersion)); - - return mapper; - case GRAPHSON_2_0: - return GraphSONMapper.build() - .version(GraphSONVersion.V2_0) - .addCustomModule(GraphSONXModuleV2d0.build().create(false)) - .addRegistry(TinkerIoRegistryV2d0.instance()) - .addCustomModule(new GraphSON2SerdeTP.DseGraphModule()) - .addCustomModule(new GraphSON2SerdeTP.DriverObjectsModule()) - .create() - .createMapper(); - - default: - throw new IllegalStateException( - String.format("GraphSON sub-protocol unknown: {%s}", graphSubProtocol)); - } - } - }); - - static final Supplier GRAPHSON1_READER = - Suppliers.memoize( - () -> - GraphSONReader.build() - .mapper(GraphSONMapper.build().version(GraphSONVersion.V1_0).create()) - .create()); - - public static ByteBuffer serializeToByteBuffer(Object object, GraphProtocol graphSubProtocol) - throws IOException { - return ByteBuffer.wrap(serializeToBytes(object, graphSubProtocol)); - } - - static byte[] serializeToBytes(Object object, GraphProtocol graphSubProtocol) throws IOException { - try { - return OBJECT_MAPPERS.get(graphSubProtocol).writeValueAsBytes(object); - } catch (ExecutionException e) { - Throwables.throwIfUnchecked(e); - throw new RuntimeException(e); - } - } - - public static GraphNode createGraphNode(List data, GraphProtocol graphSubProtocol) - throws IOException { - try { - ObjectMapper mapper = OBJECT_MAPPERS.get(graphSubProtocol); - switch (graphSubProtocol) { - case GRAPHSON_1_0: - return new LegacyGraphNode(mapper.readTree(Bytes.getArray(data.get(0))), mapper); - case GRAPHSON_2_0: - return new 
ObjectGraphNode(mapper.readValue(Bytes.getArray(data.get(0)), Object.class)); - default: - // Should already be caught when we lookup in the cache - throw new AssertionError( - String.format("Unknown GraphSON sub-protocol: {%s}", graphSubProtocol)); - } - } catch (ExecutionException e) { - Throwables.throwIfUnchecked(e); - throw new RuntimeException(e); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBase.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBase.java deleted file mode 100644 index b8baa2f5e49..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBase.java +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class GraphStatementBase> - implements GraphStatement { - private final Boolean isIdempotent; - private final Duration timeout; - private final Node node; - private final long timestamp; - private final DriverExecutionProfile executionProfile; - private final String executionProfileName; - private final Map customPayload; - private final String graphName; - private final String traversalSource; - private final String subProtocol; - private final ConsistencyLevel consistencyLevel; - private final ConsistencyLevel readConsistencyLevel; - private final ConsistencyLevel writeConsistencyLevel; - - protected GraphStatementBase( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - this.isIdempotent = isIdempotent; - this.timeout = timeout; - this.node = node; - this.timestamp = timestamp; - this.executionProfile = executionProfile; - this.executionProfileName = executionProfileName; - this.customPayload = customPayload; - this.graphName = graphName; - this.traversalSource = traversalSource; - this.subProtocol = subProtocol; - this.consistencyLevel = consistencyLevel; - this.readConsistencyLevel = readConsistencyLevel; 
- this.writeConsistencyLevel = writeConsistencyLevel; - } - - protected abstract SelfT newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel); - - @Override - public Boolean isIdempotent() { - return isIdempotent; - } - - @NonNull - @Override - public SelfT setIdempotent(@Nullable Boolean newIdempotence) { - return newInstance( - newIdempotence, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public Duration getTimeout() { - return timeout; - } - - @NonNull - @Override - public SelfT setTimeout(@Nullable Duration newTimeout) { - return newInstance( - isIdempotent, - newTimeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public Node getNode() { - return node; - } - - @NonNull - @Override - public SelfT setNode(@Nullable Node newNode) { - return newInstance( - isIdempotent, - timeout, - newNode, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Override - public long getTimestamp() { - return this.timestamp; - } - - @NonNull - @Override - public SelfT setTimestamp(long newTimestamp) { - return newInstance( - isIdempotent, - timeout, - node, - newTimestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - 
traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @NonNull - @Override - public SelfT setExecutionProfile(@Nullable DriverExecutionProfile newExecutionProfile) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - newExecutionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public String getExecutionProfileName() { - return executionProfileName; - } - - @NonNull - @Override - public SelfT setExecutionProfileName(@Nullable String newExecutionProfileName) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - newExecutionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @NonNull - @Override - public Map getCustomPayload() { - return customPayload; - } - - @NonNull - @Override - public SelfT setCustomPayload(@NonNull Map newCustomPayload) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - newCustomPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public String getGraphName() { - return graphName; - } - - @NonNull - @Override - public SelfT setGraphName(@Nullable String newGraphName) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - newGraphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public String getTraversalSource() { - return 
traversalSource; - } - - @NonNull - @Override - public SelfT setTraversalSource(@Nullable String newTraversalSource) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - newTraversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public String getSubProtocol() { - return subProtocol; - } - - @NonNull - @Override - public SelfT setSubProtocol(@Nullable String newSubProtocol) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - newSubProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - @Override - public SelfT setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - newConsistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public ConsistencyLevel getReadConsistencyLevel() { - return readConsistencyLevel; - } - - @NonNull - @Override - public SelfT setReadConsistencyLevel(@Nullable ConsistencyLevel newReadConsistencyLevel) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - newReadConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public ConsistencyLevel getWriteConsistencyLevel() { - return writeConsistencyLevel; - } - - @NonNull - @Override - public SelfT setWriteConsistencyLevel(@Nullable ConsistencyLevel newWriteConsistencyLevel) { - 
return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - newWriteConsistencyLevel); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSupportChecker.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSupportChecker.java deleted file mode 100644 index 6e586bbcf3f..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSupportChecker.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.PagingEnabledOptions; -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.DseProtocolFeature; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collection; -import java.util.Objects; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class GraphSupportChecker { - - private static final Logger LOG = LoggerFactory.getLogger(GraphSupportChecker.class); - - /** - * The minimum DSE version supporting both graph paging and the GraphBinary sub-protocol is DSE - * 6.8. - */ - private static final Version MIN_DSE_VERSION_GRAPH_BINARY_AND_PAGING = - Objects.requireNonNull(Version.parse("6.8.0")); - - private volatile Boolean contextGraphPagingEnabled; - private volatile Boolean isDse68OrAbove; - - /** - * Checks whether graph paging is available. - * - *

Graph paging is available if: - * - *

    - *
  1. Continuous paging is generally available (this implies protocol version {@link - * com.datastax.dse.driver.api.core.DseProtocolVersion#DSE_V1 DSE_V1} or higher); - *
  2. Graph paging is set to ENABLED or AUTO in the configuration - * with {@link DseDriverOption#GRAPH_PAGING_ENABLED}; - *
  3. If graph paging is set to AUTO, then a check will be performed to verify - * that all hosts are running DSE 6.8+; if that is the case, then graph paging will be - * assumed to be available. - *
- * - * Note that the hosts check will be done only once, then memoized; if other hosts join the - * cluster later and do not support graph paging, the user has to manually disable graph paging. - */ - public boolean isPagingEnabled( - @NonNull GraphStatement graphStatement, @NonNull InternalDriverContext context) { - DriverExecutionProfile driverExecutionProfile = - Conversions.resolveExecutionProfile(graphStatement, context); - PagingEnabledOptions pagingEnabledOptions = - PagingEnabledOptions.valueOf( - driverExecutionProfile.getString(DseDriverOption.GRAPH_PAGING_ENABLED)); - if (LOG.isTraceEnabled()) { - LOG.trace("GRAPH_PAGING_ENABLED: {}", pagingEnabledOptions); - } - if (pagingEnabledOptions == PagingEnabledOptions.DISABLED) { - return false; - } else if (pagingEnabledOptions == PagingEnabledOptions.ENABLED) { - return true; - } else { - return isContextGraphPagingEnabled(context); - } - } - - /** - * Infers the {@link GraphProtocol} to use to execute the given statement. - * - *

The graph protocol is computed as follows: - * - *

    - *
  1. If the statement declares the protocol to use with {@link - * GraphStatement#getSubProtocol()}, then that protocol is returned. - *
  2. If the driver configuration explicitly defines the protocol to use (see {@link - * DseDriverOption#GRAPH_SUB_PROTOCOL} and reference.conf), then that protocol is returned. - *
  3. Otherwise, the graph protocol to use is determined by the DSE version of hosts in the - * cluster. If any host has DSE version 6.7.x or lower, the default graph protocol is {@link - * GraphProtocol#GRAPHSON_2_0}. If all hosts have DSE version 6.8.0 or higher, the default - * graph protocol is {@link GraphProtocol#GRAPH_BINARY_1_0}. - *
- * - * Note that the hosts check will be done only once, then memoized; if other hosts join the and do - * not support the computed graph protocol, the user has to manually set the graph protocol to - * use. - * - *

Also note that GRAPH_BINARY_1_0 can only be used with "core" graph engines; if - * you are targeting a "classic" graph engine instead, the user has to manually set the graph - * protocol to something else. - */ - @NonNull - public GraphProtocol inferGraphProtocol( - @NonNull GraphStatement statement, - @NonNull DriverExecutionProfile config, - @NonNull InternalDriverContext context) { - String graphProtocol = statement.getSubProtocol(); - if (graphProtocol == null) { - // use the protocol specified in configuration, otherwise get the default from the context - graphProtocol = - config.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL) - ? config.getString(DseDriverOption.GRAPH_SUB_PROTOCOL) - : getDefaultGraphProtocol(context).toInternalCode(); - } - // should not be null because we call config.getString() with a default value - Objects.requireNonNull( - graphProtocol, - "Could not determine the graph protocol for the query. This is a bug, please report."); - - return GraphProtocol.fromString(graphProtocol); - } - - private boolean isContextGraphPagingEnabled(InternalDriverContext context) { - if (contextGraphPagingEnabled == null) { - ProtocolVersion protocolVersion = context.getProtocolVersion(); - if (!context - .getProtocolVersionRegistry() - .supports(protocolVersion, DseProtocolFeature.CONTINUOUS_PAGING)) { - contextGraphPagingEnabled = false; - } else { - if (isDse68OrAbove == null) { - isDse68OrAbove = checkIsDse68OrAbove(context); - } - contextGraphPagingEnabled = isDse68OrAbove; - } - } - return contextGraphPagingEnabled; - } - - /** - * Determines the default {@link GraphProtocol} for the given context. - * - * @return The default GraphProtocol to used based on the provided context. 
- */ - @VisibleForTesting - GraphProtocol getDefaultGraphProtocol(@NonNull InternalDriverContext context) { - if (isDse68OrAbove == null) { - isDse68OrAbove = checkIsDse68OrAbove(context); - } - // if the DSE version can't be determined, default to GraphSON 2.0 - return isDse68OrAbove ? GraphProtocol.GRAPH_BINARY_1_0 : GraphProtocol.GRAPHSON_2_0; - } - - private boolean checkIsDse68OrAbove(@NonNull InternalDriverContext context) { - Collection nodes = context.getMetadataManager().getMetadata().getNodes().values(); - - for (Node node : nodes) { - Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (dseVersion == null || dseVersion.compareTo(MIN_DSE_VERSION_GRAPH_BINARY_AND_PAGING) < 0) { - return false; - } - } - return true; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/LegacyGraphNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/LegacyGraphNode.java deleted file mode 100644 index 1749bf00873..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/LegacyGraphNode.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Objects; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.charset.StandardCharsets; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Property; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONTokens; -import org.apache.tinkerpop.gremlin.structure.util.Attachable; -import org.apache.tinkerpop.shaded.jackson.core.JsonParser; -import org.apache.tinkerpop.shaded.jackson.databind.JavaType; -import org.apache.tinkerpop.shaded.jackson.databind.JsonNode; -import org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper; - -/** - * Legacy implementation for GraphSON 1 results. - * - *

The server returns plain JSON with no type information. The driver works with the JSON - * representation directly. - */ -@Immutable -public class LegacyGraphNode implements GraphNode { - private static final String TYPE = "type"; - private static final String VERTEX_TYPE = "vertex"; - private static final String EDGE_TYPE = "edge"; - - private static final GenericType> LIST_TYPE = GenericType.listOf(Object.class); - private static final GenericType> MAP_TYPE = - GenericType.mapOf(String.class, Object.class); - - private final JsonNode delegate; - private final ObjectMapper objectMapper; - - public LegacyGraphNode(JsonNode delegate, ObjectMapper objectMapper) { - Preconditions.checkNotNull(delegate); - Preconditions.checkNotNull(objectMapper); - this.delegate = delegate; - this.objectMapper = objectMapper; - } - - /** - * The underlying JSON representation. - * - *

This is an implementation detail, it's only exposed through the internal API. - */ - public JsonNode getDelegate() { - return delegate; - } - - /** - * The object mapper used to deserialize results in {@link #as(Class)} and {@link - * #as(GenericType)}. - * - *

This is an implementation detail, it's only exposed through the internal API. - */ - public ObjectMapper getObjectMapper() { - return objectMapper; - } - - @Override - public boolean isNull() { - return delegate.isNull(); - } - - @Override - public boolean isMap() { - return delegate.isObject(); - } - - @Override - public Iterable keys() { - return (Iterable) delegate::fieldNames; - } - - @Override - public LegacyGraphNode getByKey(Object key) { - if (!(key instanceof String)) { - return null; - } - JsonNode node = delegate.get(((String) key)); - if (node == null) { - return null; - } - return new LegacyGraphNode(node, objectMapper); - } - - @Override - @SuppressWarnings("unchecked") - public Map asMap() { - return (Map) as(MAP_TYPE); - } - - @Override - public boolean isList() { - return delegate.isArray(); - } - - @Override - public int size() { - return delegate.size(); - } - - @Override - public LegacyGraphNode getByIndex(int index) { - JsonNode node = delegate.get(index); - if (node == null) { - return null; - } - return new LegacyGraphNode(node, objectMapper); - } - - @Override - @SuppressWarnings("unchecked") - public List asList() { - return (List) as(LIST_TYPE); - } - - @Override - public boolean isValue() { - return delegate.isValueNode(); - } - - @Override - public int asInt() { - return delegate.asInt(); - } - - @Override - public boolean asBoolean() { - return delegate.asBoolean(); - } - - @Override - public long asLong() { - return delegate.asLong(); - } - - @Override - public double asDouble() { - return delegate.asDouble(); - } - - @Override - public String asString() { - return delegate.asText(); - } - - @Override - public boolean isVertex() { - return isType(VERTEX_TYPE); - } - - @Override - public Vertex asVertex() { - try { - return GraphSONUtils.GRAPHSON1_READER - .get() - .readVertex( - new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), - null, - null, - null); - } catch (IOException e) { - throw new 
UncheckedIOException("Could not deserialize node as Vertex.", e); - } - } - - @Override - public boolean isEdge() { - return isType(EDGE_TYPE); - } - - @Override - public Edge asEdge() { - try { - return GraphSONUtils.GRAPHSON1_READER - .get() - .readEdge( - new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), - Attachable::get); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as Edge.", e); - } - } - - @Override - public boolean isPath() { - return false; - } - - @Override - public Path asPath() { - throw new UnsupportedOperationException( - "GraphSON1 does not support Path, use another Graph sub-protocol such as GraphSON2."); - } - - @Override - public boolean isProperty() { - return delegate.has(GraphSONTokens.KEY) && delegate.has(GraphSONTokens.VALUE); - } - - @Override - @SuppressWarnings("unchecked") - public Property asProperty() { - try { - return GraphSONUtils.GRAPHSON1_READER - .get() - .readProperty( - new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), - Attachable::get); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as Property.", e); - } - } - - @Override - public boolean isVertexProperty() { - return delegate.has(GraphSONTokens.ID) - && delegate.has(GraphSONTokens.VALUE) - && delegate.has(GraphSONTokens.LABEL); - } - - @Override - @SuppressWarnings("unchecked") - public VertexProperty asVertexProperty() { - try { - return GraphSONUtils.GRAPHSON1_READER - .get() - .readVertexProperty( - new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), - Attachable::get); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as VertexProperty.", e); - } - } - - @Override - public boolean isSet() { - return false; - } - - @Override - public Set asSet() { - throw new UnsupportedOperationException( - "GraphSON1 does not support Set, use another Graph sub-protocol such as 
GraphSON2."); - } - - @Override - public ResultT as(Class clazz) { - try { - return objectMapper.treeToValue(delegate, clazz); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as: " + clazz, e); - } - } - - @Override - public ResultT as(GenericType type) { - try { - JsonParser parser = objectMapper.treeAsTokens(delegate); - JavaType javaType = objectMapper.constructType(type.__getToken().getType()); - return objectMapper.readValue(parser, javaType); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as: " + type, e); - } - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof LegacyGraphNode)) { - return false; - } - LegacyGraphNode that = (LegacyGraphNode) o; - return Objects.equal(delegate, that.delegate); - } - - @Override - public int hashCode() { - return Objects.hashCode(delegate); - } - - @Override - public String toString() { - return delegate.toString(); - } - - private boolean isType(String expectedTypeName) { - JsonNode type = delegate.get(TYPE); - return type != null && expectedTypeName.equals(type.asText()); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/MultiPageGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/MultiPageGraphResultSet.java deleted file mode 100644 index fe81d73ba00..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/MultiPageGraphResultSet.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -public class MultiPageGraphResultSet implements GraphResultSet { - private final RowIterator iterator; - private final List executionInfos = new ArrayList<>(); - - public MultiPageGraphResultSet(AsyncGraphResultSet firstPage) { - iterator = new RowIterator(firstPage); - executionInfos.add(firstPage.getRequestExecutionInfo()); - } - - @Override - public void cancel() { - iterator.cancel(); - } - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return executionInfos.get(executionInfos.size() - 1); - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return GraphExecutionInfoConverter.convert(getRequestExecutionInfo()); - } - - /** - * The execution information for all the queries that have been performed so far to 
assemble this - * iterable. - * - *

This will have multiple elements if the query is paged, since the driver performs blocking - * background queries to fetch additional pages transparently as the result set is being iterated. - */ - @NonNull - public List getRequestExecutionInfos() { - return executionInfos; - } - - /** @deprecated use {@link #getRequestExecutionInfos()} instead. */ - @NonNull - @Deprecated - public List getExecutionInfos() { - return Lists.transform(executionInfos, GraphExecutionInfoConverter::convert); - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - public class RowIterator extends CountingIterator { - private AsyncGraphResultSet currentPage; - private Iterator currentRows; - private boolean cancelled = false; - - private RowIterator(AsyncGraphResultSet firstPage) { - super(firstPage.remaining()); - currentPage = firstPage; - currentRows = firstPage.currentPage().iterator(); - } - - @Override - protected GraphNode computeNext() { - maybeMoveToNextPage(); - return currentRows.hasNext() ? 
currentRows.next() : endOfData(); - } - - private void maybeMoveToNextPage() { - if (!cancelled && !currentRows.hasNext() && currentPage.hasMorePages()) { - BlockingOperation.checkNotDriverThread(); - AsyncGraphResultSet nextPage = - CompletableFutures.getUninterruptibly(currentPage.fetchNextPage()); - currentPage = nextPage; - remaining += currentPage.remaining(); - currentRows = nextPage.currentPage().iterator(); - executionInfos.add(nextPage.getRequestExecutionInfo()); - } - } - - private void cancel() { - currentPage.cancel(); - cancelled = true; - } - - public boolean isCancelled() { - return cancelled; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ObjectGraphNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ObjectGraphNode.java deleted file mode 100644 index 56123799fdd..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ObjectGraphNode.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Objects; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Property; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; - -/** - * Modern implementation for GraphSON 2+ results. - * - *

The server returns results with type information. The driver works with the decoded objects - * directly. - */ -@Immutable -public class ObjectGraphNode implements GraphNode { - - private final Object delegate; - - public ObjectGraphNode(Object delegate) { - this.delegate = delegate; - } - - @Override - public boolean isNull() { - return delegate == null; - } - - @Override - public boolean isMap() { - return delegate instanceof Map; - } - - @Override - public Iterable keys() { - return ((Map) delegate).keySet(); - } - - @Override - public GraphNode getByKey(Object key) { - if (!isMap()) { - return null; - } - Map map = asMap(); - if (map.containsKey(key)) { - return new ObjectGraphNode(map.get(key)); - } - return null; - } - - @Override - @SuppressWarnings("unchecked") - public Map asMap() { - return (Map) delegate; - } - - @Override - public boolean isList() { - return delegate instanceof List; - } - - @Override - public int size() { - if (isList()) { - return asList().size(); - } else if (isMap()) { - return asMap().size(); - } else if (isSet()) { - return asSet().size(); - } else { - return 0; - } - } - - @Override - public GraphNode getByIndex(int index) { - if (!isList() || index < 0 || index >= size()) { - return null; - } - return new ObjectGraphNode(asList().get(index)); - } - - @Override - @SuppressWarnings("unchecked") - public List asList() { - return (List) delegate; - } - - @Override - public boolean isValue() { - return !(isList() - || isMap() - || isSet() - || isVertex() - || isEdge() - || isPath() - || isProperty() - || isVertexProperty()); - } - - @Override - public boolean isVertexProperty() { - return delegate instanceof VertexProperty; - } - - @Override - public boolean isProperty() { - return delegate instanceof Property; - } - - @Override - public boolean isPath() { - return delegate instanceof Path; - } - - @Override - public int asInt() { - return (Integer) delegate; - } - - @Override - public boolean asBoolean() { - return (Boolean) 
delegate; - } - - @Override - public long asLong() { - return (Long) delegate; - } - - @Override - public double asDouble() { - return (Double) delegate; - } - - @Override - public String asString() { - return (String) delegate; - } - - @Override - @SuppressWarnings("unchecked") - public T as(Class clazz) { - return (T) delegate; - } - - @Override - @SuppressWarnings("unchecked") - public T as(GenericType type) { - return (T) delegate; - } - - @Override - public boolean isVertex() { - return delegate instanceof Vertex; - } - - @Override - public Vertex asVertex() { - return (Vertex) delegate; - } - - @Override - public boolean isEdge() { - return delegate instanceof Edge; - } - - @Override - public Edge asEdge() { - return (Edge) delegate; - } - - @Override - public Path asPath() { - return (Path) delegate; - } - - @Override - @SuppressWarnings("unchecked") - public Property asProperty() { - return (Property) delegate; - } - - @Override - @SuppressWarnings("unchecked") - public VertexProperty asVertexProperty() { - return (VertexProperty) delegate; - } - - @Override - public boolean isSet() { - return delegate instanceof Set; - } - - @Override - @SuppressWarnings("unchecked") - public Set asSet() { - return (Set) delegate; - } - - @Override - public String toString() { - return this.delegate.toString(); - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - // Compare each others' delegates. 
- return other instanceof ObjectGraphNode - && Objects.equal(this.delegate, ((ObjectGraphNode) other).delegate); - } - - @Override - public int hashCode() { - return Objects.hashCode(delegate); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchPredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchPredicate.java deleted file mode 100644 index b69c3a59cf0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchPredicate.java +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.List; -import java.util.Set; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * List of predicates for geolocation usage with DseGraph and Search indexes. Should not be accessed - * directly but through the {@link com.datastax.dse.driver.api.core.graph.predicates.Search} static - * methods. 
- */ -public enum SearchPredicate implements DsePredicate { - /** Whether the text contains a given term as a token in the text (case insensitive). */ - token { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null && evaluate(value.toString(), (String) condition); - } - - boolean evaluate(String value, String terms) { - Set tokens = Sets.newHashSet(tokenize(value.toLowerCase())); - terms = terms.trim(); - List tokenTerms = tokenize(terms.toLowerCase()); - if (!terms.isEmpty() && tokenTerms.isEmpty()) { - return false; - } - for (String term : tokenTerms) { - if (!tokens.contains(term)) { - return false; - } - } - return true; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null && isNotBlank((String) condition); - } - - @Override - public String toString() { - return "token"; - } - }, - - /** Whether the text contains a token that starts with a given term (case insensitive). */ - tokenPrefix { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null && evaluate(value.toString(), (String) condition); - } - - boolean evaluate(String value, String prefix) { - for (String token : tokenize(value.toLowerCase())) { - if (token.startsWith(prefix.toLowerCase().trim())) { - return true; - } - } - return false; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "tokenPrefix"; - } - }, - - /** Whether the text contains a token that matches a regular expression (case insensitive). 
*/ - tokenRegex { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null && evaluate(value.toString(), (String) condition); - } - - boolean evaluate(String value, String regex) { - Pattern compiled = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); - for (String token : tokenize(value.toLowerCase())) { - if (compiled.matcher(token).matches()) { - return true; - } - } - return false; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null && isNotBlank((String) condition); - } - - @Override - public String toString() { - return "tokenRegex"; - } - }, - - /** - * Whether some token in the text is within a given edit distance from the given term (case - * insensitive). - */ - tokenFuzzy { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - - EditDistance fuzzyCondition = (EditDistance) condition; - - for (String token : tokenize(value.toString().toLowerCase())) { - if (SearchUtils.getOptimalStringAlignmentDistance(token, fuzzyCondition.query.toLowerCase()) - <= fuzzyCondition.distance) { - return true; - } - } - - return false; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "tokenFuzzy"; - } - }, - - /** Whether the text starts with a given prefix (case sensitive). */ - prefix { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null && value.toString().startsWith(((String) condition).trim()); - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "prefix"; - } - }, - - /** Whether the text matches a regular expression (case sensitive). 
*/ - regex { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null - && Pattern.compile((String) condition, Pattern.DOTALL) - .matcher(value.toString()) - .matches(); - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null && isNotBlank((String) condition); - } - - @Override - public String toString() { - return "regex"; - } - }, - - /** Whether the text is within a given edit distance from the given term (case sensitive). */ - fuzzy { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - EditDistance fuzzyCondition = (EditDistance) condition; - return SearchUtils.getOptimalStringAlignmentDistance(value.toString(), fuzzyCondition.query) - <= fuzzyCondition.distance; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "fuzzy"; - } - }, - - /** - * Whether tokenized text contains a given phrase, optionally within a given proximity (case - * insensitive). 
- */ - phrase { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - - EditDistance phraseCondition = (EditDistance) condition; - - List valueTokens = tokenize(value.toString().toLowerCase()); - List phraseTokens = tokenize(phraseCondition.query.toLowerCase()); - - int valuePosition = 0; - int phrasePosition = 0; - int distance = 0; - - // Look for matches while phrase/value tokens and distance budget remain - while (phrasePosition < phraseTokens.size() - && valuePosition < valueTokens.size() - && distance <= phraseCondition.distance) { - - if (phraseTokens.get(phrasePosition).equals(valueTokens.get(valuePosition))) { - // Early return-true when we've matched the whole phrase (within the specified distance) - if (phrasePosition == phraseTokens.size() - 1) { - return true; - } - phrasePosition++; - } else if (0 < phrasePosition) { - // We've previously found at least one matching token in the input string, - // but the current token does not match the phrase. Increment distance. 
- distance++; - } - - valuePosition++; - } - - return false; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "phrase"; - } - }; - - private static boolean isNotBlank(String str) { - if (str == null || str.isEmpty()) { - return false; - } - int strLen = str.length(); - for (int i = 0; i < strLen; i++) { - if (!Character.isWhitespace(str.charAt(i))) { - return true; - } - } - return false; - } - - // Match anything that is not either: - // 1) a unicode letter, regardless of subcategory (same as Character.isLetter), or - // 2) a unicode decimal digit number (same as Character.isDigit) - private static final Pattern TOKEN_SPLIT_PATTERN = Pattern.compile("[^\\p{L}\\p{Nd}]"); - - static List tokenize(String str) { - String[] rawTokens = TOKEN_SPLIT_PATTERN.split(str); // could contain empty strings - return Stream.of(rawTokens).filter(t -> 0 < t.length()).collect(Collectors.toList()); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchUtils.java deleted file mode 100644 index 3440c40e87a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchUtils.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -public class SearchUtils { - - /** - * Finds the Optimal - * string alignment distance – also referred to as the Damerau-Levenshtein distance – between - * two strings. - * - *

This is the number of changes needed to change one string into another (insertions, - * deletions or substitutions of a single character, or transpositions of two adjacent - * characters). - * - *

This implementation is based on the Apache Commons Lang implementation of the Levenshtein - * distance, only adding support for transpositions. - * - *

Note that this is the distance used in Lucene for {@code FuzzyTermsEnum}. Lucene itself has - * an implementation of this algorithm, but it is much less efficient in terms of space (also note - * that Lucene's implementation does not return the distance, but a similarity score based on it). - * - * @param s the first string, must not be {@code null}. - * @param t the second string, must not be {@code null}. - * @return The Optimal string alignment distance between the two strings. - * @throws IllegalArgumentException if either String input is {@code null}. - * @see org.apache.commons.lang.StringUtils#getLevenshteinDistance(String, String) - * @see - * LuceneLevenshteinDistance - */ - public static int getOptimalStringAlignmentDistance(String s, String t) { - - /* - * Code adapted from https://github.com/apache/commons-lang/blob/LANG_2_6/src/main/java/org/apache/commons/lang/StringUtils.java - * which was originally released under the Apache 2.0 license with the following copyright: - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - if (s == null || t == null) { - throw new IllegalArgumentException("Strings must not be null"); - } - - int n = s.length(); // length of s - int m = t.length(); // length of t - - if (n == 0) { - return m; - } else if (m == 0) { - return n; - } - - if (n > m) { - // swap the input strings to consume less memory - String tmp = s; - s = t; - t = tmp; - n = m; - m = t.length(); - } - - // instead of maintaining the full matrix in memory, - // we use a sliding window containing 3 lines: - // the current line being written to, and - // the two previous ones. - - int d[] = new int[n + 1]; // current line in the cost matrix - int p1[] = new int[n + 1]; // first line above the current one in the cost matrix - int p2[] = new int[n + 1]; // second line above the current one in the cost matrix - int _d[]; // placeholder to assist in swapping p1, p2 and d - - // indexes into strings s and t - int i; // iterates through s - int j; // iterates through t - - for (i = 0; i <= n; i++) { - p1[i] = i; - } - - for (j = 1; j <= m; j++) { - - // jth character of t - char t_j = t.charAt(j - 1); - d[0] = j; - - for (i = 1; i <= n; i++) { - - char s_i = s.charAt(i - 1); - int cost = s_i == t_j ? 
0 : 1; - - int deletion = d[i - 1] + 1; // cell to the left + 1 - int insertion = p1[i] + 1; // cell to the top + 1 - int substitution = p1[i - 1] + cost; // cell diagonally left and up + cost - - d[i] = Math.min(Math.min(deletion, insertion), substitution); - - // transposition - if (i > 1 && j > 1 && s_i == t.charAt(j - 2) && s.charAt(i - 2) == t_j) { - d[i] = Math.min(d[i], p2[i - 2] + cost); - } - } - - // swap arrays - _d = p2; - p2 = p1; - p1 = d; - d = _d; - } - - // our last action in the above loop was to switch d and p1, so p1 now - // actually has the most recent cost counts - return p1[n]; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SinglePageGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SinglePageGraphResultSet.java deleted file mode 100644 index ff1d984d745..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SinglePageGraphResultSet.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class SinglePageGraphResultSet implements GraphResultSet { - - private final AsyncGraphResultSet onlyPage; - - public SinglePageGraphResultSet(AsyncGraphResultSet onlyPage) { - this.onlyPage = onlyPage; - assert !onlyPage.hasMorePages(); - } - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return onlyPage.getRequestExecutionInfo(); - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return onlyPage.getExecutionInfo(); - } - - @NonNull - @Override - public Iterator iterator() { - return onlyPage.currentPage().iterator(); - } - - @Override - public void cancel() { - onlyPage.cancel(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/TinkerpopBufferUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/TinkerpopBufferUtil.java deleted file mode 100644 index 5650d904350..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/TinkerpopBufferUtil.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import java.nio.ByteBuffer; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -/** Mirror of {@link ByteBufUtil} for Tinkerpop Buffer's */ -public class TinkerpopBufferUtil { - - public static ByteBuffer readBytes(Buffer tinkerBuff, int size) { - ByteBuffer res = ByteBuffer.allocate(size); - tinkerBuff.readBytes(res); - res.flip(); - return res; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractDynamicGraphBinaryCustomSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractDynamicGraphBinaryCustomSerializer.java deleted file mode 100644 index 649f5310c5d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractDynamicGraphBinaryCustomSerializer.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -/** - * Convenience class for dynamic types implemented as Custom types in GraphBinary. This class will - * take care of handling {value_length} automatically for implementing classes. {@link - * #writeDynamicCustomValue(Object, Buffer, GraphBinaryWriter)} and {@link - * #readDynamicCustomValue(Buffer, GraphBinaryReader)} only need to handle writing the internal - * components of the custom type. - * - * @param the java type the implementing classes will encode and decode. 
- */ -public abstract class AbstractDynamicGraphBinaryCustomSerializer - extends AbstractSimpleGraphBinaryCustomSerializer { - protected abstract void writeDynamicCustomValue(T value, Buffer buffer, GraphBinaryWriter context) - throws IOException; - - protected abstract T readDynamicCustomValue(Buffer buffer, GraphBinaryReader context) - throws IOException; - - @Override - protected T readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException { - int initialIndex = buffer.readerIndex(); - - // read actual custom value - T read = readDynamicCustomValue(buffer, context); - - // make sure we didn't read more than what was input as {value_length} - checkValueSize(valueLength, (buffer.readerIndex() - initialIndex)); - - return read; - } - - @Override - protected void writeCustomValue(T value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - // Store the current writer index - final int valueLengthIndex = buffer.writerIndex(); - - // Write a dummy length that will be overwritten at the end of this method - buffer.writeInt(0); - - // Custom type's writer logic - writeDynamicCustomValue(value, buffer, context); - - // value_length = diff written - 4 bytes for the dummy length - final int valueLength = buffer.writerIndex() - valueLengthIndex - GraphBinaryUtils.sizeOfInt(); - - // Go back, write the {value_length} and then reset back the writer index - buffer.markWriterIndex().writerIndex(valueLengthIndex).writeInt(valueLength).resetWriterIndex(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractSimpleGraphBinaryCustomSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractSimpleGraphBinaryCustomSerializer.java deleted file mode 100644 index 6dd149707e8..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractSimpleGraphBinaryCustomSerializer.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * 
Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.DataType; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.types.CustomTypeSerializer; - -/** - * A base custom type serializer for DSE types that handles most of the boiler plate code associated - * with GraphBinary's custom types. - * - *

The full format of a custom type in GraphBinary is the following: - * - *

{type_code}{custom_type_name}{custom_type_info_length}{custom_type_info_bytes}{value_flag}{value_length}{value_bytes} - * - *

This class is made to handle - * {type_code}{custom_type_name}{custom_type_info_length}{custom_type_info_bytes}{value_flag} for - * DSE types. - * - *

Implementing classes are still in charge of encoding {value_length}{value_bytes} in the {@link - * #readCustomValue(int, Buffer, GraphBinaryReader)} implementations. - * - *

Implementing classes must override {@link CustomTypeSerializer#getTypeName()} with their own - * type name. - * - * @param the java type the implementing classes will encode and decode. - */ -abstract class AbstractSimpleGraphBinaryCustomSerializer implements CustomTypeSerializer { - AbstractSimpleGraphBinaryCustomSerializer() { - super(); - } - - protected static final String INCORRECT_VALUE_LENGTH_ERROR_MESSAGE = - "{value_length} read for this value does not correspond to the size of a '%s' value. [%s] bytes required but got [%s]"; - - protected abstract T readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException; - - protected abstract void writeCustomValue(T value, Buffer buffer, GraphBinaryWriter context) - throws IOException; - - protected void checkValueSize(int lengthRequired, int lengthFound) { - Preconditions.checkArgument( - lengthFound == lengthRequired, - INCORRECT_VALUE_LENGTH_ERROR_MESSAGE, - getTypeName(), - lengthRequired, - lengthFound); - } - - @Override - public DataType getDataType() { - return DataType.CUSTOM; - } - - @Override - public T read(Buffer buffer, GraphBinaryReader context) throws IOException { - // the type serializer registry will take care of deserializing {custom_type_name} - // read {custom_type_info_length} and verify it is 0. 
- // See #write(T, ByteBuf, GraphBinaryWriter) for why it is set to 0 - if (context.readValue(buffer, Integer.class, false) != 0) { - throw new IOException("{custom_type_info} should not be provided for this custom type"); - } - - return readValue(buffer, context, true); - } - - @Override - public T readValue(Buffer buffer, GraphBinaryReader context, boolean nullable) - throws IOException { - if (nullable) { - // read {value_flag} - final byte valueFlag = buffer.readByte(); - - // if value is null and the value is nullable - if ((valueFlag & 1) == 1) { - return null; - } - // Note: we don't error out if the valueFlag == "value is null" and nullable == false because - // the serializer - // should have errored out at write time if that was the case. - } - - // Read the byte length of the value bytes - final int valueLength = buffer.readInt(); - - if (valueLength <= 0) { - throw new IOException(String.format("Unexpected value length: %d", valueLength)); - } - - if (valueLength > buffer.readableBytes()) { - throw new IOException( - String.format( - "Not enough readable bytes: %d bytes required for value (%d bytes available)", - valueLength, buffer.readableBytes())); - } - - // subclasses are responsible for reading {value} - return readCustomValue(valueLength, buffer, context); - } - - @Override - public void write(final T value, final Buffer buffer, final GraphBinaryWriter context) - throws IOException { - // the type serializer registry will take care of serializing {custom_type_name} - // write "{custom_type_info_length}" to 0 because we don't need it for the DSE types - context.writeValue(0, buffer, false); - writeValue(value, buffer, context, true); - } - - @Override - public void writeValue( - final T value, final Buffer buffer, final GraphBinaryWriter context, final boolean nullable) - throws IOException { - if (value == null) { - if (!nullable) { - throw new IOException("Unexpected null value when nullable is false"); - } - - // writes {value_flag} to "1" 
which means "the value is null" - context.writeValueFlagNull(buffer); - return; - } - - if (nullable) { - // writes {value_flag} to "0" which means "value is not null" - context.writeValueFlagNone(buffer); - } - - // sub classes will be responsible for writing {value_length} and {value_bytes} - writeCustomValue(value, buffer, context); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/ComplexTypeSerializerUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/ComplexTypeSerializerUtil.java deleted file mode 100644 index bec3c78743a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/ComplexTypeSerializerUtil.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.internal.core.graph.TinkerpopBufferUtil; -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.dse.driver.internal.core.protocol.TinkerpopBufferPrimitiveCodec; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.SettableByIndex; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.type.DataTypeHelper; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.RawType; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import java.util.Objects; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -class ComplexTypeSerializerUtil { - - private static final PrimitiveCodec codec = - new TinkerpopBufferPrimitiveCodec(new DseNettyBufferFactory()); - - static void encodeTypeDefinition( - DataType type, Buffer buffer, DefaultDriverContext driverContext) { - RawType protocolType = toProtocolSpec(type); - protocolType.encode(buffer, codec, driverContext.getProtocolVersion().getCode()); - } - - static DataType decodeTypeDefinition(Buffer buffer, 
DefaultDriverContext driverContext) { - RawType type = RawType.decode(buffer, codec, driverContext.getProtocolVersion().getCode()); - return DataTypeHelper.fromProtocolSpec(type, driverContext); - } - - /* Tinkerpop-based encoding of UDT values, based on the UdtCoded.encode() method, but using Tinkerpop buffers directly to avoid - unnecessary NIO ByteBuffer copies. */ - static void encodeValue(@Nullable GettableByIndex value, Buffer tinkerBuff) { - if (value == null) { - return; - } - - for (int i = 0; i < value.size(); i++) { - ByteBuffer fieldBuffer = value.getBytesUnsafe(i); - if (fieldBuffer == null) { - tinkerBuff.writeInt(-1); - } else { - tinkerBuff.writeInt(fieldBuffer.remaining()); - tinkerBuff.writeBytes(fieldBuffer.duplicate()); - } - } - } - - /* This method will move forward the Tinkerpop buffer given in parameter based on the UDT value read. - Content of the method is roughly equivalent to UdtCodec.decode(), but using Tinkerpop buffers directly to avoid - unnecessary NIO ByteBuffer copies. */ - static > T decodeValue(Buffer tinkerBuff, T val, int size) { - try { - for (int i = 0; i < size; i++) { - int fieldSize = tinkerBuff.readInt(); - if (fieldSize >= 0) { - // the reassignment is to shut down the error-prone warning about ignoring return values. 
- val = val.setBytesUnsafe(i, TinkerpopBufferUtil.readBytes(tinkerBuff, fieldSize)); - } - } - return val; - } catch (BufferUnderflowException e) { - throw new IllegalArgumentException("Not enough bytes to deserialize a UDT value", e); - } - } - - private static RawType toProtocolSpec(DataType dataType) { - int id = dataType.getProtocolCode(); - RawType type = RawType.PRIMITIVES.get(id); - if (type != null) { - return type; - } - - switch (id) { - case ProtocolConstants.DataType.CUSTOM: - CustomType customType = ((CustomType) dataType); - type = new RawType.RawCustom(customType.getClassName()); - break; - case ProtocolConstants.DataType.LIST: - ListType listType = ((ListType) dataType); - type = new RawType.RawList(toProtocolSpec(listType.getElementType())); - break; - case ProtocolConstants.DataType.SET: - SetType setType = ((SetType) dataType); - type = new RawType.RawSet(toProtocolSpec(setType.getElementType())); - break; - case ProtocolConstants.DataType.MAP: - MapType mapType = ((MapType) dataType); - type = - new RawType.RawMap( - toProtocolSpec(mapType.getKeyType()), toProtocolSpec(mapType.getValueType())); - break; - case ProtocolConstants.DataType.TUPLE: - TupleType tupleType = ((TupleType) dataType); - ImmutableList.Builder subTypesList = - ImmutableList.builderWithExpectedSize(tupleType.getComponentTypes().size()); - for (int i = 0; i < tupleType.getComponentTypes().size(); i++) { - subTypesList.add(toProtocolSpec(tupleType.getComponentTypes().get(i))); - } - type = new RawType.RawTuple(subTypesList.build()); - break; - case ProtocolConstants.DataType.UDT: - UserDefinedType userDefinedType = ((UserDefinedType) dataType); - ImmutableMap.Builder subTypesMap = - ImmutableMap.builderWithExpectedSize(userDefinedType.getFieldNames().size()); - for (int i = 0; i < userDefinedType.getFieldTypes().size(); i++) { - subTypesMap.put( - userDefinedType.getFieldNames().get(i).asInternal(), - toProtocolSpec(userDefinedType.getFieldTypes().get(i))); - } - type = - new 
RawType.RawUdt( - Objects.requireNonNull(userDefinedType.getKeyspace()).asInternal(), - userDefinedType.getName().asInternal(), - subTypesMap.build()); - break; - default: - throw new IllegalArgumentException("Unsupported type: " + dataType.asCql(true, true)); - } - return type; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/CqlDurationSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/CqlDurationSerializer.java deleted file mode 100644 index 1ac97de0ef4..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/CqlDurationSerializer.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class CqlDurationSerializer extends AbstractSimpleGraphBinaryCustomSerializer { - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_DURATION_TYPE_NAME; - } - - @Override - protected CqlDuration readCustomValue( - final int valueLength, final Buffer buffer, final GraphBinaryReader context) - throws IOException { - checkValueSize(GraphBinaryUtils.sizeOfDuration(), valueLength); - return CqlDuration.newInstance( - context.readValue(buffer, Integer.class, false), - context.readValue(buffer, Integer.class, false), - context.readValue(buffer, Long.class, false)); - } - - @Override - protected void writeCustomValue(CqlDuration value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - context.writeValue(GraphBinaryUtils.sizeOfDuration(), buffer, false); - context.writeValue(value.getMonths(), buffer, false); - context.writeValue(value.getDays(), buffer, false); - context.writeValue(value.getNanoseconds(), buffer, false); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/DistanceSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/DistanceSerializer.java deleted file mode 100644 index 9e281b2b84a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/DistanceSerializer.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class DistanceSerializer extends AbstractSimpleGraphBinaryCustomSerializer { - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_DISTANCE_TYPE_NAME; - } - - @Override - protected Distance readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException { - Point p = context.readValue(buffer, Point.class, false); - checkValueSize(GraphBinaryUtils.sizeOfDistance(p), valueLength); - return new Distance(p, context.readValue(buffer, Double.class, false)); - } - - @Override - protected void writeCustomValue(Distance value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - context.writeValue(GraphBinaryUtils.sizeOfDistance(value.getCenter()), buffer, false); - context.writeValue(value.getCenter(), buffer, false); - context.writeValue(value.getRadius(), buffer, false); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/EditDistanceSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/EditDistanceSerializer.java deleted file mode 100644 index b2831040123..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/EditDistanceSerializer.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.internal.core.graph.EditDistance; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class EditDistanceSerializer - extends AbstractSimpleGraphBinaryCustomSerializer { - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_EDIT_DISTANCE_TYPE_NAME; - } - - @Override - protected EditDistance readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException { - int distance = context.readValue(buffer, Integer.class, false); - String query = context.readValue(buffer, String.class, false); - checkValueSize(GraphBinaryUtils.sizeOfEditDistance(query), valueLength); - - return new EditDistance(query, distance); - } - - @Override - protected void writeCustomValue(EditDistance value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - context.writeValue(GraphBinaryUtils.sizeOfEditDistance(value.query), buffer, false); - context.writeValue(value.distance, buffer, false); - context.writeValue(value.query, buffer, false); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GeometrySerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GeometrySerializer.java deleted file mode 100644 index 996e79c7693..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GeometrySerializer.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.internal.core.graph.TinkerpopBufferUtil; -import java.io.IOException; -import java.nio.ByteBuffer; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public abstract class GeometrySerializer - extends AbstractSimpleGraphBinaryCustomSerializer { - public abstract T fromWellKnownBinary(ByteBuffer buffer); - - @Override - protected T readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException { - return fromWellKnownBinary(TinkerpopBufferUtil.readBytes(buffer, valueLength)); - } - - @Override - protected void writeCustomValue(T value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - ByteBuffer bb = value.asWellKnownBinary(); - - // writing the {value_length} - context.writeValue(bb.remaining(), buffer, false); - buffer.writeBytes(bb); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryModule.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryModule.java deleted file mode 100644 index 59f966a34c2..00000000000 --- 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryModule.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.dse.driver.internal.core.graph.EditDistance; -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.UnpooledByteBufAllocator; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.BufferFactory; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import 
org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; -import org.javatuples.Pair; - -public class GraphBinaryModule { - public static final UnpooledByteBufAllocator ALLOCATOR = new UnpooledByteBufAllocator(false); - private static final BufferFactory FACTORY = new DseNettyBufferFactory(); - - static final String GRAPH_BINARY_POINT_TYPE_NAME = "driver.dse.geometry.Point"; - static final String GRAPH_BINARY_LINESTRING_TYPE_NAME = "driver.dse.geometry.LineString"; - static final String GRAPH_BINARY_POLYGON_TYPE_NAME = "driver.dse.geometry.Polygon"; - static final String GRAPH_BINARY_DISTANCE_TYPE_NAME = "driver.dse.geometry.Distance"; - static final String GRAPH_BINARY_DURATION_TYPE_NAME = "driver.core.Duration"; - static final String GRAPH_BINARY_EDIT_DISTANCE_TYPE_NAME = "driver.dse.search.EditDistance"; - static final String GRAPH_BINARY_TUPLE_VALUE_TYPE_NAME = "driver.core.TupleValue"; - static final String GRAPH_BINARY_UDT_VALUE_TYPE_NAME = "driver.core.UDTValue"; - static final String GRAPH_BINARY_PAIR_TYPE_NAME = "org.javatuples.Pair"; - - private final GraphBinaryReader reader; - private final GraphBinaryWriter writer; - - public GraphBinaryModule(GraphBinaryReader reader, GraphBinaryWriter writer) { - this.reader = reader; - this.writer = writer; - } - - public static TypeSerializerRegistry createDseTypeSerializerRegistry( - DefaultDriverContext driverContext) { - return TypeSerializerRegistry.build() - .addCustomType(CqlDuration.class, new CqlDurationSerializer()) - .addCustomType(Point.class, new PointSerializer()) - .addCustomType(LineString.class, new LineStringSerializer()) - .addCustomType(Polygon.class, new PolygonSerializer()) - .addCustomType(Distance.class, new DistanceSerializer()) - .addCustomType(EditDistance.class, new EditDistanceSerializer()) - .addCustomType(TupleValue.class, new TupleValueSerializer(driverContext)) - .addCustomType(UdtValue.class, 
new UdtValueSerializer(driverContext)) - .addCustomType(Pair.class, new PairSerializer()) - .create(); - } - - @SuppressWarnings("TypeParameterUnusedInFormals") - public T deserialize(final Buffer buffer) throws IOException { - return reader.read(buffer); - } - - public Buffer serialize(final T value) throws IOException { - return serialize(value, FACTORY.create(ALLOCATOR.heapBuffer())); - } - - public Buffer serialize(final T value, final Buffer buffer) throws IOException { - try { - writer.write(value, buffer); - return buffer; - } catch (Exception e) { - buffer.release(); - throw e; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryUtils.java deleted file mode 100644 index 42283cd5167..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryUtils.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import java.nio.charset.StandardCharsets; - -class GraphBinaryUtils { - static int sizeOfInt() { - return 4; - } - - static int sizeOfLong() { - return 8; - } - - static int sizeOfDouble() { - return 8; - } - - static int sizeOfPoint(Point point) { - return point.asWellKnownBinary().remaining(); - } - - /* assumes UTF8 */ - static int sizeOfString(String s) { - // length + data length - return sizeOfInt() + s.getBytes(StandardCharsets.UTF_8).length; - } - - static int sizeOfDuration() { - return sizeOfInt() + sizeOfInt() + sizeOfLong(); - } - - static int sizeOfDistance(Point point) { - return sizeOfPoint(point) + sizeOfDouble(); - } - - static int sizeOfEditDistance(String s) { - return sizeOfInt() + sizeOfString(s); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/LineStringSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/LineStringSerializer.java deleted file mode 100644 index 4dfa8f8f0f1..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/LineStringSerializer.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import java.nio.ByteBuffer; - -public class LineStringSerializer extends GeometrySerializer { - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_LINESTRING_TYPE_NAME; - } - - @Override - public LineString fromWellKnownBinary(ByteBuffer buffer) { - return LineString.fromWellKnownBinary(buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PairSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PairSerializer.java deleted file mode 100644 index 3f13dd5b3a0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PairSerializer.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.javatuples.Pair; - -public class PairSerializer extends AbstractDynamicGraphBinaryCustomSerializer { - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_PAIR_TYPE_NAME; - } - - @Override - protected Pair readDynamicCustomValue(Buffer buffer, GraphBinaryReader context) - throws IOException { - return new Pair<>(context.read(buffer), context.read(buffer)); - } - - @Override - protected void writeDynamicCustomValue(Pair value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - context.write(value.getValue0(), buffer); - context.write(value.getValue1(), buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PointSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PointSerializer.java deleted file mode 100644 index 2204b0da073..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PointSerializer.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import java.nio.ByteBuffer; - -public class PointSerializer extends GeometrySerializer { - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_POINT_TYPE_NAME; - } - - @Override - public Point fromWellKnownBinary(ByteBuffer buffer) { - return Point.fromWellKnownBinary(buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PolygonSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PolygonSerializer.java deleted file mode 100644 index 8e3bc67838a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PolygonSerializer.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import java.nio.ByteBuffer; - -public class PolygonSerializer extends GeometrySerializer { - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_POLYGON_TYPE_NAME; - } - - @Override - public Polygon fromWellKnownBinary(ByteBuffer buffer) { - return Polygon.fromWellKnownBinary(buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/TupleValueSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/TupleValueSerializer.java deleted file mode 100644 index b7c6fc2098d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/TupleValueSerializer.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class TupleValueSerializer extends AbstractDynamicGraphBinaryCustomSerializer { - - private final DefaultDriverContext driverContext; - - public TupleValueSerializer(DefaultDriverContext driverContext) { - this.driverContext = driverContext; - } - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_TUPLE_VALUE_TYPE_NAME; - } - - @Override - public TupleValue readDynamicCustomValue(Buffer buffer, GraphBinaryReader context) - throws IOException { - // read the type first - DataType type = ComplexTypeSerializerUtil.decodeTypeDefinition(buffer, driverContext); - - assert type instanceof TupleType - : "GraphBinary TupleValue deserializer was called on a value that is not encoded as a TupleValue."; - - TupleType tupleType = (TupleType) type; - TupleValue value = tupleType.newValue(); - - // then decode the values from the buffer - return ComplexTypeSerializerUtil.decodeValue( - buffer, value, tupleType.getComponentTypes().size()); - } - - @Override - public void writeDynamicCustomValue(TupleValue value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - // write type first in native protocol - ComplexTypeSerializerUtil.encodeTypeDefinition(value.getType(), buffer, driverContext); - - // write value after - ComplexTypeSerializerUtil.encodeValue(value, buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/UdtValueSerializer.java 
b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/UdtValueSerializer.java deleted file mode 100644 index 3e617ebf926..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/UdtValueSerializer.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class UdtValueSerializer extends AbstractDynamicGraphBinaryCustomSerializer { - private final DefaultDriverContext driverContext; - - public UdtValueSerializer(DefaultDriverContext driverContext) { - this.driverContext = driverContext; - } - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_UDT_VALUE_TYPE_NAME; - } - - @Override - public UdtValue readDynamicCustomValue(Buffer buffer, GraphBinaryReader context) - throws IOException { - // read type definition first - DataType driverType = ComplexTypeSerializerUtil.decodeTypeDefinition(buffer, driverContext); - - assert driverType instanceof UserDefinedType - : "GraphBinary UdtValue deserializer was called on a value that is not encoded as a UdtValue."; - - UserDefinedType userDefinedType = (UserDefinedType) driverType; - UdtValue value = userDefinedType.newValue(); - - // then read values - return ComplexTypeSerializerUtil.decodeValue( - buffer, value, userDefinedType.getFieldTypes().size()); - } - - @Override - public void writeDynamicCustomValue(UdtValue value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - // write type first in native protocol format - ComplexTypeSerializerUtil.encodeTypeDefinition(value.getType(), buffer, driverContext); - // write value after - ComplexTypeSerializerUtil.encodeValue(value, buffer); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBuffer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBuffer.java deleted file mode 100644 index 590ac2e9be2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBuffer.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary.buffer; - -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -/** - * Internal impl of Tinkerpop Buffers. We implement an internal type here to allow for this class to - * use shaded Netty types (without bringing all of Tinkerpop into the shaded JAR). The impl is based - * on the initial impl of {@link NettyBuffer} but we don't guarantee that this class will mirror - * changes to that class over time. - */ -final class DseNettyBuffer implements Buffer { - private final ByteBuf buffer; - - /** - * Creates a new instance. - * - * @param buffer The buffer to wrap. 
- */ - DseNettyBuffer(ByteBuf buffer) { - if (buffer == null) { - throw new IllegalArgumentException("buffer can't be null"); - } - - this.buffer = buffer; - } - - @Override - public int readableBytes() { - return this.buffer.readableBytes(); - } - - @Override - public int readerIndex() { - return this.buffer.readerIndex(); - } - - @Override - public Buffer readerIndex(final int readerIndex) { - this.buffer.readerIndex(readerIndex); - return this; - } - - @Override - public int writerIndex() { - return this.buffer.writerIndex(); - } - - @Override - public Buffer writerIndex(final int writerIndex) { - this.buffer.writerIndex(writerIndex); - return this; - } - - @Override - public Buffer markWriterIndex() { - this.buffer.markWriterIndex(); - return this; - } - - @Override - public Buffer resetWriterIndex() { - this.buffer.resetWriterIndex(); - return this; - } - - @Override - public int capacity() { - return this.buffer.capacity(); - } - - @Override - public boolean isDirect() { - return this.buffer.isDirect(); - } - - @Override - public boolean readBoolean() { - return this.buffer.readBoolean(); - } - - @Override - public byte readByte() { - return this.buffer.readByte(); - } - - @Override - public short readShort() { - return this.buffer.readShort(); - } - - @Override - public int readInt() { - return this.buffer.readInt(); - } - - @Override - public long readLong() { - return this.buffer.readLong(); - } - - @Override - public float readFloat() { - return this.buffer.readFloat(); - } - - @Override - public double readDouble() { - return this.buffer.readDouble(); - } - - @Override - public Buffer readBytes(final byte[] destination) { - this.buffer.readBytes(destination); - return this; - } - - @Override - public Buffer readBytes(final byte[] destination, final int dstIndex, final int length) { - this.buffer.readBytes(destination, dstIndex, length); - return this; - } - - @Override - public Buffer readBytes(final ByteBuffer dst) { - this.buffer.readBytes(dst); - 
return this; - } - - @Override - public Buffer readBytes(final OutputStream out, final int length) throws IOException { - this.buffer.readBytes(out, length); - return this; - } - - @Override - public Buffer writeBoolean(final boolean value) { - this.buffer.writeBoolean(value); - return this; - } - - @Override - public Buffer writeByte(final int value) { - this.buffer.writeByte(value); - return this; - } - - @Override - public Buffer writeShort(final int value) { - this.buffer.writeShort(value); - return this; - } - - @Override - public Buffer writeInt(final int value) { - this.buffer.writeInt(value); - return this; - } - - @Override - public Buffer writeLong(final long value) { - this.buffer.writeLong(value); - return this; - } - - @Override - public Buffer writeFloat(final float value) { - this.buffer.writeFloat(value); - return this; - } - - @Override - public Buffer writeDouble(final double value) { - this.buffer.writeDouble(value); - return this; - } - - @Override - public Buffer writeBytes(final byte[] src) { - this.buffer.writeBytes(src); - return this; - } - - @Override - public Buffer writeBytes(final ByteBuffer src) { - this.buffer.writeBytes(src); - return this; - } - - @Override - public Buffer writeBytes(byte[] src, final int srcIndex, final int length) { - this.buffer.writeBytes(src, srcIndex, length); - return this; - } - - @Override - public boolean release() { - return this.buffer.release(); - } - - @Override - public Buffer retain() { - this.buffer.retain(); - return this; - } - - @Override - public int referenceCount() { - return this.buffer.refCnt(); - } - - @Override - public ByteBuffer[] nioBuffers() { - return this.buffer.nioBuffers(); - } - - @Override - public ByteBuffer nioBuffer() { - return this.buffer.nioBuffer(); - } - - @Override - public ByteBuffer nioBuffer(final int index, final int length) { - return this.buffer.nioBuffer(index, length); - } - - @Override - public ByteBuffer[] nioBuffers(final int index, final int length) { - 
return this.buffer.nioBuffers(index, length); - } - - @Override - public int nioBufferCount() { - return this.buffer.nioBufferCount(); - } - - @Override - public Buffer getBytes(final int index, final byte[] dst) { - this.buffer.getBytes(index, dst); - return this; - } - - /** Returns the underlying buffer. */ - public ByteBuf getUnderlyingBuffer() { - return this.buffer; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBufferFactory.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBufferFactory.java deleted file mode 100644 index 57ee3cb1a9d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBufferFactory.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary.buffer; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.buffer.UnpooledByteBufAllocator; -import java.nio.ByteBuffer; -import java.util.function.Supplier; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.BufferFactory; - -/** - * Internal BufferFactory impl for creation of Tinkerpop buffers. We implement an internal type here - * to allow for this class to use shaded Netty types (without bringing all of Tinkerpop into the - * shaded JAR). The impl is based on the initial impl of {@code - * org.apache.tinkerpop.gremlin.driver.ser.NettyBufferFactory} but we don't guarantee that this - * class will mirror changes to that class over time. - */ -public class DseNettyBufferFactory implements BufferFactory { - - private static final ByteBufAllocator DEFAULT_ALLOCATOR = new UnpooledByteBufAllocator(false); - - private final ByteBufAllocator allocator; - - public DseNettyBufferFactory() { - this.allocator = DEFAULT_ALLOCATOR; - } - - public DseNettyBufferFactory(ByteBufAllocator allocator) { - this.allocator = allocator; - } - - @Override - public Buffer create(final ByteBuf value) { - return new DseNettyBuffer(value); - } - - @Override - public Buffer wrap(final ByteBuffer value) { - return create(Unpooled.wrappedBuffer(value)); - } - - public Buffer heap() { - return create(allocator.heapBuffer()); - } - - public Buffer heap(int initialSize) { - return create(allocator.heapBuffer(initialSize)); - } - - public Buffer heap(int initialSize, int maxSize) { - return create(allocator.heapBuffer(initialSize, maxSize)); - } - - public Buffer io() { - return create(allocator.ioBuffer()); - } - - public Buffer io(int initialSize) { - return create(allocator.ioBuffer(initialSize)); - } - - public Buffer io(int initialSize, int maxSize) { - 
return create(allocator.ioBuffer(initialSize, maxSize)); - } - - public Buffer direct() { - return create(allocator.directBuffer()); - } - - public Buffer direct(int initialSize) { - return create(allocator.directBuffer(initialSize)); - } - - public Buffer direct(int initialSize, int maxSize) { - return create(allocator.directBuffer(initialSize, maxSize)); - } - - public Buffer composite(ByteBuf... components) { - - CompositeByteBuf buff = allocator.compositeBuffer(components.length); - buff.addComponents(components); - return create(buff); - } - - public Buffer composite(Buffer... components) { - ByteBuf[] nettyBufs = new ByteBuf[components.length]; - for (int i = 0; i < components.length; ++i) { - if (!(components[i] instanceof DseNettyBuffer)) { - throw new IllegalArgumentException("Can only concatenate DseNettyBuffer instances"); - } - nettyBufs[i] = ((DseNettyBuffer) components[i]).getUnderlyingBuffer(); - } - return composite(nettyBufs); - } - - public Buffer withBytes(int... bytes) { - return withBytes(this::heap, bytes); - } - - public Buffer withBytes(Supplier supplier, int... bytes) { - Buffer buff = supplier.get(); - for (int val : bytes) { - buff.writeByte(val); - } - return buff; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphNode.java deleted file mode 100644 index fda0eed5333..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphNode.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Property; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; - -@NotThreadSafe -class DefaultReactiveGraphNode implements ReactiveGraphNode { - - private final GraphNode graphNode; - private final ExecutionInfo executionInfo; - - DefaultReactiveGraphNode(@NonNull GraphNode graphNode, @NonNull ExecutionInfo executionInfo) { - this.graphNode = graphNode; - this.executionInfo = executionInfo; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @Override - public boolean isNull() { - return graphNode.isNull(); - } - - @Override - public boolean isMap() { - return graphNode.isMap(); - } - - @Override - public Iterable keys() { - return 
graphNode.keys(); - } - - @Override - public GraphNode getByKey(Object key) { - return graphNode.getByKey(key); - } - - @Override - public Map asMap() { - return graphNode.asMap(); - } - - @Override - public boolean isList() { - return graphNode.isList(); - } - - @Override - public int size() { - return graphNode.size(); - } - - @Override - public GraphNode getByIndex(int index) { - return graphNode.getByIndex(index); - } - - @Override - public List asList() { - return graphNode.asList(); - } - - @Override - public boolean isValue() { - return graphNode.isValue(); - } - - @Override - public int asInt() { - return graphNode.asInt(); - } - - @Override - public boolean asBoolean() { - return graphNode.asBoolean(); - } - - @Override - public long asLong() { - return graphNode.asLong(); - } - - @Override - public double asDouble() { - return graphNode.asDouble(); - } - - @Override - public String asString() { - return graphNode.asString(); - } - - @Override - public ResultT as(Class clazz) { - return graphNode.as(clazz); - } - - @Override - public ResultT as(GenericType type) { - return graphNode.as(type); - } - - @Override - public boolean isVertex() { - return graphNode.isVertex(); - } - - @Override - public Vertex asVertex() { - return graphNode.asVertex(); - } - - @Override - public boolean isEdge() { - return graphNode.isEdge(); - } - - @Override - public Edge asEdge() { - return graphNode.asEdge(); - } - - @Override - public boolean isPath() { - return graphNode.isPath(); - } - - @Override - public Path asPath() { - return graphNode.asPath(); - } - - @Override - public boolean isProperty() { - return graphNode.isProperty(); - } - - @Override - public Property asProperty() { - return graphNode.asProperty(); - } - - @Override - public boolean isVertexProperty() { - return graphNode.isVertexProperty(); - } - - @Override - public VertexProperty asVertexProperty() { - return graphNode.asVertexProperty(); - } - - @Override - public boolean isSet() { - return 
graphNode.isSet(); - } - - @Override - public Set asSet() { - return graphNode.asSet(); - } - - @Override - public String toString() { - return "DefaultReactiveGraphNode{graphNode=" - + graphNode - + ", executionInfo=" - + executionInfo - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphResultSet.java deleted file mode 100644 index 137e44e4d95..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphResultSet.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; -import com.datastax.dse.driver.internal.core.cql.reactive.EmptySubscription; -import com.datastax.dse.driver.internal.core.cql.reactive.SimpleUnicastProcessor; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; -import org.reactivestreams.Publisher; -import org.reactivestreams.Subscriber; - -@ThreadSafe -public class DefaultReactiveGraphResultSet implements ReactiveGraphResultSet { - - private final Callable> firstPage; - - private final AtomicBoolean alreadySubscribed = new AtomicBoolean(false); - - private final SimpleUnicastProcessor executionInfosPublisher = - new SimpleUnicastProcessor<>(); - - public DefaultReactiveGraphResultSet(Callable> firstPage) { - this.firstPage = firstPage; - } - - @Override - public void subscribe(@NonNull Subscriber subscriber) { - // As per rule 1.9, we need to throw an NPE if subscriber is null - Objects.requireNonNull(subscriber, "Subscriber cannot be null"); - // As per rule 1.11, this publisher is allowed to support only one subscriber. 
- if (alreadySubscribed.compareAndSet(false, true)) { - ReactiveGraphResultSetSubscription subscription = - new ReactiveGraphResultSetSubscription(subscriber, executionInfosPublisher); - try { - subscriber.onSubscribe(subscription); - // must be done after onSubscribe - subscription.start(firstPage); - } catch (Throwable t) { - // As per rule 2.13: In the case that this rule is violated, - // any associated Subscription to the Subscriber MUST be considered as - // cancelled, and the caller MUST raise this error condition in a fashion - // that is adequate for the runtime environment. - subscription.doOnError( - new IllegalStateException( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", - t)); - } - } else { - subscriber.onSubscribe(EmptySubscription.INSTANCE); - subscriber.onError( - new IllegalStateException("This publisher does not support multiple subscriptions")); - } - // As per 2.13, this method must return normally (i.e. not throw) - } - - @NonNull - @Override - public Publisher getExecutionInfos() { - return executionInfosPublisher; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/FailedReactiveGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/FailedReactiveGraphResultSet.java deleted file mode 100644 index 45bbd8c62b0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/FailedReactiveGraphResultSet.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; -import com.datastax.dse.driver.internal.core.cql.reactive.FailedPublisher; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.reactivestreams.Publisher; - -public class FailedReactiveGraphResultSet extends FailedPublisher - implements ReactiveGraphResultSet { - - public FailedReactiveGraphResultSet(Throwable error) { - super(error); - } - - @NonNull - @Override - public Publisher getExecutionInfos() { - return new FailedPublisher<>(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessor.java deleted file mode 100644 index ed2cd28926c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessor.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; -import com.datastax.dse.driver.internal.core.graph.GraphRequestAsyncProcessor; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ReactiveGraphRequestProcessor - implements RequestProcessor, ReactiveGraphResultSet> { - - public static final GenericType REACTIVE_GRAPH_RESULT_SET = - GenericType.of(ReactiveGraphResultSet.class); - - private final GraphRequestAsyncProcessor asyncGraphProcessor; - - public ReactiveGraphRequestProcessor(@NonNull GraphRequestAsyncProcessor asyncGraphProcessor) { - this.asyncGraphProcessor = asyncGraphProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof GraphStatement && resultType.equals(REACTIVE_GRAPH_RESULT_SET); - } - - @Override - public ReactiveGraphResultSet process( - GraphStatement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new DefaultReactiveGraphResultSet( - () -> 
asyncGraphProcessor.process(request, session, context, sessionLogPrefix)); - } - - @Override - public ReactiveGraphResultSet newFailure(RuntimeException error) { - return new FailedReactiveGraphResultSet(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphResultSetSubscription.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphResultSetSubscription.java deleted file mode 100644 index c3234d74ebc..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphResultSetSubscription.java +++ /dev/null @@ -1,475 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.dse.driver.internal.core.cql.reactive.ReactiveOperators; -import com.datastax.dse.driver.internal.core.util.concurrent.BoundedConcurrentQueue; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Iterator; -import java.util.Objects; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import net.jcip.annotations.ThreadSafe; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is very similar to {@link - * com.datastax.dse.driver.internal.core.cql.reactive.ReactiveResultSetSubscription}. It exists - * merely because {@link AsyncGraphResultSet} is not a subtype of {@link - * com.datastax.oss.driver.api.core.AsyncPagingIterable} and thus it would be difficult to re-use - * ReactiveResultSetSubscription for graph result sets. - */ -@ThreadSafe -public class ReactiveGraphResultSetSubscription implements Subscription { - - private static final Logger LOG = - LoggerFactory.getLogger(ReactiveGraphResultSetSubscription.class); - - private static final int MAX_ENQUEUED_PAGES = 4; - - /** Tracks the number of items requested by the subscriber. 
*/ - private final AtomicLong requested = new AtomicLong(0); - - /** The pages received so far, with a maximum of MAX_ENQUEUED_PAGES elements. */ - private final BoundedConcurrentQueue pages = - new BoundedConcurrentQueue<>(MAX_ENQUEUED_PAGES); - - /** - * Used to signal that a thread is currently draining, i.e., emitting items to the subscriber. - * When it is zero, that means there is no ongoing emission. This mechanism effectively serializes - * access to the drain() method, and also keeps track of missed attempts to enter it, since each - * thread that attempts to drain will increment this counter. - * - * @see #drain() - */ - private final AtomicInteger draining = new AtomicInteger(0); - - /** - * Waited upon by the driver and completed when the subscriber requests its first item. - * - *

Used to hold off emitting results until the subscriber issues its first request for items. - * Since this future is only completed from {@link #request(long)}, this effectively conditions - * the enqueueing of the first page to the reception of the subscriber's first request. - * - *

This mechanism avoids sending terminal signals before a request is made when the stream is - * empty. Note that as per 2.9, "a Subscriber MUST be prepared to receive an onComplete signal - * with or without a preceding Subscription.request(long n) call." However, the TCK considers it - * as unfair behavior. - * - * @see #start(Callable) - * @see #request(long) - */ - private final CompletableFuture firstSubscriberRequestArrived = new CompletableFuture<>(); - - /** non-final because it has to be de-referenced, see {@link #clear()}. */ - private volatile Subscriber mainSubscriber; - - private volatile Subscriber executionInfosSubscriber; - - /** - * Set to true when the subscription is cancelled, which happens when an error is encountered, - * when the result set is fully consumed and the subscription terminates, or when the subscriber - * manually calls {@link #cancel()}. - */ - private volatile boolean cancelled = false; - - ReactiveGraphResultSetSubscription( - @NonNull Subscriber mainSubscriber, - @NonNull Subscriber executionInfosSubscriber) { - this.mainSubscriber = mainSubscriber; - this.executionInfosSubscriber = executionInfosSubscriber; - } - - /** - * Starts the query execution. - * - *

Must be called immediately after creating the subscription, but after {@link - * Subscriber#onSubscribe(Subscription)}. - * - * @param firstPage The future that, when complete, will produce the first page. - */ - void start(@NonNull Callable> firstPage) { - firstSubscriberRequestArrived.thenAccept( - (aVoid) -> fetchNextPageAndEnqueue(new Page(firstPage))); - } - - @Override - public void request(long n) { - // As per 3.6: after the Subscription is cancelled, additional - // calls to request() MUST be NOPs. - if (!cancelled) { - if (n < 1) { - // Validate request as per rule 3.9 - doOnError( - new IllegalArgumentException( - mainSubscriber - + " violated the Reactive Streams rule 3.9 by requesting a non-positive number of elements.")); - } else { - // As per rule 3.17, when demand overflows Long.MAX_VALUE - // it can be treated as "effectively unbounded" - ReactiveOperators.addCap(requested, n); - // Set the first future to true if not done yet. - // This will make the first page of results ready for consumption, - // see start(). - // As per 2.7 it is the subscriber's responsibility to provide - // external synchronization when calling request(), - // so the check-then-act idiom below is good enough - // (and besides, complete() is idempotent). - if (!firstSubscriberRequestArrived.isDone()) { - firstSubscriberRequestArrived.complete(null); - } - drain(); - } - } - } - - @Override - public void cancel() { - // As per 3.5: Subscription.cancel() MUST respect the responsiveness of - // its caller by returning in a timely manner, MUST be idempotent and - // MUST be thread-safe. - if (!cancelled) { - cancelled = true; - if (draining.getAndIncrement() == 0) { - // If nobody is draining, clear now; - // otherwise, the draining thread will notice - // that the cancelled flag was set - // and will clear for us. - clear(); - } - } - } - - /** - * Attempts to drain available items, i.e. emit them to the subscriber. - * - *

Access to this method is serialized by the field {@link #draining}: only one thread at a - * time can drain, but threads that attempt to drain while other thread is already draining - * increment that field; the draining thread, before finishing its work, checks for such failed - * attempts and triggers another round of draining if that was the case. - * - *

The loop is interrupted when 1) the requested amount has been met or 2) when there are no - * more items readily available or 3) the subscription has been cancelled. - * - *

The loop also checks for stream exhaustion and emits a terminal {@code onComplete} signal in - * this case. - * - *

This method may run on a driver IO thread when invoked from {@link - * #fetchNextPageAndEnqueue(Page)}, or on a subscriber thread, when invoked from {@link - * #request(long)}. - */ - @SuppressWarnings("ConditionalBreakInInfiniteLoop") - private void drain() { - // As per 3.4: this method SHOULD respect the responsiveness - // of its caller by returning in a timely manner. - // We accomplish this by a wait-free implementation. - if (draining.getAndIncrement() != 0) { - // Someone else is already draining, so do nothing, - // the other thread will notice that we attempted to drain. - // This also allows to abide by rule 3.3 and avoid - // cycles such as request() -> onNext() -> request() etc. - return; - } - int missed = 1; - // Note: when termination is detected inside this loop, - // we MUST call clear() manually. - for (; ; ) { - // The requested number of items at this point - long r = requested.get(); - // The number of items emitted thus far - long emitted = 0L; - while (emitted != r) { - if (cancelled) { - clear(); - return; - } - Object result; - try { - result = tryNext(); - } catch (Throwable t) { - doOnError(t); - clear(); - return; - } - if (result == null) { - break; - } - if (result instanceof Throwable) { - doOnError((Throwable) result); - clear(); - return; - } - doOnNext((ReactiveGraphNode) result); - emitted++; - } - if (isExhausted()) { - doOnComplete(); - clear(); - return; - } - if (cancelled) { - clear(); - return; - } - if (emitted != 0) { - // if any item was emitted, adjust the requested field - ReactiveOperators.subCap(requested, emitted); - } - // if another thread tried to call drain() while we were busy, - // then we should do another drain round. - missed = draining.addAndGet(-missed); - if (missed == 0) { - break; - } - } - } - - /** - * Tries to return the next item, if one is readily available, and returns {@code null} otherwise. - * - *

Cannot run concurrently due to the {@link #draining} field. - */ - @Nullable - private Object tryNext() { - Page current = pages.peek(); - if (current != null) { - if (current.hasMoreRows()) { - return current.nextRow(); - } else if (current.hasMorePages()) { - // Discard current page as it is consumed. - // Don't discard the last page though as we need it - // to test isExhausted(). It will be GC'ed when a terminal signal - // is issued anyway, so that's no big deal. - if (pages.poll() == null) { - throw new AssertionError("Queue is empty, this should not happen"); - } - current = pages.peek(); - // if the next page is readily available, - // serve its first row now, no need to wait - // for the next drain. - if (current != null && current.hasMoreRows()) { - return current.nextRow(); - } - } - } - // No items available right now. - return null; - } - - /** - * Returns {@code true} when the entire stream has been consumed and no more items can be emitted. - * When that is the case, a terminal signal is sent. - * - *

Cannot run concurrently due to the draining field. - */ - private boolean isExhausted() { - Page current = pages.peek(); - // Note: current can only be null when: - // 1) we are waiting for the first page and it hasn't arrived yet; - // 2) we just discarded the current page, but the next page hasn't arrived yet. - // In any case, a null here means it is not the last page, since the last page - // stays in the queue until the very end of the operation. - return current != null && !current.hasMoreRows() && !current.hasMorePages(); - } - - /** - * Runs on a subscriber thread initially, see {@link #start(Callable)}. Subsequent executions run - * on the thread that completes the pair of futures [current.fetchNextPage, pages.offer] and - * enqueues. This can be a driver IO thread or a subscriber thread; in both cases, cannot run - * concurrently due to the fact that one can only fetch the next page when the current one is - * arrived and enqueued. - */ - private void fetchNextPageAndEnqueue(@NonNull Page current) { - current - .fetchNextPage() - // as soon as the response arrives, - // create the new page - .handle( - (rs, t) -> { - Page page; - if (t == null) { - page = toPage(rs); - executionInfosSubscriber.onNext(rs.getRequestExecutionInfo()); - if (!page.hasMorePages()) { - executionInfosSubscriber.onComplete(); - } - } else { - // Unwrap CompletionExceptions created by combined futures - if (t instanceof CompletionException) { - t = t.getCause(); - } - page = toErrorPage(t); - executionInfosSubscriber.onError(t); - } - return page; - }) - .thenCompose(pages::offer) - .thenAccept( - page -> { - if (page.hasMorePages() && !cancelled) { - // preemptively fetch the next page, if available - fetchNextPageAndEnqueue(page); - } - drain(); - }); - } - - private void doOnNext(@NonNull ReactiveGraphNode result) { - try { - mainSubscriber.onNext(result); - } catch (Throwable t) { - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an 
exception from onNext.", - t); - cancel(); - } - } - - private void doOnComplete() { - try { - // Then we signal onComplete as per rules 1.2 and 1.5 - mainSubscriber.onComplete(); - } catch (Throwable t) { - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onComplete.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - // package-private because it can be invoked by the publisher if the subscription handshake - // process fails. - void doOnError(@NonNull Throwable error) { - try { - // Then we signal the error downstream, as per rules 1.2 and 1.4. - mainSubscriber.onError(error); - } catch (Throwable t) { - t.addSuppressed(error); - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onError.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - private void clear() { - // We don't need these pages anymore and should not hold references - // to them. - pages.clear(); - // As per 3.13, Subscription.cancel() MUST request the Publisher to - // eventually drop any references to the corresponding subscriber. - // Our own publishers do not keep references to this subscription, - // but downstream processors might do so, which is why we need to - // defensively clear the subscriber reference when we are done. - mainSubscriber = null; - executionInfosSubscriber = null; - } - - /** - * Converts the received result object into a {@link Page}. - * - * @param rs the result object to convert. - * @return a new page. - */ - @NonNull - private Page toPage(@NonNull AsyncGraphResultSet rs) { - ExecutionInfo executionInfo = rs.getRequestExecutionInfo(); - Iterator results = - Iterators.transform( - rs.currentPage().iterator(), - row -> new DefaultReactiveGraphNode(Objects.requireNonNull(row), executionInfo)); - return new Page(results, rs.hasMorePages() ? 
rs::fetchNextPage : null); - } - - /** Converts the given error into a {@link Page}, containing the error as its only element. */ - @NonNull - private Page toErrorPage(@NonNull Throwable t) { - return new Page(Iterators.singletonIterator(t), null); - } - - /** - * A page object comprises an iterator over the page's results, and a future pointing to the next - * page (or {@code null}, if it's the last page). - */ - static class Page { - - @NonNull final Iterator iterator; - - // A pointer to the next page, or null if this is the last page. - @Nullable final Callable> nextPage; - - /** called only from start() */ - Page(@NonNull Callable> nextPage) { - this.iterator = Collections.emptyIterator(); - this.nextPage = nextPage; - } - - Page( - @NonNull Iterator iterator, - @Nullable Callable> nextPage) { - this.iterator = iterator; - this.nextPage = nextPage; - } - - boolean hasMorePages() { - return nextPage != null; - } - - @NonNull - CompletionStage fetchNextPage() { - try { - return Objects.requireNonNull(nextPage).call(); - } catch (Exception e) { - // This is a synchronous failure in the driver. - // It can happen in rare cases when the driver throws an exception instead of returning a - // failed future; e.g. if someone tries to execute a continuous paging request but the - // protocol version in use does not support it. - // We treat it as a failed future. 
- return CompletableFutures.failedFuture(e); - } - } - - boolean hasMoreRows() { - return iterator.hasNext(); - } - - @NonNull - Object nextRow() { - return iterator.next(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/AddressFormatter.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/AddressFormatter.java deleted file mode 100644 index cecc951a3ab..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/AddressFormatter.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import java.net.InetAddress; -import java.net.InetSocketAddress; - -class AddressFormatter { - - static String nullSafeToString(Object address) { - if (address instanceof InetAddress) { - return nullSafeToString((InetAddress) address); - } else if (address instanceof InetSocketAddress) { - return nullSafeToString((InetSocketAddress) address); - } else if (address instanceof String) { - return address.toString(); - } else { - return ""; - } - } - - static String nullSafeToString(InetAddress inetAddress) { - return inetAddress != null ? 
inetAddress.getHostAddress() : null; - } - - static String nullSafeToString(InetSocketAddress inetSocketAddress) { - if (inetSocketAddress != null) { - if (inetSocketAddress.isUnresolved()) { - return String.format( - "%s:%s", - nullSafeToString(inetSocketAddress.getHostName()), inetSocketAddress.getPort()); - } else { - return String.format( - "%s:%s", nullSafeToString(inetSocketAddress.getAddress()), inetSocketAddress.getPort()); - } - } - return null; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinder.java deleted file mode 100644 index 7f5b9c20a0e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinder.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_HOSTNAME_VALIDATION; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.HashMap; -import java.util.Map; - -class ConfigAntiPatternsFinder { - Map findAntiPatterns(InternalDriverContext driverContext) { - Map antiPatterns = new HashMap<>(); - findSslAntiPattern(driverContext, antiPatterns); - return antiPatterns; - } - - private void findSslAntiPattern( - InternalDriverContext driverContext, Map antiPatterns) { - boolean isSslDefined = - driverContext.getConfig().getDefaultProfile().isDefined(SSL_ENGINE_FACTORY_CLASS); - boolean certValidation = - driverContext.getConfig().getDefaultProfile().getBoolean(SSL_HOSTNAME_VALIDATION, false); - if (isSslDefined && !certValidation) { - antiPatterns.put( - "sslWithoutCertValidation", - "Client-to-node encryption is enabled but server certificate validation is disabled"); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinder.java deleted file mode 100644 index 7112b8dcdf7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinder.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import java.util.Collection; -import java.util.HashSet; -import java.util.Set; - -class DataCentersFinder { - - Set getDataCenters(InternalDriverContext driverContext) { - return getDataCenters( - driverContext.getMetadataManager().getMetadata().getNodes().values(), - driverContext.getConfig().getDefaultProfile()); - } - - @VisibleForTesting - Set getDataCenters(Collection nodes, DriverExecutionProfile executionProfile) { - - int remoteConnectionsLength = executionProfile.getInt(CONNECTION_POOL_REMOTE_SIZE); - - Set dataCenters = new HashSet<>(); - for (Node n : nodes) { - NodeDistance distance = n.getDistance(); - - if (distance.equals(NodeDistance.LOCAL) - || (distance.equals(NodeDistance.REMOTE) && remoteConnectionsLength > 0)) { - dataCenters.add(n.getDatacenter()); - } - } - return dataCenters; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinder.java deleted file mode 
100644 index a7c92d80d96..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinder.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.api.core.config.DseDriverOption.GRAPH_TRAVERSAL_SOURCE; - -import com.datastax.dse.driver.internal.core.insights.PackageUtil.ClassSettingDetails; -import com.datastax.dse.driver.internal.core.insights.schema.LoadBalancingInfo; -import com.datastax.dse.driver.internal.core.insights.schema.SpecificExecutionProfile; -import com.datastax.dse.driver.internal.core.insights.schema.SpeculativeExecutionInfo; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -class ExecutionProfilesInfoFinder { - Map getExecutionProfilesInfo( - InternalDriverContext driverContext) { - - 
SpecificExecutionProfile defaultProfile = - mapToSpecificProfile(driverContext.getConfig().getDefaultProfile()); - - return driverContext.getConfig().getProfiles().entrySet().stream() - .collect( - Collectors.toMap( - Map.Entry::getKey, - e -> { - if (isNotDefaultProfile(e)) { - SpecificExecutionProfile specificExecutionProfile = - mapToSpecificProfile(e.getValue()); - return retainOnlyDifferentFieldsFromSpecificProfile( - defaultProfile, specificExecutionProfile); - } else { - return defaultProfile; - } - })); - } - - private boolean isNotDefaultProfile(Map.Entry e) { - return !e.getKey().equals("default"); - } - - private SpecificExecutionProfile retainOnlyDifferentFieldsFromSpecificProfile( - SpecificExecutionProfile defaultProfile, SpecificExecutionProfile specificExecutionProfile) { - Integer readTimeout = - getIfDifferentOrReturnNull( - defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getReadTimeout); - LoadBalancingInfo loadBalancingInfo = - getIfDifferentOrReturnNull( - defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getLoadBalancing); - - SpeculativeExecutionInfo speculativeExecutionInfo = - getIfDifferentOrReturnNull( - defaultProfile, - specificExecutionProfile, - SpecificExecutionProfile::getSpeculativeExecution); - - String consistency = - getIfDifferentOrReturnNull( - defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getConsistency); - - String serialConsistency = - getIfDifferentOrReturnNull( - defaultProfile, - specificExecutionProfile, - SpecificExecutionProfile::getSerialConsistency); - - Map graphOptions = - getIfDifferentOrReturnNull( - defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getGraphOptions); - - return new SpecificExecutionProfile( - readTimeout, - loadBalancingInfo, - speculativeExecutionInfo, - consistency, - serialConsistency, - graphOptions); - } - - private T getIfDifferentOrReturnNull( - SpecificExecutionProfile defaultProfile, - SpecificExecutionProfile 
profile, - Function valueExtractor) { - T defaultProfileValue = valueExtractor.apply(defaultProfile); - T specificProfileValue = valueExtractor.apply(profile); - if (defaultProfileValue.equals(specificProfileValue)) { - return null; - } else { - return specificProfileValue; - } - } - - private SpecificExecutionProfile mapToSpecificProfile( - DriverExecutionProfile driverExecutionProfile) { - return new SpecificExecutionProfile( - (int) driverExecutionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT).toMillis(), - getLoadBalancingInfo(driverExecutionProfile), - getSpeculativeExecutionInfo(driverExecutionProfile), - driverExecutionProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY), - driverExecutionProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY), - getGraphOptions(driverExecutionProfile)); - } - - private SpeculativeExecutionInfo getSpeculativeExecutionInfo( - DriverExecutionProfile driverExecutionProfile) { - Map options = new LinkedHashMap<>(); - - putIfExists( - options, - "maxSpeculativeExecutions", - DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, - driverExecutionProfile); - putIfExists( - options, "delay", DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, driverExecutionProfile); - - ClassSettingDetails speculativeExecutionDetails = - PackageUtil.getSpeculativeExecutionDetails( - driverExecutionProfile.getString( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS)); - return new SpeculativeExecutionInfo( - speculativeExecutionDetails.getClassName(), - options, - speculativeExecutionDetails.getFullPackage()); - } - - private void putIfExists( - Map options, - String key, - DefaultDriverOption option, - DriverExecutionProfile executionProfile) { - if (executionProfile.isDefined(option)) { - options.put(key, executionProfile.getInt(option)); - } - } - - private LoadBalancingInfo getLoadBalancingInfo(DriverExecutionProfile driverExecutionProfile) { - Map options = new LinkedHashMap<>(); - if 
(driverExecutionProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) { - options.put( - "localDataCenter", - driverExecutionProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)); - } - @SuppressWarnings("deprecation") - boolean hasNodeFiltering = - driverExecutionProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS) - || driverExecutionProfile.isDefined( - DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS); - options.put("filterFunction", hasNodeFiltering); - ClassSettingDetails loadBalancingDetails = - PackageUtil.getLoadBalancingDetails( - driverExecutionProfile.getString(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS)); - return new LoadBalancingInfo( - loadBalancingDetails.getClassName(), options, loadBalancingDetails.getFullPackage()); - } - - private Map getGraphOptions(DriverExecutionProfile driverExecutionProfile) { - Map graphOptionsMap = new HashMap<>(); - String graphTraversalSource = driverExecutionProfile.getString(GRAPH_TRAVERSAL_SOURCE, null); - if (graphTraversalSource != null) { - graphOptionsMap.put("source", graphTraversalSource); - } - return graphOptionsMap; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsClient.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsClient.java deleted file mode 100644 index f19687adf45..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsClient.java +++ /dev/null @@ -1,491 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.AUTH_PROVIDER_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.HEARTBEAT_INTERVAL; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.PROTOCOL_COMPRESSION; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_HOSTNAME_VALIDATION; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.internal.core.insights.PackageUtil.ClassSettingDetails; -import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; -import com.datastax.dse.driver.internal.core.insights.exceptions.InsightEventFormatException; -import com.datastax.dse.driver.internal.core.insights.schema.AuthProviderType; -import com.datastax.dse.driver.internal.core.insights.schema.Insight; -import com.datastax.dse.driver.internal.core.insights.schema.InsightMetadata; -import com.datastax.dse.driver.internal.core.insights.schema.InsightType; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsStartupData; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsStatusData; -import 
com.datastax.dse.driver.internal.core.insights.schema.PoolSizeByHostDistance; -import com.datastax.dse.driver.internal.core.insights.schema.SSL; -import com.datastax.dse.driver.internal.core.insights.schema.SessionStateForNode; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.StartupOptionsBuilder; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ScheduledExecutorService; -import 
java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; -import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class InsightsClient { - private static final Logger LOGGER = LoggerFactory.getLogger(InsightsClient.class); - private static final String STARTUP_MESSAGE_NAME = "driver.startup"; - private static final String STATUS_MESSAGE_NAME = "driver.status"; - private static final String REPORT_INSIGHT_RPC = "CALL InsightsRpc.reportInsight(?)"; - private static final Map TAGS = ImmutableMap.of("language", "java"); - private static final String STARTUP_VERSION_1_ID = "v1"; - private static final String STATUS_VERSION_1_ID = "v1"; - private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - private static final int MAX_NUMBER_OF_STATUS_ERROR_LOGS = 5; - static final String DEFAULT_JAVA_APPLICATION = "Default Java Application"; - - private final ControlConnection controlConnection; - private final String id = Uuids.random().toString(); - private final InsightsConfiguration insightsConfiguration; - private final AtomicInteger numberOfStatusEventErrors = new AtomicInteger(); - - private final InternalDriverContext driverContext; - private final Supplier timestampSupplier; - private final PlatformInfoFinder platformInfoFinder; - private final ReconnectionPolicyInfoFinder reconnectionPolicyInfoInfoFinder; - private final ExecutionProfilesInfoFinder executionProfilesInfoFinder; - private final ConfigAntiPatternsFinder configAntiPatternsFinder; - private final DataCentersFinder dataCentersFinder; - private final StackTraceElement[] initCallStackTrace; - - private volatile ScheduledFuture scheduleInsightsTask; - - public static InsightsClient createInsightsClient( - InsightsConfiguration insightsConfiguration, - InternalDriverContext driverContext, - StackTraceElement[] initCallStackTrace) { - 
DataCentersFinder dataCentersFinder = new DataCentersFinder(); - return new InsightsClient( - driverContext, - () -> new Date().getTime(), - insightsConfiguration, - new PlatformInfoFinder(), - new ReconnectionPolicyInfoFinder(), - new ExecutionProfilesInfoFinder(), - new ConfigAntiPatternsFinder(), - dataCentersFinder, - initCallStackTrace); - } - - InsightsClient( - InternalDriverContext driverContext, - Supplier timestampSupplier, - InsightsConfiguration insightsConfiguration, - PlatformInfoFinder platformInfoFinder, - ReconnectionPolicyInfoFinder reconnectionPolicyInfoInfoFinder, - ExecutionProfilesInfoFinder executionProfilesInfoFinder, - ConfigAntiPatternsFinder configAntiPatternsFinder, - DataCentersFinder dataCentersFinder, - StackTraceElement[] initCallStackTrace) { - this.driverContext = driverContext; - this.controlConnection = driverContext.getControlConnection(); - this.timestampSupplier = timestampSupplier; - this.insightsConfiguration = insightsConfiguration; - this.platformInfoFinder = platformInfoFinder; - this.reconnectionPolicyInfoInfoFinder = reconnectionPolicyInfoInfoFinder; - this.executionProfilesInfoFinder = executionProfilesInfoFinder; - this.configAntiPatternsFinder = configAntiPatternsFinder; - this.dataCentersFinder = dataCentersFinder; - this.initCallStackTrace = initCallStackTrace; - } - - public CompletionStage sendStartupMessage() { - try { - if (!shouldSendEvent()) { - return CompletableFuture.completedFuture(null); - } else { - String startupMessage = createStartupMessage(); - return sendJsonMessage(startupMessage) - .whenComplete( - (aVoid, throwable) -> { - if (throwable != null) { - LOGGER.debug( - "Error while sending startup message to Insights. 
Message was: " - + trimToFirst500characters(startupMessage), - throwable); - } - }); - } - } catch (Exception e) { - LOGGER.debug("Unexpected error while sending startup message to Insights.", e); - return CompletableFutures.failedFuture(e); - } - } - - private static String trimToFirst500characters(String startupMessage) { - return startupMessage.substring(0, Math.min(startupMessage.length(), 500)); - } - - public void scheduleStatusMessageSend() { - if (!shouldSendEvent()) { - return; - } - scheduleInsightsTask = - scheduleInsightsTask( - insightsConfiguration.getStatusEventDelayMillis(), - insightsConfiguration.getExecutor(), - this::sendStatusMessage); - } - - public void shutdown() { - if (scheduleInsightsTask != null) { - scheduleInsightsTask.cancel(false); - } - } - - @VisibleForTesting - public CompletionStage sendStatusMessage() { - try { - String statusMessage = createStatusMessage(); - CompletionStage result = sendJsonMessage(statusMessage); - return result.whenComplete( - (aVoid, throwable) -> { - if (throwable != null) { - if (numberOfStatusEventErrors.getAndIncrement() < MAX_NUMBER_OF_STATUS_ERROR_LOGS) { - LOGGER.debug( - "Error while sending status message to Insights. 
Message was: " - + trimToFirst500characters(statusMessage), - throwable); - } - } - }); - } catch (Exception e) { - LOGGER.debug("Unexpected error while sending status message to Insights.", e); - return CompletableFutures.failedFuture(e); - } - } - - private CompletionStage sendJsonMessage(String jsonMessage) { - - QueryOptions queryOptions = createQueryOptionsWithJson(jsonMessage); - String logPrefix = driverContext.getSessionName(); - Duration timeout = - driverContext - .getConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT); - LOGGER.debug("sending JSON message: {}", jsonMessage); - - Query query = new Query(REPORT_INSIGHT_RPC, queryOptions); - return AdminRequestHandler.call(controlConnection.channel(), query, timeout, logPrefix).start(); - } - - private QueryOptions createQueryOptionsWithJson(String json) { - TypeCodec codec = - driverContext.getCodecRegistry().codecFor(DataTypes.TEXT, String.class); - ByteBuffer startupMessageSerialized = codec.encode(json, DseProtocolVersion.DSE_V2); - return new QueryOptions( - QueryOptions.DEFAULT.consistency, - Collections.singletonList(startupMessageSerialized), - QueryOptions.DEFAULT.namedValues, - QueryOptions.DEFAULT.skipMetadata, - QueryOptions.DEFAULT.pageSize, - QueryOptions.DEFAULT.pagingState, - QueryOptions.DEFAULT.serialConsistency, - QueryOptions.DEFAULT.defaultTimestamp, - QueryOptions.DEFAULT.keyspace, - QueryOptions.DEFAULT.nowInSeconds); - } - - private boolean shouldSendEvent() { - try { - return insightsConfiguration.isMonitorReportingEnabled() - && InsightsSupportVerifier.supportsInsights( - driverContext.getMetadataManager().getMetadata().getNodes().values()); - } catch (Exception e) { - LOGGER.debug("Unexpected error while checking Insights support.", e); - return false; - } - } - - @VisibleForTesting - String createStartupMessage() { - InsightMetadata insightMetadata = createMetadata(STARTUP_MESSAGE_NAME, STARTUP_VERSION_1_ID); - InsightsStartupData data = 
createStartupData(); - - try { - return OBJECT_MAPPER.writeValueAsString(new Insight<>(insightMetadata, data)); - } catch (JsonProcessingException e) { - throw new InsightEventFormatException("Problem when creating: " + STARTUP_MESSAGE_NAME, e); - } - } - - @VisibleForTesting - String createStatusMessage() { - InsightMetadata insightMetadata = createMetadata(STATUS_MESSAGE_NAME, STATUS_VERSION_1_ID); - InsightsStatusData data = createStatusData(); - - try { - return OBJECT_MAPPER.writeValueAsString(new Insight<>(insightMetadata, data)); - } catch (JsonProcessingException e) { - throw new InsightEventFormatException("Problem when creating: " + STATUS_MESSAGE_NAME, e); - } - } - - private InsightsStatusData createStatusData() { - Map startupOptions = driverContext.getStartupOptions(); - return InsightsStatusData.builder() - .withClientId(getClientId(startupOptions)) - .withSessionId(id) - .withControlConnection(getControlConnectionSocketAddress()) - .withConnectedNodes(getConnectedNodes()) - .build(); - } - - private Map getConnectedNodes() { - Map pools = driverContext.getPoolManager().getPools(); - return pools.entrySet().stream() - .collect( - Collectors.toMap( - entry -> AddressFormatter.nullSafeToString(entry.getKey().getEndPoint().resolve()), - this::constructSessionStateForNode)); - } - - private SessionStateForNode constructSessionStateForNode(Map.Entry entry) { - return new SessionStateForNode( - entry.getKey().getOpenConnections(), entry.getValue().getInFlight()); - } - - private InsightsStartupData createStartupData() { - Map startupOptions = driverContext.getStartupOptions(); - return InsightsStartupData.builder() - .withClientId(getClientId(startupOptions)) - .withSessionId(id) - .withApplicationName(getApplicationName(startupOptions)) - .withApplicationVersion(getApplicationVersion(startupOptions)) - .withDriverName(getDriverName(startupOptions)) - .withDriverVersion(getDriverVersion(startupOptions)) - .withContactPoints( - getResolvedContactPoints( - 
driverContext.getMetadataManager().getContactPoints().stream() - .map(n -> n.getEndPoint().resolve()) - .filter(InetSocketAddress.class::isInstance) - .map(InetSocketAddress.class::cast) - .collect(Collectors.toSet()))) - .withInitialControlConnection(getControlConnectionSocketAddress()) - .withProtocolVersion(driverContext.getProtocolVersion().getCode()) - .withLocalAddress(getLocalAddress()) - .withExecutionProfiles(executionProfilesInfoFinder.getExecutionProfilesInfo(driverContext)) - .withPoolSizeByHostDistance(getPoolSizeByHostDistance()) - .withHeartbeatInterval( - driverContext - .getConfig() - .getDefaultProfile() - .getDuration(HEARTBEAT_INTERVAL) - .toMillis()) - .withCompression( - driverContext.getConfig().getDefaultProfile().getString(PROTOCOL_COMPRESSION, "none")) - .withReconnectionPolicy( - reconnectionPolicyInfoInfoFinder.getReconnectionPolicyInfo( - driverContext.getReconnectionPolicy(), - driverContext.getConfig().getDefaultProfile())) - .withSsl(getSsl()) - .withAuthProvider(getAuthProvider()) - .withOtherOptions(getOtherOptions()) - .withPlatformInfo(platformInfoFinder.getInsightsPlatformInfo()) - .withConfigAntiPatterns(configAntiPatternsFinder.findAntiPatterns(driverContext)) - .withPeriodicStatusInterval(getPeriodicStatusInterval()) - .withHostName(getLocalHostName()) - .withApplicationNameWasGenerated(isApplicationNameGenerated(startupOptions)) - .withDataCenters(dataCentersFinder.getDataCenters(driverContext)) - .build(); - } - - private AuthProviderType getAuthProvider() { - String authProviderClassName = - driverContext - .getConfig() - .getDefaultProfile() - .getString(AUTH_PROVIDER_CLASS, "NoAuthProvider"); - ClassSettingDetails authProviderDetails = - PackageUtil.getAuthProviderDetails(authProviderClassName); - return new AuthProviderType( - authProviderDetails.getClassName(), authProviderDetails.getFullPackage()); - } - - private long getPeriodicStatusInterval() { - return 
TimeUnit.MILLISECONDS.toSeconds(insightsConfiguration.getStatusEventDelayMillis()); - } - - @VisibleForTesting - static Map> getResolvedContactPoints(Set contactPoints) { - if (contactPoints == null) { - return Collections.emptyMap(); - } - return contactPoints.stream() - .collect( - Collectors.groupingBy( - InetSocketAddress::getHostName, - Collectors.mapping(AddressFormatter::nullSafeToString, Collectors.toList()))); - } - - private String getDriverVersion(Map startupOptions) { - return startupOptions.get(StartupOptionsBuilder.DRIVER_VERSION_KEY); - } - - private String getDriverName(Map startupOptions) { - return startupOptions.get(StartupOptionsBuilder.DRIVER_NAME_KEY); - } - - private String getClientId(Map startupOptions) { - return startupOptions.get(StartupOptionsBuilder.CLIENT_ID_KEY); - } - - private boolean isApplicationNameGenerated(Map startupOptions) { - return startupOptions.get(StartupOptionsBuilder.APPLICATION_NAME_KEY) == null; - } - - private String getApplicationVersion(Map startupOptions) { - String applicationVersion = startupOptions.get(StartupOptionsBuilder.APPLICATION_VERSION_KEY); - if (applicationVersion == null) { - return ""; - } - return applicationVersion; - } - - private String getApplicationName(Map startupOptions) { - String applicationName = startupOptions.get(StartupOptionsBuilder.APPLICATION_NAME_KEY); - if (applicationName == null || applicationName.isEmpty()) { - return getClusterCreateCaller(initCallStackTrace); - } - return applicationName; - } - - @VisibleForTesting - static String getClusterCreateCaller(StackTraceElement[] stackTrace) { - for (int i = 0; i < stackTrace.length - 1; i++) { - if (isClusterStackTrace(stackTrace[i])) { - int nextElement = i + 1; - if (!isClusterStackTrace(stackTrace[nextElement])) { - return stackTrace[nextElement].getClassName(); - } - } - } - return DEFAULT_JAVA_APPLICATION; - } - - private static boolean isClusterStackTrace(StackTraceElement stackTraceElement) { - return 
stackTraceElement.getClassName().equals(DefaultDriverContext.class.getName()) - || stackTraceElement.getClassName().equals(SessionBuilder.class.getName()); - } - - private String getLocalHostName() { - try { - return InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - LOGGER.warn("Can not resolve the name of a host, returning null", e); - return null; - } - } - - private Map getOtherOptions() { - return Collections.emptyMap(); // todo - } - - private SSL getSsl() { - boolean isSslDefined = - driverContext.getConfig().getDefaultProfile().isDefined(SSL_ENGINE_FACTORY_CLASS); - boolean certValidation = - driverContext.getConfig().getDefaultProfile().getBoolean(SSL_HOSTNAME_VALIDATION, false); - return new SSL(isSslDefined, certValidation); - } - - private PoolSizeByHostDistance getPoolSizeByHostDistance() { - - return new PoolSizeByHostDistance( - driverContext.getConfig().getDefaultProfile().getInt(CONNECTION_POOL_LOCAL_SIZE), - driverContext.getConfig().getDefaultProfile().getInt(CONNECTION_POOL_REMOTE_SIZE), - 0); - } - - private String getControlConnectionSocketAddress() { - SocketAddress controlConnectionAddress = controlConnection.channel().getEndPoint().resolve(); - return AddressFormatter.nullSafeToString(controlConnectionAddress); - } - - private String getLocalAddress() { - SocketAddress controlConnectionLocalAddress = controlConnection.channel().localAddress(); - if (controlConnectionLocalAddress instanceof InetSocketAddress) { - return AddressFormatter.nullSafeToString( - ((InetSocketAddress) controlConnectionLocalAddress).getAddress()); - } - return null; - } - - private InsightMetadata createMetadata(String messageName, String messageVersion) { - return new InsightMetadata( - messageName, timestampSupplier.get(), TAGS, InsightType.EVENT, messageVersion); - } - - @VisibleForTesting - static ScheduledFuture scheduleInsightsTask( - long statusEventDelayMillis, - ScheduledExecutorService scheduledTasksExecutor, - Runnable 
runnable) { - long initialDelay = - (long) Math.floor(statusEventDelayMillis - zeroToTenPercentRandom(statusEventDelayMillis)); - return scheduledTasksExecutor.scheduleWithFixedDelay( - runnable, initialDelay, statusEventDelayMillis, TimeUnit.MILLISECONDS); - } - - private static double zeroToTenPercentRandom(long statusEventDelayMillis) { - return 0.1 * statusEventDelayMillis * Math.random(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifier.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifier.java deleted file mode 100644 index ec016ef52d8..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifier.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import java.util.Collection; - -class InsightsSupportVerifier { - private static final Version minDse6Version = Version.parse("6.0.5"); - private static final Version minDse51Version = Version.parse("5.1.13"); - private static final Version dse600Version = Version.parse("6.0.0"); - - static boolean supportsInsights(Collection nodes) { - assert minDse6Version != null; - assert dse600Version != null; - assert minDse51Version != null; - if (nodes.isEmpty()) return false; - - for (Node node : nodes) { - Object version = node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (version == null) { - return false; - } - Version dseVersion = (Version) version; - if (!(dseVersion.compareTo(minDse6Version) >= 0 - || (dseVersion.compareTo(dse600Version) < 0 - && dseVersion.compareTo(minDse51Version) >= 0))) { - return false; - } - } - return true; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java deleted file mode 100644 index 3c61dec4f20..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import java.util.Arrays; -import java.util.regex.Pattern; - -class PackageUtil { - static final String DEFAULT_SPECULATIVE_EXECUTION_PACKAGE = - "com.datastax.oss.driver.internal.core.specex"; - static final String DEFAULT_LOAD_BALANCING_PACKAGE = - "com.datastax.oss.driver.internal.core.loadbalancing"; - static final String DEFAULT_AUTH_PROVIDER_PACKAGE = "com.datastax.oss.driver.internal.core.auth"; - private static final Pattern PACKAGE_SPLIT_REGEX = Pattern.compile("\\."); - private static final Joiner DOT_JOINER = Joiner.on("."); - - static String getNamespace(Class tClass) { - String namespace = ""; - Package packageInfo = tClass.getPackage(); - if (packageInfo != null) { - namespace = packageInfo.getName(); - } - return namespace; - } - - static ClassSettingDetails getSpeculativeExecutionDetails(String classSetting) { - return getClassSettingDetails(classSetting, DEFAULT_SPECULATIVE_EXECUTION_PACKAGE); - } - - static ClassSettingDetails getLoadBalancingDetails(String classSetting) { - return getClassSettingDetails(classSetting, DEFAULT_LOAD_BALANCING_PACKAGE); - } - - static ClassSettingDetails getAuthProviderDetails(String classSetting) { - return getClassSettingDetails(classSetting, DEFAULT_AUTH_PROVIDER_PACKAGE); - } - - private static ClassSettingDetails getClassSettingDetails( - String classSetting, String packageName) { - String 
className = getClassName(classSetting); - String fullPackage = getFullPackageOrDefault(classSetting, packageName); - return new ClassSettingDetails(className, fullPackage); - } - - @VisibleForTesting - static String getClassName(String classSetting) { - String[] split = PACKAGE_SPLIT_REGEX.split(classSetting); - if (split.length == 0) { - return ""; - } - return split[split.length - 1]; - } - - @VisibleForTesting - static String getFullPackageOrDefault(String classSetting, String defaultValue) { - String[] split = PACKAGE_SPLIT_REGEX.split(classSetting); - if (split.length <= 1) return defaultValue; - return DOT_JOINER.join(Arrays.copyOf(split, split.length - 1)); - } - - static class ClassSettingDetails { - private final String className; - private final String fullPackage; - - ClassSettingDetails(String className, String fullPackage) { - this.className = className; - this.fullPackage = fullPackage; - } - - String getClassName() { - return className; - } - - String getFullPackage() { - return fullPackage; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinder.java deleted file mode 100644 index 30d41d40836..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinder.java +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.OS; -import static com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.RuntimeAndCompileTimeVersions; - -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.CPUS; -import com.datastax.oss.driver.internal.core.os.Native; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Properties; -import java.util.function.Function; -import java.util.regex.Pattern; - -class PlatformInfoFinder { - private static final String MAVEN_IGNORE_LINE = "The following files have been resolved:"; - private static final Pattern DEPENDENCY_SPLIT_REGEX = Pattern.compile(":"); - static final String UNVERIFIED_RUNTIME_VERSION = "UNVERIFIED"; - private final Function propertiesUrlProvider; - - @SuppressWarnings("UnnecessaryLambda") - private static final Function M2_PROPERTIES_PROVIDER = - d -> { - ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); - if (contextClassLoader == null) { - 
contextClassLoader = PlatformInfoFinder.class.getClassLoader(); - } - return contextClassLoader.getResource( - "META-INF/maven/" + d.groupId + "/" + d.artifactId + "/pom.properties"); - }; - - PlatformInfoFinder() { - this(M2_PROPERTIES_PROVIDER); - } - - @VisibleForTesting - PlatformInfoFinder(Function pomPropertiesUrlProvider) { - this.propertiesUrlProvider = pomPropertiesUrlProvider; - } - - InsightsPlatformInfo getInsightsPlatformInfo() { - OS os = getOsInfo(); - CPUS cpus = getCpuInfo(); - Map> runtimeInfo = getRuntimeInfo(); - - return new InsightsPlatformInfo(os, cpus, runtimeInfo); - } - - private Map> getRuntimeInfo() { - Map coreDeps = - fetchDependenciesFromFile( - this.getClass().getResourceAsStream("/com/datastax/dse/driver/internal/deps.txt")); - - Map queryBuilderDeps = - fetchDependenciesFromFile( - this.getClass() - .getResourceAsStream("/com/datastax/dse/driver/internal/querybuilder/deps.txt")); - - Map mapperProcessorDeps = - fetchDependenciesFromFile( - this.getClass() - .getResourceAsStream( - "/com/datastax/dse/driver/internal/mapper/processor/deps.txt")); - - Map mapperRuntimeDeps = - fetchDependenciesFromFile( - this.getClass() - .getResourceAsStream("/com/datastax/dse/driver/internal/mapper/deps.txt")); - - Map> runtimeDependencies = - new LinkedHashMap<>(); - putIfNonEmpty(coreDeps, runtimeDependencies, "core"); - putIfNonEmpty(queryBuilderDeps, runtimeDependencies, "query-builder"); - putIfNonEmpty(mapperProcessorDeps, runtimeDependencies, "mapper-processor"); - putIfNonEmpty(mapperRuntimeDeps, runtimeDependencies, "mapper-runtime"); - addJavaVersion(runtimeDependencies); - return runtimeDependencies; - } - - private void putIfNonEmpty( - Map moduleDependencies, - Map> runtimeDependencies, - String moduleName) { - if (!moduleDependencies.isEmpty()) { - runtimeDependencies.put(moduleName, moduleDependencies); - } - } - - @VisibleForTesting - void addJavaVersion(Map> runtimeDependencies) { - Package javaPackage = Runtime.class.getPackage(); 
- Map javaDependencies = new LinkedHashMap<>(); - javaDependencies.put( - "version", toSameRuntimeAndCompileVersion(javaPackage.getImplementationVersion())); - javaDependencies.put( - "vendor", toSameRuntimeAndCompileVersion(javaPackage.getImplementationVendor())); - javaDependencies.put( - "title", toSameRuntimeAndCompileVersion(javaPackage.getImplementationTitle())); - putIfNonEmpty(javaDependencies, runtimeDependencies, "java"); - } - - private RuntimeAndCompileTimeVersions toSameRuntimeAndCompileVersion(String version) { - return new RuntimeAndCompileTimeVersions(version, version, false); - } - - /** - * Method is fetching dependencies from file. Lines in file should be in format: - * com.organization:artifactId:jar:1.2.0 or com.organization:artifactId:jar:native:1.2.0 - * - *

For such file the output will be: Map - * "com.organization:artifactId",{"runtimeVersion":"1.2.0", "compileVersion:"1.2.0", "optional": - * false} Duplicates will be omitted. If there are two dependencies for the exactly the same - * organizationId:artifactId it is not deterministic which version will be taken. In the case of - * an error while opening file this method will fail silently returning an empty Map - */ - @VisibleForTesting - Map fetchDependenciesFromFile(InputStream inputStream) { - Map dependencies = new LinkedHashMap<>(); - if (inputStream == null) { - return dependencies; - } - try { - List dependenciesFromFile = extractMavenDependenciesFromFile(inputStream); - for (DependencyFromFile d : dependenciesFromFile) { - dependencies.put(formatDependencyName(d), getRuntimeAndCompileVersion(d)); - } - } catch (IOException e) { - return dependencies; - } - return dependencies; - } - - private RuntimeAndCompileTimeVersions getRuntimeAndCompileVersion(DependencyFromFile d) { - URL url = propertiesUrlProvider.apply(d); - if (url == null) { - return new RuntimeAndCompileTimeVersions( - UNVERIFIED_RUNTIME_VERSION, d.getVersion(), d.isOptional()); - } - Properties properties = new Properties(); - try { - properties.load(url.openStream()); - } catch (IOException e) { - return new RuntimeAndCompileTimeVersions( - UNVERIFIED_RUNTIME_VERSION, d.getVersion(), d.isOptional()); - } - Object version = properties.get("version"); - if (version == null) { - return new RuntimeAndCompileTimeVersions( - UNVERIFIED_RUNTIME_VERSION, d.getVersion(), d.isOptional()); - } else { - return new RuntimeAndCompileTimeVersions(version.toString(), d.getVersion(), d.isOptional()); - } - } - - private String formatDependencyName(DependencyFromFile d) { - return String.format("%s:%s", d.getGroupId(), d.getArtifactId()); - } - - private List extractMavenDependenciesFromFile(InputStream inputStream) - throws IOException { - List dependenciesFromFile = new ArrayList<>(); - BufferedReader reader 
= - new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); - for (String line; (line = reader.readLine()) != null; ) { - if (lineWithDependencyInfo(line)) { - dependenciesFromFile.add(extractDependencyFromLine(line.trim())); - } - } - return dependenciesFromFile; - } - - private DependencyFromFile extractDependencyFromLine(String line) { - String[] split = DEPENDENCY_SPLIT_REGEX.split(line); - if (split.length == 6) { // case for i.e.: com.github.jnr:jffi:jar:native:1.2.16:compile - return new DependencyFromFile(split[0], split[1], split[4], checkIsOptional(split[5])); - } else { // case for normal: org.ow2.asm:asm:jar:5.0.3:compile - return new DependencyFromFile(split[0], split[1], split[3], checkIsOptional(split[4])); - } - } - - private boolean checkIsOptional(String scope) { - return scope.contains("(optional)"); - } - - private boolean lineWithDependencyInfo(String line) { - return (!line.equals(MAVEN_IGNORE_LINE) && !line.isEmpty()); - } - - private CPUS getCpuInfo() { - int numberOfProcessors = Runtime.getRuntime().availableProcessors(); - String model = Native.getCpu(); - return new CPUS(numberOfProcessors, model); - } - - private OS getOsInfo() { - String osName = System.getProperty("os.name"); - String osVersion = System.getProperty("os.version"); - String osArch = System.getProperty("os.arch"); - return new OS(osName, osVersion, osArch); - } - - static class DependencyFromFile { - private final String groupId; - private final String artifactId; - private final String version; - private final boolean optional; - - DependencyFromFile(String groupId, String artifactId, String version, boolean optional) { - this.groupId = groupId; - this.artifactId = artifactId; - this.version = version; - this.optional = optional; - } - - String getGroupId() { - return groupId; - } - - String getArtifactId() { - return artifactId; - } - - String getVersion() { - return version; - } - - boolean isOptional() { - return optional; - } - - @Override - 
public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof DependencyFromFile)) { - return false; - } - DependencyFromFile that = (DependencyFromFile) o; - return optional == that.optional - && Objects.equals(groupId, that.groupId) - && Objects.equals(artifactId, that.artifactId) - && Objects.equals(version, that.version); - } - - @Override - public int hashCode() { - return Objects.hash(groupId, artifactId, version, optional); - } - - @Override - public String toString() { - return "DependencyFromFile{" - + "groupId='" - + groupId - + '\'' - + ", artifactId='" - + artifactId - + '\'' - + ", version='" - + version - + '\'' - + ", optional=" - + optional - + '}'; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinder.java deleted file mode 100644 index af8aff74035..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinder.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import com.datastax.dse.driver.internal.core.insights.schema.ReconnectionPolicyInfo; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy; -import com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy; -import java.util.HashMap; -import java.util.Map; - -class ReconnectionPolicyInfoFinder { - ReconnectionPolicyInfo getReconnectionPolicyInfo( - ReconnectionPolicy reconnectionPolicy, DriverExecutionProfile executionProfile) { - Class reconnectionPolicyClass = reconnectionPolicy.getClass(); - String type = reconnectionPolicyClass.getSimpleName(); - String namespace = PackageUtil.getNamespace(reconnectionPolicyClass); - Map options = new HashMap<>(); - if (reconnectionPolicy instanceof ConstantReconnectionPolicy) { - options.put( - "delayMs", - executionProfile.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY).toMillis()); - } else if (reconnectionPolicy instanceof ExponentialReconnectionPolicy) { - ExponentialReconnectionPolicy exponentialReconnectionPolicy = - (ExponentialReconnectionPolicy) reconnectionPolicy; - options.put("maxDelayMs", exponentialReconnectionPolicy.getMaxDelayMs()); - options.put("baseDelayMs", exponentialReconnectionPolicy.getBaseDelayMs()); - options.put("maxAttempts", exponentialReconnectionPolicy.getMaxAttempts()); - } - return new ReconnectionPolicyInfo(type, options, namespace); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/configuration/InsightsConfiguration.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/configuration/InsightsConfiguration.java deleted file mode 100644 index ac27bb76389..00000000000 --- 
a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/configuration/InsightsConfiguration.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.configuration; - -import io.netty.util.concurrent.EventExecutor; - -public class InsightsConfiguration { - private final boolean monitorReportingEnabled; - private final long statusEventDelayMillis; - private final EventExecutor executor; - - public InsightsConfiguration( - boolean monitorReportingEnabled, long statusEventDelayMillis, EventExecutor executor) { - this.monitorReportingEnabled = monitorReportingEnabled; - this.statusEventDelayMillis = statusEventDelayMillis; - this.executor = executor; - } - - public boolean isMonitorReportingEnabled() { - return monitorReportingEnabled; - } - - public long getStatusEventDelayMillis() { - return statusEventDelayMillis; - } - - public EventExecutor getExecutor() { - return executor; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/exceptions/InsightEventFormatException.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/exceptions/InsightEventFormatException.java 
deleted file mode 100644 index cfce68971ef..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/exceptions/InsightEventFormatException.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.exceptions; - -public class InsightEventFormatException extends RuntimeException { - - public InsightEventFormatException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/AuthProviderType.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/AuthProviderType.java deleted file mode 100644 index 18aec53e899..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/AuthProviderType.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class AuthProviderType { - @JsonProperty("type") - private final String type; - - @JsonProperty("namespace") - private final String namespace; - - @JsonCreator - public AuthProviderType( - @JsonProperty("type") String type, @JsonProperty("namespace") String namespace) { - this.type = type; - this.namespace = namespace; - } - - public String getType() { - return type; - } - - public String getNamespace() { - return namespace; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof AuthProviderType)) { - return false; - } - AuthProviderType that = (AuthProviderType) o; - return Objects.equals(type, that.type) && Objects.equals(namespace, that.namespace); - } - - @Override - public int hashCode() { - return Objects.hash(type, namespace); - } - - @Override - public String toString() { - return "AuthProviderType{" + "type='" + type + '\'' + ", namespace='" + namespace + '\'' + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/Insight.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/Insight.java deleted file mode 100644 index 
ca4e6455345..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/Insight.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; - -@JsonIgnoreProperties(ignoreUnknown = true) -@JsonInclude(JsonInclude.Include.NON_EMPTY) -public class Insight { - @JsonProperty("metadata") - private final InsightMetadata metadata; - - @JsonProperty("data") - private final T insightData; - - @JsonCreator - public Insight(@JsonProperty("metadata") InsightMetadata metadata, @JsonProperty("data") T data) { - this.metadata = metadata; - this.insightData = data; - } - - public InsightMetadata getMetadata() { - return metadata; - } - - public T getInsightData() { - return insightData; - } - - @Override - public String toString() { - return "Insight{" + "metadata=" + metadata + ", insightData=" + insightData + '}'; - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightMetadata.java deleted file mode 100644 index cfa2644b0c7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightMetadata.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class InsightMetadata { - @JsonProperty("name") - private final String name; - - @JsonProperty("timestamp") - private final long timestamp; - - @JsonProperty("tags") - private final Map tags; - - @JsonProperty("insightType") - private final InsightType insightType; - - @JsonProperty("insightMappingId") - @JsonInclude(JsonInclude.Include.NON_NULL) - private String insightMappingId; - - @JsonCreator - public InsightMetadata( - @JsonProperty("name") String name, - @JsonProperty("timestamp") long timestamp, - @JsonProperty("tags") Map tags, - @JsonProperty("insightType") InsightType insightType, - @JsonProperty("insightMappingId") String insightMappingId) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "name is required"); - - this.name = name; - this.timestamp = timestamp; - this.tags = tags; - this.insightType = insightType; - this.insightMappingId = insightMappingId; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof InsightMetadata)) { - return false; - } - InsightMetadata that = (InsightMetadata) o; - return Objects.equals(name, that.name) - && Objects.equals(timestamp, that.timestamp) - && Objects.equals(tags, that.tags) - && insightType == that.insightType - && Objects.equals(insightMappingId, that.insightMappingId); - } - - @Override - public int hashCode() { - return Objects.hash(name, timestamp, tags, insightType, insightMappingId); - } - - @Override - public String toString() { - return "InsightMetadata{" - + "name='" - + name - + '\'' - + ", timestamp=" - + 
timestamp - + ", tags=" - + tags - + ", insightType=" - + insightType - + ", insightMappingId=" - + insightMappingId - + '}'; - } - - public String getName() { - return name; - } - - public long getTimestamp() { - return timestamp; - } - - public Map getTags() { - return tags; - } - - public InsightType getInsightType() { - return insightType; - } - - public String getInsightMappingId() { - return insightMappingId; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightType.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightType.java deleted file mode 100644 index ae91e27d227..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightType.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -public enum InsightType { - EVENT, - GAUGE, - COUNTER, - HISTOGRAM, - TIMER, - METER, - LOG; -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsPlatformInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsPlatformInfo.java deleted file mode 100644 index 231f082d785..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsPlatformInfo.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class InsightsPlatformInfo { - @JsonProperty("os") - private final OS os; - - @JsonProperty("cpus") - private CPUS cpus; - - /** - * All dependencies in a map format grouped by the module: {"core" : {"com.datastax.driver:core": - * {"runtimeVersion:" : "1.0.0", "compileVersion": "1.0.1"},...}}, "extras"" {...} - */ - @JsonProperty("runtime") - private Map> runtime; - - @JsonCreator - public InsightsPlatformInfo( - @JsonProperty("os") OS os, - @JsonProperty("cpus") CPUS cpus, - @JsonProperty("runtime") Map> runtime) { - this.os = os; - this.cpus = cpus; - this.runtime = runtime; - } - - public OS getOs() { - return os; - } - - public CPUS getCpus() { - return cpus; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof InsightsPlatformInfo)) { - return false; - } - InsightsPlatformInfo that = (InsightsPlatformInfo) o; - return Objects.equals(os, that.os) - && Objects.equals(cpus, that.cpus) - && Objects.equals(runtime, that.runtime); - } - - @Override - public int hashCode() { - return Objects.hash(os, cpus, runtime); - } - - Map> getRuntime() { - return runtime; - } - - public static class OS { - @JsonProperty("name") - private final String name; - - @JsonProperty("version") - private final String version; - - @JsonProperty("arch") - private final String arch; - - @JsonCreator - public OS( - @JsonProperty("name") String name, - @JsonProperty("version") String version, - @JsonProperty("arch") String arch) { - this.name = name; - this.version = version; - this.arch = arch; - } - - public String getName() { - return name; - } - - public String getVersion() { - return version; - } - - public String getArch() { - return arch; - } - - @Override - public boolean equals(Object o) 
{ - if (this == o) { - return true; - } - if (!(o instanceof OS)) { - return false; - } - OS os = (OS) o; - return Objects.equals(name, os.name) - && Objects.equals(version, os.version) - && Objects.equals(arch, os.arch); - } - - @Override - public int hashCode() { - return Objects.hash(name, version, arch); - } - } - - public static class CPUS { - @JsonProperty("length") - private final int length; - - @JsonProperty("model") - private final String model; - - @JsonCreator - public CPUS(@JsonProperty("length") int length, @JsonProperty("model") String model) { - this.length = length; - this.model = model; - } - - public int getLength() { - return length; - } - - public String getModel() { - return model; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof CPUS)) { - return false; - } - CPUS cpus = (CPUS) o; - return length == cpus.length && Objects.equals(model, cpus.model); - } - - @Override - public int hashCode() { - return Objects.hash(length, model); - } - } - - public static class RuntimeAndCompileTimeVersions { - @JsonProperty("runtimeVersion") - private final String runtimeVersion; - - @JsonProperty("compileVersion") - private final String compileVersion; - - @JsonProperty("optional") - private final boolean optional; - - @JsonCreator - public RuntimeAndCompileTimeVersions( - @JsonProperty("runtimeVersion") String runtimeVersion, - @JsonProperty("compileVersion") String compileVersion, - @JsonProperty("optional") boolean optional) { - this.runtimeVersion = runtimeVersion; - this.compileVersion = compileVersion; - this.optional = optional; - } - - public String getRuntimeVersion() { - return runtimeVersion; - } - - public String getCompileVersion() { - return compileVersion; - } - - public boolean isOptional() { - return optional; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof RuntimeAndCompileTimeVersions)) { - return false; - } - 
RuntimeAndCompileTimeVersions that = (RuntimeAndCompileTimeVersions) o; - return optional == that.optional - && Objects.equals(runtimeVersion, that.runtimeVersion) - && Objects.equals(compileVersion, that.compileVersion); - } - - @Override - public int hashCode() { - return Objects.hash(runtimeVersion, compileVersion, optional); - } - - @Override - public String toString() { - return "RuntimeAndCompileTimeVersions{" - + "runtimeVersion='" - + runtimeVersion - + '\'' - + ", compileVersion='" - + compileVersion - + '\'' - + ", optional=" - + optional - + '}'; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStartupData.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStartupData.java deleted file mode 100644 index bddd3ef94b3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStartupData.java +++ /dev/null @@ -1,425 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class InsightsStartupData { - @JsonProperty("clientId") - private final String clientId; - - @JsonProperty("sessionId") - private final String sessionId; - - @JsonProperty("applicationName") - private final String applicationName; - - @JsonProperty("applicationVersion") - private final String applicationVersion; - - @JsonProperty("contactPoints") - private final Map> contactPoints; - - @JsonProperty("initialControlConnection") - private final String initialControlConnection; - - @JsonProperty("protocolVersion") - private final int protocolVersion; - - @JsonProperty("localAddress") - private final String localAddress; - - @JsonProperty("executionProfiles") - private final Map executionProfiles; - - @JsonProperty("poolSizeByHostDistance") - private final PoolSizeByHostDistance poolSizeByHostDistance; - - @JsonProperty("heartbeatInterval") - private final long heartbeatInterval; - - @JsonProperty("compression") - private final String compression; - - @JsonProperty("reconnectionPolicy") - private final ReconnectionPolicyInfo reconnectionPolicy; - - @JsonProperty("ssl") - private final SSL ssl; - - @JsonProperty("authProvider") - private final AuthProviderType authProvider; - - @JsonProperty("otherOptions") - private final Map otherOptions; - - @JsonProperty("configAntiPatterns") - private final Map configAntiPatterns; - - @JsonProperty("periodicStatusInterval") - private final long periodicStatusInterval; - - @JsonProperty("platformInfo") - private final InsightsPlatformInfo platformInfo; - - @JsonProperty("hostName") - private final String hostName; - - @JsonProperty("driverName") - private String driverName; - - @JsonProperty("applicationNameWasGenerated") - private boolean applicationNameWasGenerated; - - 
@JsonProperty("driverVersion") - private String driverVersion; - - @JsonProperty("dataCenters") - private Set dataCenters; - - @JsonCreator - private InsightsStartupData( - @JsonProperty("clientId") String clientId, - @JsonProperty("sessionId") String sessionId, - @JsonProperty("applicationName") String applicationName, - @JsonProperty("applicationVersion") String applicationVersion, - @JsonProperty("contactPoints") Map> contactPoints, - @JsonProperty("initialControlConnection") String initialControlConnection, - @JsonProperty("protocolVersion") int protocolVersion, - @JsonProperty("localAddress") String localAddress, - @JsonProperty("executionProfiles") Map executionProfiles, - @JsonProperty("poolSizeByHostDistance") PoolSizeByHostDistance poolSizeByHostDistance, - @JsonProperty("heartbeatInterval") long heartbeatInterval, - @JsonProperty("compression") String compression, - @JsonProperty("reconnectionPolicy") ReconnectionPolicyInfo reconnectionPolicy, - @JsonProperty("ssl") SSL ssl, - @JsonProperty("authProvider") AuthProviderType authProvider, - @JsonProperty("otherOptions") Map otherOptions, - @JsonProperty("configAntiPatterns") Map configAntiPatterns, - @JsonProperty("periodicStatusInterval") long periodicStatusInterval, - @JsonProperty("platformInfo") InsightsPlatformInfo platformInfo, - @JsonProperty("hostName") String hostName, - @JsonProperty("driverName") String driverName, - @JsonProperty("applicationNameWasGenerated") boolean applicationNameWasGenerated, - @JsonProperty("driverVersion") String driverVersion, - @JsonProperty("dataCenters") Set dataCenters) { - this.clientId = clientId; - this.sessionId = sessionId; - this.applicationName = applicationName; - this.applicationVersion = applicationVersion; - this.contactPoints = contactPoints; - this.initialControlConnection = initialControlConnection; - this.protocolVersion = protocolVersion; - this.localAddress = localAddress; - this.executionProfiles = executionProfiles; - this.poolSizeByHostDistance = 
poolSizeByHostDistance; - this.heartbeatInterval = heartbeatInterval; - this.compression = compression; - this.reconnectionPolicy = reconnectionPolicy; - this.ssl = ssl; - this.authProvider = authProvider; - this.otherOptions = otherOptions; - this.configAntiPatterns = configAntiPatterns; - this.periodicStatusInterval = periodicStatusInterval; - this.platformInfo = platformInfo; - this.hostName = hostName; - this.driverName = driverName; - this.applicationNameWasGenerated = applicationNameWasGenerated; - this.driverVersion = driverVersion; - this.dataCenters = dataCenters; - } - - public String getClientId() { - return clientId; - } - - public String getSessionId() { - return sessionId; - } - - public String getApplicationName() { - return applicationName; - } - - public String getApplicationVersion() { - return applicationVersion; - } - - public Map> getContactPoints() { - return contactPoints; - } - - public String getInitialControlConnection() { - return initialControlConnection; - } - - public int getProtocolVersion() { - return protocolVersion; - } - - public String getLocalAddress() { - return localAddress; - } - - public Map getExecutionProfiles() { - return executionProfiles; - } - - public PoolSizeByHostDistance getPoolSizeByHostDistance() { - return poolSizeByHostDistance; - } - - public long getHeartbeatInterval() { - return heartbeatInterval; - } - - public String getCompression() { - return compression; - } - - public ReconnectionPolicyInfo getReconnectionPolicy() { - return reconnectionPolicy; - } - - public SSL getSsl() { - return ssl; - } - - public AuthProviderType getAuthProvider() { - return authProvider; - } - - public Map getOtherOptions() { - return otherOptions; - } - - public Map getConfigAntiPatterns() { - return configAntiPatterns; - } - - public long getPeriodicStatusInterval() { - return periodicStatusInterval; - } - - public InsightsPlatformInfo getPlatformInfo() { - return platformInfo; - } - - public String getHostName() { - return 
hostName; - } - - public String getDriverName() { - return driverName; - } - - public boolean isApplicationNameWasGenerated() { - return applicationNameWasGenerated; - } - - public String getDriverVersion() { - return driverVersion; - } - - public Set getDataCenters() { - return dataCenters; - } - - public static InsightsStartupData.Builder builder() { - return new InsightsStartupData.Builder(); - } - - public static class Builder { - private String clientId; - private String sessionId; - private String applicationName; - private String applicationVersion; - private Map> contactPoints; - private String initialControlConnection; - private int protocolVersion; - private String localAddress; - private Map executionProfiles; - private PoolSizeByHostDistance poolSizeByHostDistance; - private long heartbeatInterval; - private String compression; - private ReconnectionPolicyInfo reconnectionPolicy; - private SSL ssl; - private AuthProviderType authProvider; - private Map otherOptions; - private Map configAntiPatterns; - private long periodicStatusInterval; - private InsightsPlatformInfo platformInfo; - private String hostName; - private String driverName; - private String driverVersion; - private boolean applicationNameWasGenerated; - private Set dataCenters; - - public InsightsStartupData build() { - return new InsightsStartupData( - clientId, - sessionId, - applicationName, - applicationVersion, - contactPoints, - initialControlConnection, - protocolVersion, - localAddress, - executionProfiles, - poolSizeByHostDistance, - heartbeatInterval, - compression, - reconnectionPolicy, - ssl, - authProvider, - otherOptions, - configAntiPatterns, - periodicStatusInterval, - platformInfo, - hostName, - driverName, - applicationNameWasGenerated, - driverVersion, - dataCenters); - } - - public Builder withClientId(String clientId) { - this.clientId = clientId; - return this; - } - - public Builder withSessionId(String id) { - this.sessionId = id; - return this; - } - - public 
Builder withApplicationName(String applicationName) { - this.applicationName = applicationName; - return this; - } - - public Builder withApplicationVersion(String applicationVersion) { - this.applicationVersion = applicationVersion; - return this; - } - - public Builder withContactPoints(Map> contactPoints) { - this.contactPoints = contactPoints; - return this; - } - - public Builder withInitialControlConnection(String inetSocketAddress) { - this.initialControlConnection = inetSocketAddress; - return this; - } - - public Builder withProtocolVersion(int protocolVersion) { - this.protocolVersion = protocolVersion; - return this; - } - - public Builder withLocalAddress(String localAddress) { - this.localAddress = localAddress; - return this; - } - - public Builder withExecutionProfiles(Map executionProfiles) { - this.executionProfiles = executionProfiles; - return this; - } - - public Builder withPoolSizeByHostDistance(PoolSizeByHostDistance poolSizeByHostDistance) { - this.poolSizeByHostDistance = poolSizeByHostDistance; - return this; - } - - public Builder withHeartbeatInterval(long heartbeatInterval) { - this.heartbeatInterval = heartbeatInterval; - return this; - } - - public Builder withCompression(String compression) { - this.compression = compression; - return this; - } - - public Builder withReconnectionPolicy(ReconnectionPolicyInfo reconnectionPolicy) { - this.reconnectionPolicy = reconnectionPolicy; - return this; - } - - public Builder withSsl(SSL ssl) { - this.ssl = ssl; - return this; - } - - public Builder withAuthProvider(AuthProviderType authProvider) { - this.authProvider = authProvider; - return this; - } - - public Builder withOtherOptions(Map otherOptions) { - this.otherOptions = otherOptions; - return this; - } - - public Builder withConfigAntiPatterns(Map configAntiPatterns) { - this.configAntiPatterns = configAntiPatterns; - return this; - } - - public Builder withPeriodicStatusInterval(long periodicStatusInterval) { - 
this.periodicStatusInterval = periodicStatusInterval; - return this; - } - - public Builder withPlatformInfo(InsightsPlatformInfo insightsPlatformInfo) { - this.platformInfo = insightsPlatformInfo; - return this; - } - - public Builder withHostName(String hostName) { - this.hostName = hostName; - return this; - } - - public Builder withDriverName(String driverName) { - this.driverName = driverName; - return this; - } - - public Builder withDriverVersion(String driverVersion) { - this.driverVersion = driverVersion; - return this; - } - - public Builder withApplicationNameWasGenerated(boolean applicationNameWasGenerated) { - this.applicationNameWasGenerated = applicationNameWasGenerated; - return this; - } - - public Builder withDataCenters(Set dataCenters) { - this.dataCenters = dataCenters; - return this; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStatusData.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStatusData.java deleted file mode 100644 index 6f5a135f7c4..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStatusData.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class InsightsStatusData { - @JsonProperty("clientId") - private final String clientId; - - @JsonProperty("sessionId") - private final String sessionId; - - @JsonProperty("controlConnection") - private final String controlConnection; - - @JsonProperty("connectedNodes") - private final Map connectedNodes; - - @JsonCreator - private InsightsStatusData( - @JsonProperty("clientId") String clientId, - @JsonProperty("sessionId") String sessionId, - @JsonProperty("controlConnection") String controlConnection, - @JsonProperty("connectedNodes") Map connectedNodes) { - this.clientId = clientId; - this.sessionId = sessionId; - this.controlConnection = controlConnection; - this.connectedNodes = connectedNodes; - } - - public String getClientId() { - return clientId; - } - - public String getSessionId() { - return sessionId; - } - - public String getControlConnection() { - return controlConnection; - } - - public Map getConnectedNodes() { - return connectedNodes; - } - - @Override - public String toString() { - return "InsightsStatusData{" - + "clientId='" - + clientId - + '\'' - + ", sessionId='" - + sessionId - + '\'' - + ", controlConnection=" - + controlConnection - + ", connectedNodes=" - + connectedNodes - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof InsightsStatusData)) { - return false; - } - InsightsStatusData that = (InsightsStatusData) o; - return Objects.equals(clientId, that.clientId) - && Objects.equals(sessionId, that.sessionId) - && Objects.equals(controlConnection, that.controlConnection) - && Objects.equals(connectedNodes, that.connectedNodes); - 
} - - @Override - public int hashCode() { - return Objects.hash(clientId, sessionId, controlConnection, connectedNodes); - } - - public static InsightsStatusData.Builder builder() { - return new InsightsStatusData.Builder(); - } - - public static class Builder { - private String clientId; - private String sessionId; - private String controlConnection; - private Map connectedNodes; - - public Builder withClientId(String clientId) { - this.clientId = clientId; - return this; - } - - public Builder withSessionId(String id) { - this.sessionId = id; - return this; - } - - public Builder withControlConnection(String controlConnection) { - this.controlConnection = controlConnection; - return this; - } - - public Builder withConnectedNodes(Map connectedNodes) { - this.connectedNodes = connectedNodes; - return this; - } - - public InsightsStatusData build() { - return new InsightsStatusData(clientId, sessionId, controlConnection, connectedNodes); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/LoadBalancingInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/LoadBalancingInfo.java deleted file mode 100644 index 594583e3f28..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/LoadBalancingInfo.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class LoadBalancingInfo { - @JsonProperty("type") - private final String type; - - @JsonProperty("options") - private final Map options; - - @JsonProperty("namespace") - private final String namespace; - - @JsonCreator - public LoadBalancingInfo( - @JsonProperty("type") String type, - @JsonProperty("options") Map options, - @JsonProperty("namespace") String namespace) { - this.type = type; - this.options = options; - this.namespace = namespace; - } - - public String getType() { - return type; - } - - public Map getOptions() { - return options; - } - - public String getNamespace() { - return namespace; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof LoadBalancingInfo)) { - return false; - } - LoadBalancingInfo that = (LoadBalancingInfo) o; - return Objects.equals(type, that.type) - && Objects.equals(options, that.options) - && Objects.equals(namespace, that.namespace); - } - - @Override - public int hashCode() { - return Objects.hash(type, options, namespace); - } - - @Override - public String toString() { - return "LoadBalancingInfo{" - + "type='" - + type - + '\'' - + ", options=" - + options - + ", namespace='" - + namespace - + '\'' - + '}'; - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/PoolSizeByHostDistance.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/PoolSizeByHostDistance.java deleted file mode 100644 index 07f76a18d40..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/PoolSizeByHostDistance.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class PoolSizeByHostDistance { - @JsonProperty("local") - private final int local; - - @JsonProperty("remote") - private final int remote; - - @JsonProperty("ignored") - private final int ignored; - - @JsonCreator - public PoolSizeByHostDistance( - @JsonProperty("local") int local, - @JsonProperty("remote") int remote, - @JsonProperty("ignored") int ignored) { - - this.local = local; - this.remote = remote; - this.ignored = ignored; - } - - public int getLocal() { - return local; - } - - public int getRemote() { - return remote; - } - - public int getIgnored() { - return ignored; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof PoolSizeByHostDistance)) { - return false; - } - PoolSizeByHostDistance that = (PoolSizeByHostDistance) o; - return local == that.local && remote == that.remote && ignored == that.ignored; - } - - @Override - public int hashCode() { - return Objects.hash(local, remote, ignored); - } - - @Override - public String toString() { - return "PoolSizeByHostDistance{" - + "local=" - + local - + ", remote=" - + remote - + ", ignored=" - + ignored - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/ReconnectionPolicyInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/ReconnectionPolicyInfo.java deleted file mode 100644 index 463c23a4325..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/ReconnectionPolicyInfo.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class ReconnectionPolicyInfo { - @JsonProperty("type") - private final String type; - - @JsonProperty("options") - private final Map options; - - @JsonProperty("namespace") - private final String namespace; - - @JsonCreator - public ReconnectionPolicyInfo( - @JsonProperty("type") String type, - @JsonProperty("options") Map options, - @JsonProperty("namespace") String namespace) { - - this.type = type; - this.options = options; - this.namespace = namespace; - } - - public String getType() { - return type; - } - - public Map getOptions() { - return options; - } - - public String getNamespace() { - return namespace; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ReconnectionPolicyInfo)) { - return false; - } - ReconnectionPolicyInfo that = (ReconnectionPolicyInfo) o; - return Objects.equals(type, that.type) - && Objects.equals(options, that.options) - && Objects.equals(namespace, that.namespace); - } - - @Override - public int hashCode() { - return Objects.hash(type, options, 
namespace); - } - - @Override - public String toString() { - return "ReconnectionPolicyInfo{" - + "type='" - + type - + '\'' - + ", options=" - + options - + ", namespace='" - + namespace - + '\'' - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SSL.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SSL.java deleted file mode 100644 index debcd85c025..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SSL.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class SSL { - @JsonProperty("enabled") - private final boolean enabled; - - @JsonProperty("certValidation") - private final boolean certValidation; - - @JsonCreator - public SSL( - @JsonProperty("enabled") boolean enabled, - @JsonProperty("certValidation") boolean certValidation) { - this.enabled = enabled; - this.certValidation = certValidation; - } - - public boolean isEnabled() { - return enabled; - } - - public boolean isCertValidation() { - return certValidation; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SSL)) { - return false; - } - SSL that = (SSL) o; - return enabled == that.enabled && certValidation == that.certValidation; - } - - @Override - public int hashCode() { - return Objects.hash(enabled, certValidation); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SessionStateForNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SessionStateForNode.java deleted file mode 100644 index 8b50e5b2313..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SessionStateForNode.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class SessionStateForNode { - @JsonProperty("connections") - private final Integer connections; - - @JsonProperty("inFlightQueries") - private final Integer inFlightQueries; - - @JsonCreator - public SessionStateForNode( - @JsonProperty("connections") Integer connections, - @JsonProperty("inFlightQueries") Integer inFlightQueries) { - this.connections = connections; - this.inFlightQueries = inFlightQueries; - } - - public Integer getConnections() { - return connections; - } - - public Integer getInFlightQueries() { - return inFlightQueries; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SessionStateForNode)) { - return false; - } - SessionStateForNode that = (SessionStateForNode) o; - return Objects.equals(connections, that.connections) - && Objects.equals(inFlightQueries, that.inFlightQueries); - } - - @Override - public int hashCode() { - return Objects.hash(connections, inFlightQueries); - } - - @Override - public String toString() { - return "SessionStateForNode{" - + "connections=" - + connections - + ", inFlightQueries=" - + inFlightQueries - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpecificExecutionProfile.java 
b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpecificExecutionProfile.java deleted file mode 100644 index 58652fdf885..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpecificExecutionProfile.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -@JsonInclude(JsonInclude.Include.NON_NULL) -public class SpecificExecutionProfile { - @JsonProperty("readTimeout") - private final Integer readTimeout; - - @JsonProperty("loadBalancing") - private final LoadBalancingInfo loadBalancing; - - @JsonProperty("speculativeExecution") - private SpeculativeExecutionInfo speculativeExecution; - - @JsonProperty("consistency") - private final String consistency; - - @JsonProperty("serialConsistency") - private final String serialConsistency; - - @JsonProperty("graphOptions") - private Map graphOptions; - - @JsonCreator - public SpecificExecutionProfile( - @JsonProperty("readTimeout") Integer readTimeoutMillis, - @JsonProperty("loadBalancing") LoadBalancingInfo loadBalancing, - @JsonProperty("speculativeExecution") SpeculativeExecutionInfo speculativeExecutionInfo, - @JsonProperty("consistency") String consistency, - @JsonProperty("serialConsistency") String serialConsistency, - @JsonProperty("graphOptions") Map graphOptions) { - readTimeout = readTimeoutMillis; - this.loadBalancing = loadBalancing; - this.speculativeExecution = speculativeExecutionInfo; - this.consistency = consistency; - this.serialConsistency = serialConsistency; - this.graphOptions = graphOptions; - } - - public Integer getReadTimeout() { - return readTimeout; - } - - public LoadBalancingInfo getLoadBalancing() { - return loadBalancing; - } - - public SpeculativeExecutionInfo getSpeculativeExecution() { - return speculativeExecution; - } - - public String getConsistency() { - return consistency; - } - - public String getSerialConsistency() { - return serialConsistency; - } - - public Map getGraphOptions() { - return graphOptions; - } - - @Override - public boolean 
equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SpecificExecutionProfile)) { - return false; - } - SpecificExecutionProfile that = (SpecificExecutionProfile) o; - return Objects.equals(readTimeout, that.readTimeout) - && Objects.equals(loadBalancing, that.loadBalancing) - && Objects.equals(speculativeExecution, that.speculativeExecution) - && Objects.equals(consistency, that.consistency) - && Objects.equals(serialConsistency, that.serialConsistency) - && Objects.equals(graphOptions, that.graphOptions); - } - - @Override - public int hashCode() { - return Objects.hash( - readTimeout, - loadBalancing, - speculativeExecution, - consistency, - serialConsistency, - graphOptions); - } - - @Override - public String toString() { - return "SpecificExecutionProfile{" - + "readTimeout=" - + readTimeout - + ", loadBalancing=" - + loadBalancing - + ", speculativeExecution=" - + speculativeExecution - + ", consistency='" - + consistency - + '\'' - + ", serialConsistency='" - + serialConsistency - + '\'' - + ", graphOptions=" - + graphOptions - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpeculativeExecutionInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpeculativeExecutionInfo.java deleted file mode 100644 index 779a4ed9e51..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpeculativeExecutionInfo.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class SpeculativeExecutionInfo { - @JsonProperty("type") - private final String type; - - @JsonProperty("options") - private final Map options; - - @JsonProperty("namespace") - private String namespace; - - @JsonCreator - public SpeculativeExecutionInfo( - @JsonProperty("type") String type, - @JsonProperty("options") Map options, - @JsonProperty("namespace") String namespace) { - this.type = type; - this.options = options; - this.namespace = namespace; - } - - public String getType() { - return type; - } - - public Map getOptions() { - return options; - } - - public String getNamespace() { - return namespace; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SpeculativeExecutionInfo)) { - return false; - } - SpeculativeExecutionInfo that = (SpeculativeExecutionInfo) o; - return Objects.equals(type, that.type) - && Objects.equals(options, that.options) - && Objects.equals(namespace, that.namespace); - } - - @Override - public int hashCode() { - return Objects.hash(type, options, namespace); - } - - @Override - public String toString() { - return "SpeculativeExecutionInfo{" - + "type='" - + type - + '\'' - + ", options=" - + options - + ", namespace='" - + namespace - + '\'' - + '}'; - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseDcInferringLoadBalancingPolicy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseDcInferringLoadBalancingPolicy.java deleted file mode 100644 index 501fa263258..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseDcInferringLoadBalancingPolicy.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.loadbalancing; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.loadbalancing.DcInferringLoadBalancingPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * @deprecated This class only exists for backward compatibility. It is equivalent to {@link - * DcInferringLoadBalancingPolicy}, which should now be used instead. 
- */ -@Deprecated -public class DseDcInferringLoadBalancingPolicy extends DcInferringLoadBalancingPolicy { - public DseDcInferringLoadBalancingPolicy( - @NonNull DriverContext context, @NonNull String profileName) { - super(context, profileName); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseLoadBalancingPolicy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseLoadBalancingPolicy.java deleted file mode 100644 index 059a37c4774..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseLoadBalancingPolicy.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.loadbalancing; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * @deprecated This class only exists for backward compatibility. It is equivalent to {@link - * DefaultLoadBalancingPolicy}, which should now be used instead. 
- */ -@Deprecated -public class DseLoadBalancingPolicy extends DefaultLoadBalancingPolicy { - public DseLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) { - super(context, profileName); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseAggregateMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseAggregateMetadata.java deleted file mode 100644 index 52a0b846076..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseAggregateMetadata.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseAggregateMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultAggregateMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseAggregateMetadata extends DefaultAggregateMetadata - implements DseAggregateMetadata { - - @Nullable private final Boolean deterministic; - - public DefaultDseAggregateMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull FunctionSignature signature, - @Nullable FunctionSignature finalFuncSignature, - @Nullable Object initCond, - @NonNull DataType returnType, - @NonNull FunctionSignature stateFuncSignature, - @NonNull DataType stateType, - @NonNull TypeCodec stateTypeCodec, - @Nullable Boolean deterministic) { - super( - keyspace, - signature, - finalFuncSignature, - initCond, - returnType, - stateFuncSignature, - stateType, - stateTypeCodec); - this.deterministic = deterministic; - } - - @Override - @Deprecated - public boolean isDeterministic() { - return deterministic != null && deterministic; - } - - @Override - @Nullable - public Optional getDeterministic() { - return Optional.ofNullable(deterministic); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseAggregateMetadata) { - DseAggregateMetadata that = (DseAggregateMetadata) other; - return Objects.equals(this.getKeyspace(), that.getKeyspace()) - && Objects.equals(this.getSignature(), that.getSignature()) - && 
Objects.equals( - this.getFinalFuncSignature().orElse(null), that.getFinalFuncSignature().orElse(null)) - && Objects.equals(this.getInitCond().orElse(null), that.getInitCond().orElse(null)) - && Objects.equals(this.getReturnType(), that.getReturnType()) - && Objects.equals(this.getStateFuncSignature(), that.getStateFuncSignature()) - && Objects.equals(this.getStateType(), that.getStateType()) - && Objects.equals(this.deterministic, that.getDeterministic().orElse(null)); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - getKeyspace(), - getSignature(), - getFinalFuncSignature(), - getInitCond(), - getReturnType(), - getStateFuncSignature(), - getStateType(), - deterministic); - } - - @Override - public String toString() { - return "Aggregate Name: " - + getSignature().getName().asCql(false) - + ", Keyspace: " - + getKeyspace().asCql(false) - + ", Return Type: " - + getReturnType().asCql(false, false) - + ", Deterministic: " - + deterministic; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseColumnMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseColumnMetadata.java deleted file mode 100644 index 2168f20fdc7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseColumnMetadata.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseColumnMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseColumnMetadata extends DefaultColumnMetadata implements DseColumnMetadata { - - public DefaultDseColumnMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier parent, - @NonNull CqlIdentifier name, - @NonNull DataType dataType, - boolean isStatic) { - super(keyspace, parent, name, dataType, isStatic); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseEdgeMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseEdgeMetadata.java deleted file mode 100644 index e4de62f294c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseEdgeMetadata.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.List; -import java.util.Objects; - -public class DefaultDseEdgeMetadata implements DseEdgeMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier labelName; - - @NonNull private final CqlIdentifier fromTable; - @NonNull private final CqlIdentifier fromLabel; - @NonNull private final List fromPartitionKeyColumns; - @NonNull private final List fromClusteringColumns; - - @NonNull private final CqlIdentifier toTable; - @NonNull private final CqlIdentifier toLabel; - @NonNull private final List toPartitionKeyColumns; - @NonNull private final List toClusteringColumns; - - public DefaultDseEdgeMetadata( - @NonNull CqlIdentifier labelName, - @NonNull CqlIdentifier fromTable, - @NonNull CqlIdentifier fromLabel, - @NonNull List fromPartitionKeyColumns, - @NonNull List fromClusteringColumns, - @NonNull CqlIdentifier toTable, - @NonNull CqlIdentifier toLabel, - @NonNull List toPartitionKeyColumns, - @NonNull List toClusteringColumns) { - this.labelName = 
Preconditions.checkNotNull(labelName); - this.fromTable = Preconditions.checkNotNull(fromTable); - this.fromLabel = Preconditions.checkNotNull(fromLabel); - this.fromPartitionKeyColumns = Preconditions.checkNotNull(fromPartitionKeyColumns); - this.fromClusteringColumns = Preconditions.checkNotNull(fromClusteringColumns); - this.toTable = Preconditions.checkNotNull(toTable); - this.toLabel = Preconditions.checkNotNull(toLabel); - this.toPartitionKeyColumns = Preconditions.checkNotNull(toPartitionKeyColumns); - this.toClusteringColumns = Preconditions.checkNotNull(toClusteringColumns); - } - - @NonNull - @Override - public CqlIdentifier getLabelName() { - return labelName; - } - - @NonNull - @Override - public CqlIdentifier getFromTable() { - return fromTable; - } - - @NonNull - @Override - public CqlIdentifier getFromLabel() { - return fromLabel; - } - - @NonNull - @Override - public List getFromPartitionKeyColumns() { - return fromPartitionKeyColumns; - } - - @NonNull - @Override - public List getFromClusteringColumns() { - return fromClusteringColumns; - } - - @NonNull - @Override - public CqlIdentifier getToTable() { - return toTable; - } - - @NonNull - @Override - public CqlIdentifier getToLabel() { - return toLabel; - } - - @NonNull - @Override - public List getToPartitionKeyColumns() { - return toPartitionKeyColumns; - } - - @NonNull - @Override - public List getToClusteringColumns() { - return toClusteringColumns; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseEdgeMetadata) { - DseEdgeMetadata that = (DseEdgeMetadata) other; - return Objects.equals(this.labelName, that.getLabelName()) - && Objects.equals(this.fromTable, that.getFromTable()) - && Objects.equals(this.fromLabel, that.getFromLabel()) - && Objects.equals(this.fromPartitionKeyColumns, that.getFromPartitionKeyColumns()) - && Objects.equals(this.fromClusteringColumns, that.getFromClusteringColumns()) - && 
Objects.equals(this.toTable, that.getToTable()) - && Objects.equals(this.toLabel, that.getToLabel()) - && Objects.equals(this.toPartitionKeyColumns, that.getToPartitionKeyColumns()) - && Objects.equals(this.toClusteringColumns, that.getToClusteringColumns()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - labelName, - fromTable, - fromLabel, - fromPartitionKeyColumns, - fromClusteringColumns, - toTable, - toLabel, - toPartitionKeyColumns, - toClusteringColumns); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseFunctionMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseFunctionMetadata.java deleted file mode 100644 index 0a94491f1f7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseFunctionMetadata.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultFunctionMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.List; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseFunctionMetadata extends DefaultFunctionMetadata - implements DseFunctionMetadata { - - @Nullable private final Boolean deterministic; - @Nullable private final Monotonicity monotonicity; - @NonNull private final List monotonicArgumentNames; - - public DefaultDseFunctionMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull FunctionSignature signature, - @NonNull List parameterNames, - @NonNull String body, - boolean calledOnNullInput, - @NonNull String language, - @NonNull DataType returnType, - @Nullable Boolean deterministic, - @Nullable Boolean monotonic, - @NonNull List monotonicArgumentNames) { - super(keyspace, signature, parameterNames, body, calledOnNullInput, language, returnType); - // set DSE extension attributes - this.deterministic = deterministic; - this.monotonicity = - monotonic == null - ? null - : monotonic - ? Monotonicity.FULLY_MONOTONIC - : monotonicArgumentNames.isEmpty() - ? 
Monotonicity.NOT_MONOTONIC - : Monotonicity.PARTIALLY_MONOTONIC; - this.monotonicArgumentNames = ImmutableList.copyOf(monotonicArgumentNames); - } - - @Override - @Deprecated - public boolean isDeterministic() { - return deterministic != null && deterministic; - } - - @Override - public Optional getDeterministic() { - return Optional.ofNullable(deterministic); - } - - @Override - @Deprecated - public boolean isMonotonic() { - return monotonicity == Monotonicity.FULLY_MONOTONIC; - } - - @Override - public Optional getMonotonicity() { - return Optional.ofNullable(monotonicity); - } - - @NonNull - @Override - public List getMonotonicArgumentNames() { - return this.monotonicArgumentNames; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseFunctionMetadata) { - DseFunctionMetadata that = (DseFunctionMetadata) other; - return Objects.equals(this.getKeyspace(), that.getKeyspace()) - && Objects.equals(this.getSignature(), that.getSignature()) - && Objects.equals(this.getParameterNames(), that.getParameterNames()) - && Objects.equals(this.getBody(), that.getBody()) - && this.isCalledOnNullInput() == that.isCalledOnNullInput() - && Objects.equals(this.getLanguage(), that.getLanguage()) - && Objects.equals(this.getReturnType(), that.getReturnType()) - && Objects.equals(this.deterministic, that.getDeterministic().orElse(null)) - && this.monotonicity == that.getMonotonicity().orElse(null) - && Objects.equals(this.monotonicArgumentNames, that.getMonotonicArgumentNames()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - getKeyspace(), - getSignature(), - getParameterNames(), - getBody(), - isCalledOnNullInput(), - getLanguage(), - getReturnType(), - deterministic, - monotonicity, - monotonicArgumentNames); - } - - @Override - public String toString() { - return "Function Name: " - + this.getSignature().getName().asCql(false) - + ", Keyspace: " - + 
this.getKeyspace().asCql(false) - + ", Language: " - + this.getLanguage() - + ", Return Type: " - + getReturnType().asCql(false, false) - + ", Deterministic: " - + this.deterministic - + ", Monotonicity: " - + this.monotonicity - + ", Monotonic On: " - + (this.monotonicArgumentNames.isEmpty() ? "" : this.monotonicArgumentNames.get(0)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseIndexMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseIndexMetadata.java deleted file mode 100644 index c66d7934151..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseIndexMetadata.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseIndexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultIndexMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseIndexMetadata extends DefaultIndexMetadata implements DseIndexMetadata { - - public DefaultDseIndexMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier table, - @NonNull CqlIdentifier name, - @NonNull IndexKind kind, - @NonNull String target, - @NonNull Map options) { - super(keyspace, table, name, kind, target, options); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseKeyspaceMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseKeyspaceMetadata.java deleted file mode 100644 index 8e54c9082e1..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseKeyspaceMetadata.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseGraphKeyspaceMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseKeyspaceMetadata implements DseGraphKeyspaceMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier name; - private final boolean durableWrites; - private final boolean virtual; - @Nullable private final String graphEngine; - @NonNull private final Map replication; - @NonNull private final Map types; - @NonNull private final Map tables; - @NonNull private final Map views; - @NonNull private final Map functions; - @NonNull private final Map aggregates; - - public DefaultDseKeyspaceMetadata( - @NonNull CqlIdentifier name, - boolean durableWrites, - boolean virtual, - @Nullable String graphEngine, - @NonNull Map replication, - @NonNull Map types, - @NonNull Map tables, - @NonNull Map views, - @NonNull Map functions, - @NonNull Map aggregates) { - this.name = name; - this.durableWrites = durableWrites; - this.virtual = virtual; - this.graphEngine = graphEngine; - this.replication = 
replication; - this.types = types; - this.tables = tables; - this.views = views; - this.functions = functions; - this.aggregates = aggregates; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @Override - public boolean isDurableWrites() { - return durableWrites; - } - - @Override - public boolean isVirtual() { - return virtual; - } - - @NonNull - @Override - public Optional getGraphEngine() { - return Optional.ofNullable(graphEngine); - } - - @NonNull - @Override - public Map getReplication() { - return replication; - } - - @NonNull - @Override - public Map getUserDefinedTypes() { - return types; - } - - @NonNull - @Override - public Map getTables() { - return tables; - } - - @NonNull - @Override - public Map getViews() { - return views; - } - - @NonNull - @Override - public Map getFunctions() { - return functions; - } - - @NonNull - @Override - public Map getAggregates() { - return aggregates; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseGraphKeyspaceMetadata) { - DseGraphKeyspaceMetadata that = (DseGraphKeyspaceMetadata) other; - return Objects.equals(this.name, that.getName()) - && this.durableWrites == that.isDurableWrites() - && this.virtual == that.isVirtual() - && Objects.equals(this.graphEngine, that.getGraphEngine().orElse(null)) - && Objects.equals(this.replication, that.getReplication()) - && Objects.equals(this.types, that.getUserDefinedTypes()) - && Objects.equals(this.tables, that.getTables()) - && Objects.equals(this.views, that.getViews()) - && Objects.equals(this.functions, that.getFunctions()) - && Objects.equals(this.aggregates, that.getAggregates()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - name, - durableWrites, - virtual, - graphEngine, - replication, - types, - tables, - views, - functions, - aggregates); - } - - @Override - public boolean shallowEquals(Object 
other) { - if (other == this) { - return true; - } else if (other instanceof DseGraphKeyspaceMetadata) { - DseGraphKeyspaceMetadata that = (DseGraphKeyspaceMetadata) other; - return Objects.equals(this.name, that.getName()) - && this.durableWrites == that.isDurableWrites() - && Objects.equals(this.graphEngine, that.getGraphEngine().orElse(null)) - && Objects.equals(this.replication, that.getReplication()); - } else { - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseTableMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseTableMetadata.java deleted file mode 100644 index f8fb8cc10d1..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseTableMetadata.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseGraphTableMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseTableMetadata implements DseGraphTableMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier name; - // null for virtual tables - @Nullable private final UUID id; - private final boolean compactStorage; - private final boolean virtual; - @NonNull private final List partitionKey; - @NonNull private final Map clusteringColumns; - @NonNull private final Map columns; - @NonNull private final Map options; - @NonNull private final Map indexes; - @Nullable private final DseVertexMetadata vertex; - @Nullable private final DseEdgeMetadata edge; - - public DefaultDseTableMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - @Nullable UUID id, - boolean compactStorage, - boolean virtual, - @NonNull List partitionKey, - @NonNull Map clusteringColumns, - @NonNull Map columns, - @NonNull Map options, - @NonNull Map indexes, - @Nullable DseVertexMetadata vertex, - @Nullable DseEdgeMetadata edge) { - this.keyspace = keyspace; - this.name = name; - this.id 
= id; - this.compactStorage = compactStorage; - this.virtual = virtual; - this.partitionKey = partitionKey; - this.clusteringColumns = clusteringColumns; - this.columns = columns; - this.options = options; - this.indexes = indexes; - this.vertex = vertex; - this.edge = edge; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @NonNull - @Override - public Optional getId() { - return Optional.ofNullable(id); - } - - @Override - public boolean isCompactStorage() { - return compactStorage; - } - - @Override - public boolean isVirtual() { - return virtual; - } - - @NonNull - @Override - public List getPartitionKey() { - return partitionKey; - } - - @NonNull - @Override - public Map getClusteringColumns() { - return clusteringColumns; - } - - @NonNull - @Override - public Map getColumns() { - return columns; - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @NonNull - @Override - public Map getIndexes() { - return indexes; - } - - @NonNull - @Override - public Optional getVertex() { - return Optional.ofNullable(vertex); - } - - @NonNull - @Override - public Optional getEdge() { - return Optional.ofNullable(edge); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseGraphTableMetadata) { - DseGraphTableMetadata that = (DseGraphTableMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(this.id, that.getId().orElse(null)) - && this.compactStorage == that.isCompactStorage() - && this.virtual == that.isVirtual() - && Objects.equals(this.partitionKey, that.getPartitionKey()) - && Objects.equals(this.clusteringColumns, that.getClusteringColumns()) - && Objects.equals(this.columns, that.getColumns()) - && Objects.equals(this.indexes, that.getIndexes()) - && 
Objects.equals(this.vertex, that.getVertex().orElse(null)) - && Objects.equals(this.edge, that.getEdge().orElse(null)); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, - name, - id, - compactStorage, - virtual, - partitionKey, - clusteringColumns, - columns, - indexes, - vertex, - edge); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseVertexMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseVertexMetadata.java deleted file mode 100644 index 05ba2823704..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseVertexMetadata.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Objects; - -public class DefaultDseVertexMetadata implements DseVertexMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier labelName; - - public DefaultDseVertexMetadata(@NonNull CqlIdentifier labelName) { - this.labelName = Preconditions.checkNotNull(labelName); - } - - @NonNull - @Override - public CqlIdentifier getLabelName() { - return labelName; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DefaultDseVertexMetadata) { - DefaultDseVertexMetadata that = (DefaultDseVertexMetadata) other; - return Objects.equals(this.labelName, that.getLabelName()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return labelName.hashCode(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseViewMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseViewMetadata.java deleted file mode 100644 index f04b7640041..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseViewMetadata.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseViewMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseViewMetadata implements DseViewMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier name; - @NonNull private final CqlIdentifier baseTable; - private final boolean includesAllColumns; - @Nullable private final String whereClause; - @NonNull private final UUID id; - @NonNull private final ImmutableList partitionKey; - @NonNull private final ImmutableMap clusteringColumns; - @NonNull private final ImmutableMap columns; - @NonNull private final Map options; - - public DefaultDseViewMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - @NonNull CqlIdentifier baseTable, - boolean includesAllColumns, - 
@Nullable String whereClause, - @NonNull UUID id, - @NonNull ImmutableList partitionKey, - @NonNull ImmutableMap clusteringColumns, - @NonNull ImmutableMap columns, - @NonNull Map options) { - this.keyspace = keyspace; - this.name = name; - this.baseTable = baseTable; - this.includesAllColumns = includesAllColumns; - this.whereClause = whereClause; - this.id = id; - this.partitionKey = partitionKey; - this.clusteringColumns = clusteringColumns; - this.columns = columns; - this.options = options; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @NonNull - @Override - public Optional getId() { - return Optional.of(id); - } - - @NonNull - @Override - public CqlIdentifier getBaseTable() { - return baseTable; - } - - @Override - public boolean includesAllColumns() { - return includesAllColumns; - } - - @NonNull - @Override - public Optional getWhereClause() { - return Optional.ofNullable(whereClause); - } - - @NonNull - @Override - public List getPartitionKey() { - return partitionKey; - } - - @NonNull - @Override - public Map getClusteringColumns() { - return clusteringColumns; - } - - @NonNull - @Override - public Map getColumns() { - return columns; - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseViewMetadata) { - DseViewMetadata that = (DseViewMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(this.baseTable, that.getBaseTable()) - && this.includesAllColumns == that.includesAllColumns() - && Objects.equals(this.whereClause, that.getWhereClause().orElse(null)) - && Objects.equals(Optional.of(this.id), that.getId()) - && Objects.equals(this.partitionKey, that.getPartitionKey()) - && 
Objects.equals(this.clusteringColumns, that.getClusteringColumns()) - && Objects.equals(this.columns, that.getColumns()) - && Objects.equals(this.options, that.getOptions()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, - name, - baseTable, - includesAllColumns, - whereClause, - id, - partitionKey, - clusteringColumns, - columns, - options); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/ScriptHelper.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/ScriptHelper.java deleted file mode 100644 index 64f6cac19f0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/ScriptHelper.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import java.util.List; - -public class ScriptHelper { - - public static void appendEdgeSide( - ScriptBuilder builder, - CqlIdentifier table, - CqlIdentifier label, - List partitionKeyColumns, - List clusteringColumns, - String keyword) { - builder.append(" ").append(keyword).append(label).append("("); - - if (partitionKeyColumns.size() == 1) { // PRIMARY KEY (k - builder.append(partitionKeyColumns.get(0)); - } else { // PRIMARY KEY ((k1, k2) - builder.append("("); - boolean first = true; - for (CqlIdentifier pkColumn : partitionKeyColumns) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(pkColumn); - } - builder.append(")"); - } - // PRIMARY KEY (, cc1, cc2, cc3) - for (CqlIdentifier clusteringColumn : clusteringColumns) { - builder.append(", ").append(clusteringColumn); - } - builder.append(")"); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseAggregateParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseAggregateParser.java deleted file mode 100644 index 37a7a2768c2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseAggregateParser.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseAggregateMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseAggregateMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.AggregateParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DseAggregateParser { - - private final AggregateParser aggregateParser; - private final InternalDriverContext context; - - public DseAggregateParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.aggregateParser = new AggregateParser(dataTypeParser, context); - this.context = context; - } - - public DseAggregateMetadata parseAggregate( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - AggregateMetadata aggregate = aggregateParser.parseAggregate(row, keyspaceId, userDefinedTypes); - // parse the DSE extended columns - final Boolean deterministic = - row.contains("deterministic") ? 
row.getBoolean("deterministic") : null; - - return new DefaultDseAggregateMetadata( - aggregate.getKeyspace(), - aggregate.getSignature(), - aggregate.getFinalFuncSignature().orElse(null), - aggregate.getInitCond().orElse(null), - aggregate.getReturnType(), - aggregate.getStateFuncSignature(), - aggregate.getStateType(), - context.getCodecRegistry().codecFor(aggregate.getStateType()), - deterministic); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseFunctionParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseFunctionParser.java deleted file mode 100644 index 0d88bce8740..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseFunctionParser.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseFunctionMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.FunctionParser; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DseFunctionParser { - - private final FunctionParser functionParser; - - public DseFunctionParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.functionParser = new FunctionParser(dataTypeParser, context); - } - - public DseFunctionMetadata parseFunction( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - FunctionMetadata function = functionParser.parseFunction(row, keyspaceId, userDefinedTypes); - // parse the DSE extended columns - final Boolean deterministic = - row.contains("deterministic") ? row.getBoolean("deterministic") : null; - final Boolean monotonic = row.contains("monotonic") ? row.getBoolean("monotonic") : null; - // stream the list of strings into a list of CqlIdentifiers - final List monotonicOn = - row.contains("monotonic_on") - ? 
row.getListOfString("monotonic_on").stream() - .map(CqlIdentifier::fromInternal) - .collect(Collectors.toList()) - : Collections.emptyList(); - - return new DefaultDseFunctionMetadata( - function.getKeyspace(), - function.getSignature(), - function.getParameterNames(), - function.getBody(), - function.isCalledOnNullInput(), - function.getLanguage(), - function.getReturnType(), - deterministic, - monotonic, - monotonicOn); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseSchemaParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseSchemaParser.java deleted file mode 100644 index ca7fb74a746..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseSchemaParser.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseAggregateMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseKeyspaceMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseTableMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseViewMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseKeyspaceMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.CassandraSchemaParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SimpleJsonParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.UserDefinedTypeParser; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; 
-import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; -import java.util.Collections; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Default parser implementation for DSE. - * - *

For modularity, the code for each element row is split into separate classes (schema stuff is - * not on the hot path, so creating a few extra objects doesn't matter). - */ -@ThreadSafe -public class DseSchemaParser implements SchemaParser { - - private static final Logger LOG = LoggerFactory.getLogger(CassandraSchemaParser.class); - - private final SchemaRows rows; - private final UserDefinedTypeParser userDefinedTypeParser; - private final DseTableParser tableParser; - private final DseViewParser viewParser; - private final DseFunctionParser functionParser; - private final DseAggregateParser aggregateParser; - private final String logPrefix; - private final long startTimeNs = System.nanoTime(); - - public DseSchemaParser(SchemaRows rows, InternalDriverContext context) { - this.rows = rows; - this.logPrefix = context.getSessionName(); - - this.userDefinedTypeParser = new UserDefinedTypeParser(rows.dataTypeParser(), context); - this.tableParser = new DseTableParser(rows, context); - this.viewParser = new DseViewParser(rows, context); - this.functionParser = new DseFunctionParser(rows.dataTypeParser(), context); - this.aggregateParser = new DseAggregateParser(rows.dataTypeParser(), context); - } - - @Override - public SchemaRefresh parse() { - ImmutableMap.Builder keyspacesBuilder = ImmutableMap.builder(); - for (AdminRow row : rows.keyspaces()) { - DseKeyspaceMetadata keyspace = parseKeyspace(row); - keyspacesBuilder.put(keyspace.getName(), keyspace); - } - for (AdminRow row : rows.virtualKeyspaces()) { - DseKeyspaceMetadata keyspace = parseVirtualKeyspace(row); - keyspacesBuilder.put(keyspace.getName(), keyspace); - } - SchemaRefresh refresh = new SchemaRefresh(keyspacesBuilder.build()); - LOG.debug("[{}] Schema parsing took {}", logPrefix, NanoTime.formatTimeSince(startTimeNs)); - return refresh; - } - - private DseKeyspaceMetadata parseKeyspace(AdminRow keyspaceRow) { - - // Cassandra <= 2.2 - // CREATE TABLE system.schema_keyspaces ( - // keyspace_name text 
PRIMARY KEY, - // durable_writes boolean, - // strategy_class text, - // strategy_options text - // ) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.keyspaces ( - // keyspace_name text PRIMARY KEY, - // durable_writes boolean, - // replication frozen> - // ) - // - // DSE >= 6.8: same as Cassandra 3 + graph_engine text - CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); - boolean durableWrites = - MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); - String graphEngine = keyspaceRow.getString("graph_engine"); - - Map replicationOptions; - if (keyspaceRow.contains("strategy_class")) { - String strategyClass = keyspaceRow.getString("strategy_class"); - Map strategyOptions = - SimpleJsonParser.parseStringMap(keyspaceRow.getString("strategy_options")); - replicationOptions = - ImmutableMap.builder() - .putAll(strategyOptions) - .put("class", strategyClass) - .build(); - } else { - replicationOptions = keyspaceRow.getMapOfStringToString("replication"); - } - - Map types = parseTypes(keyspaceId); - - return new DefaultDseKeyspaceMetadata( - keyspaceId, - durableWrites, - false, - graphEngine, - replicationOptions, - types, - parseTables(keyspaceId, types), - parseViews(keyspaceId, types), - parseFunctions(keyspaceId, types), - parseAggregates(keyspaceId, types)); - } - - private Map parseTypes(CqlIdentifier keyspaceId) { - return userDefinedTypeParser.parse(rows.types().get(keyspaceId), keyspaceId); - } - - private Map parseTables( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); - Multimap vertices = rows.vertices().get(keyspaceId); - Multimap edges = rows.edges().get(keyspaceId); - for (AdminRow tableRow : rows.tables().get(keyspaceId)) { - DseTableMetadata table = tableParser.parseTable(tableRow, keyspaceId, types, vertices, edges); - if (table != null) { - tablesBuilder.put(table.getName(), table); - } - } - return 
tablesBuilder.build(); - } - - private Map parseViews( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder viewsBuilder = ImmutableMap.builder(); - for (AdminRow viewRow : rows.views().get(keyspaceId)) { - DseViewMetadata view = viewParser.parseView(viewRow, keyspaceId, types); - if (view != null) { - viewsBuilder.put(view.getName(), view); - } - } - return viewsBuilder.build(); - } - - private Map parseFunctions( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder functionsBuilder = - ImmutableMap.builder(); - for (AdminRow functionRow : rows.functions().get(keyspaceId)) { - DseFunctionMetadata function = functionParser.parseFunction(functionRow, keyspaceId, types); - if (function != null) { - functionsBuilder.put(function.getSignature(), function); - } - } - return functionsBuilder.build(); - } - - private Map parseAggregates( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder aggregatesBuilder = - ImmutableMap.builder(); - for (AdminRow aggregateRow : rows.aggregates().get(keyspaceId)) { - DseAggregateMetadata aggregate = - aggregateParser.parseAggregate(aggregateRow, keyspaceId, types); - if (aggregate != null) { - aggregatesBuilder.put(aggregate.getSignature(), aggregate); - } - } - return aggregatesBuilder.build(); - } - - private DseKeyspaceMetadata parseVirtualKeyspace(AdminRow keyspaceRow) { - - CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); - boolean durableWrites = - MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); - - return new DefaultDseKeyspaceMetadata( - keyspaceId, - durableWrites, - true, - null, - Collections.emptyMap(), - Collections.emptyMap(), - parseVirtualTables(keyspaceId), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - private Map parseVirtualTables(CqlIdentifier keyspaceId) { - ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); - for (AdminRow tableRow : 
rows.virtualTables().get(keyspaceId)) { - DseTableMetadata table = tableParser.parseVirtualTable(tableRow, keyspaceId); - if (table != null) { - tablesBuilder.put(table.getName(), table); - } - } - return tablesBuilder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseTableParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseTableParser.java deleted file mode 100644 index 7fd4a5f0167..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseTableParser.java +++ /dev/null @@ -1,425 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseColumnMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseIndexMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseTableMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseColumnMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseEdgeMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseIndexMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseTableMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseVertexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.CqlIdentifiers; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameCompositeParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RawColumn; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; 
-import com.datastax.oss.driver.internal.core.metadata.schema.parsing.TableParser; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DseTableParser extends RelationParser { - - private static final Logger LOG = LoggerFactory.getLogger(TableParser.class); - - public DseTableParser(SchemaRows rows, InternalDriverContext context) { - super(rows, context); - } - - public DseTableMetadata parseTable( - AdminRow tableRow, - CqlIdentifier keyspaceId, - Map userTypes, - Multimap vertices, - Multimap edges) { - // Cassandra <= 2.2: - // CREATE TABLE system.schema_columnfamilies ( - // keyspace_name text, - // columnfamily_name text, - // bloom_filter_fp_chance double, - // caching text, - // cf_id uuid, - // column_aliases text, (2.1 only) - // comment text, - // compaction_strategy_class text, - // compaction_strategy_options text, - // comparator text, - // compression_parameters text, - // default_time_to_live int, - // default_validator text, - // dropped_columns map, - // gc_grace_seconds int, - // index_interval int, - // is_dense boolean, (2.1 only) - // key_aliases text, (2.1 only) - // key_validator text, - // local_read_repair_chance double, - // max_compaction_threshold int, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_compaction_threshold int, - // min_index_interval 
int, - // read_repair_chance double, - // speculative_retry text, - // subcomparator text, - // type text, - // value_alias text, (2.1 only) - // PRIMARY KEY (keyspace_name, columnfamily_name) - // ) WITH CLUSTERING ORDER BY (columnfamily_name ASC) - // - // Cassandra 3.0: - // CREATE TABLE system_schema.tables ( - // keyspace_name text, - // table_name text, - // bloom_filter_fp_chance double, - // caching frozen>, - // cdc boolean, - // comment text, - // compaction frozen>, - // compression frozen>, - // crc_check_chance double, - // dclocal_read_repair_chance double, - // default_time_to_live int, - // extensions frozen>, - // flags frozen>, - // gc_grace_seconds int, - // id uuid, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // PRIMARY KEY (keyspace_name, table_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC) - CqlIdentifier tableId = - CqlIdentifier.fromInternal( - tableRow.getString( - tableRow.contains("table_name") ? "table_name" : "columnfamily_name")); - - UUID uuid = tableRow.contains("id") ? 
tableRow.getUuid("id") : tableRow.getUuid("cf_id"); - - List rawColumns = - RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - tableId); - return null; - } - - boolean isCompactStorage; - if (tableRow.contains("flags")) { - Set flags = tableRow.getSetOfString("flags"); - boolean isDense = flags.contains("dense"); - boolean isSuper = flags.contains("super"); - boolean isCompound = flags.contains("compound"); - isCompactStorage = isSuper || isDense || !isCompound; - boolean isStaticCompact = !isSuper && !isDense && !isCompound; - if (isStaticCompact) { - RawColumn.pruneStaticCompactTableColumns(rawColumns); - } else if (isDense) { - RawColumn.pruneDenseTableColumnsV3(rawColumns); - } - } else { - boolean isDense = tableRow.getBoolean("is_dense"); - if (isDense) { - RawColumn.pruneDenseTableColumnsV2(rawColumns); - } - DataTypeClassNameCompositeParser.ParseResult comparator = - new DataTypeClassNameCompositeParser() - .parseWithComposite(tableRow.getString("comparator"), keyspaceId, userTypes, context); - isCompactStorage = isDense || !comparator.isComposite; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - ImmutableMap.Builder indexesBuilder = ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - DseColumnMetadata column = - new DefaultDseColumnMetadata( - keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case 
RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - // nothing to do - } - allColumnsBuilder.put(column.getName(), column); - - DseIndexMetadata index = buildLegacyIndex(raw, column); - if (index != null) { - indexesBuilder.put(index.getName(), index); - } - } - - Map options; - try { - options = parseOptions(tableRow); - } catch (Exception e) { - // Options change the most often, so be especially lenient if anything goes wrong. - Loggers.warnWithException( - LOG, - "[{}] Error while parsing options for {}.{}, getOptions() will be empty", - logPrefix, - keyspaceId, - tableId, - e); - options = Collections.emptyMap(); - } - - Collection indexRows = - rows.indexes().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId); - for (AdminRow indexRow : indexRows) { - DseIndexMetadata index = buildModernIndex(keyspaceId, tableId, indexRow); - indexesBuilder.put(index.getName(), index); - } - - return new DefaultDseTableMetadata( - keyspaceId, - tableId, - uuid, - isCompactStorage, - false, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - options, - indexesBuilder.build(), - buildVertex(tableId, vertices), - buildEdge(tableId, edges, vertices)); - } - - DseTableMetadata parseVirtualTable(AdminRow tableRow, CqlIdentifier keyspaceId) { - - CqlIdentifier tableId = CqlIdentifier.fromInternal(tableRow.getString("table_name")); - - List rawColumns = - RawColumn.toRawColumns( - rows.virtualColumns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - tableId); - return null; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - 
ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = - rows.dataTypeParser().parse(keyspaceId, raw.dataType, Collections.emptyMap(), context); - DseColumnMetadata column = - new DefaultDseColumnMetadata( - keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - } - - allColumnsBuilder.put(column.getName(), column); - } - - return new DefaultDseTableMetadata( - keyspaceId, - tableId, - null, - false, - true, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - Collections.emptyMap(), - Collections.emptyMap(), - null, - null); - } - - // In C*<=2.2, index information is stored alongside the column. 
- private DseIndexMetadata buildLegacyIndex(RawColumn raw, ColumnMetadata column) { - if (raw.indexName == null) { - return null; - } - return new DefaultDseIndexMetadata( - column.getKeyspace(), - column.getParent(), - CqlIdentifier.fromInternal(raw.indexName), - IndexKind.valueOf(raw.indexType), - buildLegacyIndexTarget(column, raw.indexOptions), - raw.indexOptions); - } - - private static String buildLegacyIndexTarget(ColumnMetadata column, Map options) { - String columnName = column.getName().asCql(true); - DataType columnType = column.getType(); - if (options.containsKey("index_keys")) { - return String.format("keys(%s)", columnName); - } - if (options.containsKey("index_keys_and_values")) { - return String.format("entries(%s)", columnName); - } - if ((columnType instanceof ListType && ((ListType) columnType).isFrozen()) - || (columnType instanceof SetType && ((SetType) columnType).isFrozen()) - || (columnType instanceof MapType && ((MapType) columnType).isFrozen())) { - return String.format("full(%s)", columnName); - } - // Note: the keyword 'values' is not accepted as a valid index target function until 3.0 - return columnName; - } - - // In C*>=3.0, index information is stored in a dedicated table: - // CREATE TABLE system_schema.indexes ( - // keyspace_name text, - // table_name text, - // index_name text, - // kind text, - // options frozen>, - // PRIMARY KEY (keyspace_name, table_name, index_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC, index_name ASC) - private DseIndexMetadata buildModernIndex( - CqlIdentifier keyspaceId, CqlIdentifier tableId, AdminRow row) { - CqlIdentifier name = CqlIdentifier.fromInternal(row.getString("index_name")); - IndexKind kind = IndexKind.valueOf(row.getString("kind")); - Map options = row.getMapOfStringToString("options"); - String target = options.get("target"); - return new DefaultDseIndexMetadata(keyspaceId, tableId, name, kind, target, options); - } - - private DseVertexMetadata buildVertex( - CqlIdentifier 
tableId, Multimap keyspaceVertices) { - - if (keyspaceVertices == null) { - return null; - } - Collection tableVertices = keyspaceVertices.get(tableId); - if (tableVertices == null || tableVertices.isEmpty()) { - return null; - } - - AdminRow row = tableVertices.iterator().next(); - return new DefaultDseVertexMetadata(getLabel(row)); - } - - private DseEdgeMetadata buildEdge( - CqlIdentifier tableId, - Multimap keyspaceEdges, - Multimap keyspaceVertices) { - - if (keyspaceEdges == null) { - return null; - } - - Collection tableEdges = keyspaceEdges.get(tableId); - if (tableEdges == null || tableEdges.isEmpty()) { - return null; - } - - AdminRow row = tableEdges.iterator().next(); - - CqlIdentifier fromTable = CqlIdentifier.fromInternal(row.getString("from_table")); - - CqlIdentifier toTable = CqlIdentifier.fromInternal(row.getString("to_table")); - - return new DefaultDseEdgeMetadata( - getLabel(row), - fromTable, - findVertexLabel(fromTable, keyspaceVertices, "incoming"), - CqlIdentifiers.wrapInternal(row.getListOfString("from_partition_key_columns")), - CqlIdentifiers.wrapInternal(row.getListOfString("from_clustering_columns")), - toTable, - findVertexLabel(toTable, keyspaceVertices, "outgoing"), - CqlIdentifiers.wrapInternal(row.getListOfString("to_partition_key_columns")), - CqlIdentifiers.wrapInternal(row.getListOfString("to_clustering_columns"))); - } - - private CqlIdentifier getLabel(AdminRow row) { - String rawLabel = row.getString("label_name"); - return (rawLabel == null || rawLabel.isEmpty()) ? null : CqlIdentifier.fromInternal(rawLabel); - } - - // system_schema.edges only contains vertex table names. We also expose the labels in our metadata - // objects, so we need to look them up in system_schema.vertices. - private CqlIdentifier findVertexLabel( - CqlIdentifier table, - Multimap keyspaceVertices, - String directionForErrorMessage) { - Collection tableVertices = - (keyspaceVertices == null) ? 
null : keyspaceVertices.get(table); - if (tableVertices == null || tableVertices.isEmpty()) { - throw new IllegalArgumentException( - String.format( - "Missing vertex definition for %s table %s", - directionForErrorMessage, table.asCql(true))); - } - - AdminRow row = tableVertices.iterator().next(); - return getLabel(row); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseViewParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseViewParser.java deleted file mode 100644 index 07a1e2b5c39..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseViewParser.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseColumnMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseViewMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseColumnMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseViewMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RawColumn; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DseViewParser extends RelationParser { - - private static final Logger LOG = LoggerFactory.getLogger(DseViewParser.class); - - public DseViewParser(SchemaRows rows, InternalDriverContext context) { - super(rows, context); - } - - public DseViewMetadata parseView( - AdminRow viewRow, 
CqlIdentifier keyspaceId, Map userTypes) { - // Cassandra 3.0 (no views in earlier versions): - // CREATE TABLE system_schema.views ( - // keyspace_name text, - // view_name text, - // base_table_id uuid, - // base_table_name text, - // bloom_filter_fp_chance double, - // caching frozen>, - // cdc boolean, - // comment text, - // compaction frozen>, - // compression frozen>, - // crc_check_chance double, - // dclocal_read_repair_chance double, - // default_time_to_live int, - // extensions frozen>, - // gc_grace_seconds int, - // id uuid, - // include_all_columns boolean, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // where_clause text, - // PRIMARY KEY (keyspace_name, view_name) - // ) WITH CLUSTERING ORDER BY (view_name ASC) - CqlIdentifier viewId = CqlIdentifier.fromInternal(viewRow.getString("view_name")); - - UUID uuid = viewRow.getUuid("id"); - CqlIdentifier baseTableId = CqlIdentifier.fromInternal(viewRow.getString("base_table_name")); - boolean includesAllColumns = - MoreObjects.firstNonNull(viewRow.getBoolean("include_all_columns"), false); - String whereClause = viewRow.getString("where_clause"); - - List rawColumns = - RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(viewId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing VIEW refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - viewId); - return null; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - DseColumnMetadata column = - new DefaultDseColumnMetadata( - 
keyspaceId, viewId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - // nothing to do - } - allColumnsBuilder.put(column.getName(), column); - } - - Map options; - try { - options = parseOptions(viewRow); - } catch (Exception e) { - // Options change the most often, so be especially lenient if anything goes wrong. - Loggers.warnWithException( - LOG, - "[{}] Error while parsing options for {}.{}, getOptions() will be empty", - logPrefix, - keyspaceId, - viewId, - e); - options = Collections.emptyMap(); - } - - return new DefaultDseViewMetadata( - keyspaceId, - viewId, - baseTableId, - includesAllColumns, - whereClause, - uuid, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - options); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodec.java deleted file mode 100644 index 13238519e06..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodec.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.protocol; - -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodec; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.zip.CRC32; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -/** - * Minimal implementation of {@link PrimitiveCodec} for Tinkerpop {@link Buffer} instances. - * - *

This approach represents a temporary design compromise. PrimitiveCodec is primarily used for - * handling data directly from Netty, a task satisfied by {@link ByteBufPrimitiveCodec}. But - * PrimitiveCodec is also used to implement graph serialization for some of the "dynamic" types - * (notably UDTs and tuples). Since we're converting graph serialization to use the new Tinkerpop - * Buffer API we need just enough of a PrimitiveCodec impl to satisfy the needs of graph - * serialization... and nothing more. - * - *

A more explicit approach would be to change graph serialization to use a different interface, - * some kind of subset of PrimitiveCodec.... and then make PrimitiveCodec extend this interface. - * This is left as future work for now since it involves changes to the native-protocol lib(s). - */ -public class TinkerpopBufferPrimitiveCodec implements PrimitiveCodec { - - private final DseNettyBufferFactory factory; - - public TinkerpopBufferPrimitiveCodec(DseNettyBufferFactory factory) { - this.factory = factory; - } - - @Override - public Buffer allocate(int size) { - // Note: we use io() here to match up to what ByteBufPrimitiveCodec does, but be warned that - // ByteBufs created in this way don't support the array() method used elsewhere in this codec - // (readString() specifically). As such usage of this method to create Buffer instances is - // discouraged; we have a factory for that. - return this.factory.io(size, size); - } - - @Override - public void release(Buffer toRelease) { - toRelease.release(); - } - - @Override - public int sizeOf(Buffer toMeasure) { - return toMeasure.readableBytes(); - } - - // TODO - @Override - public Buffer concat(Buffer left, Buffer right) { - boolean leftReadable = left.readableBytes() > 0; - boolean rightReadable = right.readableBytes() > 0; - if (!(leftReadable || rightReadable)) { - return factory.heap(); - } - if (!leftReadable) { - return right; - } - if (!rightReadable) { - return left; - } - Buffer rv = factory.composite(left, right); - // c.readerIndex() is 0, which is the first readable byte in left - rv.writerIndex( - left.writerIndex() - left.readerIndex() + right.writerIndex() - right.readerIndex()); - return rv; - } - - @Override - public void markReaderIndex(Buffer source) { - throw new UnsupportedOperationException(); - } - - @Override - public void resetReaderIndex(Buffer source) { - throw new UnsupportedOperationException(); - } - - @Override - public byte readByte(Buffer source) { - return source.readByte(); - } 
- - @Override - public int readInt(Buffer source) { - return source.readInt(); - } - - @Override - public int readInt(Buffer source, int offset) { - throw new UnsupportedOperationException(); - } - - @Override - public InetAddress readInetAddr(Buffer source) { - int length = readByte(source) & 0xFF; - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return newInetAddress(bytes); - } - - @Override - public long readLong(Buffer source) { - return source.readLong(); - } - - @Override - public int readUnsignedShort(Buffer source) { - return source.readShort() & 0xFFFF; - } - - @Override - public ByteBuffer readBytes(Buffer source) { - int length = readInt(source); - if (length < 0) return null; - return source.nioBuffer(source.readerIndex(), length); - } - - @Override - public byte[] readShortBytes(Buffer source) { - try { - int length = readUnsignedShort(source); - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return bytes; - } catch (IndexOutOfBoundsException e) { - throw new IllegalArgumentException( - "Not enough bytes to read a byte array preceded by its 2 bytes length"); - } - } - - // Copy of PrimitiveCodec impl - @Override - public String readString(Buffer source) { - int length = readUnsignedShort(source); - return readString(source, length); - } - - @Override - public String readLongString(Buffer source) { - int length = readInt(source); - return readString(source, length); - } - - @Override - public Buffer readRetainedSlice(Buffer source, int sliceLength) { - throw new UnsupportedOperationException(); - } - - @Override - public void updateCrc(Buffer source, CRC32 crc) { - throw new UnsupportedOperationException(); - } - - @Override - public void writeByte(byte b, Buffer dest) { - dest.writeByte(b); - } - - @Override - public void writeInt(int i, Buffer dest) { - dest.writeInt(i); - } - - @Override - public void writeInetAddr(InetAddress address, Buffer dest) { - byte[] bytes = address.getAddress(); - writeByte((byte) 
bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeLong(long l, Buffer dest) { - dest.writeLong(l); - } - - @Override - public void writeUnsignedShort(int i, Buffer dest) { - dest.writeShort(i); - } - - // Copy of PrimitiveCodec impl - @Override - public void writeString(String s, Buffer dest) { - - byte[] bytes = s.getBytes(Charsets.UTF_8); - writeUnsignedShort(bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeLongString(String s, Buffer dest) { - byte[] bytes = s.getBytes(Charsets.UTF_8); - writeInt(bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeBytes(ByteBuffer bytes, Buffer dest) { - if (bytes == null) { - writeInt(-1, dest); - } else { - writeInt(bytes.remaining(), dest); - dest.writeBytes(bytes.duplicate()); - } - } - - @Override - public void writeBytes(byte[] bytes, Buffer dest) { - if (bytes == null) { - writeInt(-1, dest); - } else { - writeInt(bytes.length, dest); - dest.writeBytes(bytes); - } - } - - @Override - public void writeShortBytes(byte[] bytes, Buffer dest) { - writeUnsignedShort(bytes.length, dest); - dest.writeBytes(bytes); - } - - // Based on PrimitiveCodec impl, although that method leverages some - // Netty built-ins which we have to do manually here - private static String readString(Buffer buff, int length) { - try { - - // Basically what io.netty.buffer.ByteBufUtil.decodeString() does minus some extra - // ByteBuf-specific ops - int offset; - byte[] bytes; - ByteBuffer byteBuff = buff.nioBuffer(); - if (byteBuff.hasArray()) { - - bytes = byteBuff.array(); - offset = byteBuff.arrayOffset(); - } else { - - bytes = new byte[length]; - byteBuff.get(bytes, 0, length); - offset = 0; - } - - String str = new String(bytes, offset, length, Charsets.UTF_8); - - // Ops against the NIO buffers don't impact the read/write indexes for he Buffer - // itself so we have to do that manually - buff.readerIndex(buff.readerIndex() + length); - return str; 
- } catch (IndexOutOfBoundsException e) { - throw new IllegalArgumentException( - "Not enough bytes to read an UTF-8 serialized string of size " + length, e); - } - } - - // TODO: Code below copied directly from ByteBufPrimitiveCodec, probably want to consolidate this - // somewhere - private static InetAddress newInetAddress(byte[] bytes) { - try { - return InetAddress.getByAddress(bytes); - } catch (UnknownHostException e) { - // Per the Javadoc, the only way this can happen is if the length is illegal - throw new IllegalArgumentException( - String.format("Invalid address length: %d (%s)", bytes.length, Arrays.toString(bytes))); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/search/DateRangeUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/search/DateRangeUtil.java deleted file mode 100644 index 15e278260c5..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/search/DateRangeUtil.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.search; - -import com.datastax.dse.driver.api.core.data.time.DateRangePrecision; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.text.ParseException; -import java.time.LocalDateTime; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.time.temporal.ChronoField; -import java.time.temporal.ChronoUnit; -import java.time.temporal.TemporalAdjusters; -import java.util.Calendar; -import java.util.Locale; -import java.util.Map; -import java.util.TimeZone; - -public class DateRangeUtil { - - /** Sets all the fields smaller than the given unit to their lowest possible value. */ - @NonNull - public static ZonedDateTime roundDown(@NonNull ZonedDateTime date, @NonNull ChronoUnit unit) { - switch (unit) { - case YEARS: - return date.with(TemporalAdjusters.firstDayOfYear()).truncatedTo(ChronoUnit.DAYS); - case MONTHS: - return date.with(TemporalAdjusters.firstDayOfMonth()).truncatedTo(ChronoUnit.DAYS); - case DAYS: - case HOURS: - case MINUTES: - case SECONDS: - case MILLIS: - return date.truncatedTo(unit); - default: - throw new IllegalArgumentException("Unsupported unit for rounding: " + unit); - } - } - - /** Sets all the fields smaller than the given unit to their highest possible value. */ - @NonNull - public static ZonedDateTime roundUp(@NonNull ZonedDateTime date, @NonNull ChronoUnit unit) { - return roundDown(date, unit) - .plus(1, unit) - // Even though ZDT has nanosecond-precision, DSE only rounds to millisecond precision so be - // consistent with that - .minus(1, ChronoUnit.MILLIS); - } - - /** - * Parses the given string as a date in a range bound. - * - *

This method deliberately uses legacy time APIs, in order to be as close as possible to the - * server-side parsing logic. We want the client to behave exactly like the server, i.e. parsing a - * date locally and inlining it in a CQL query should always yield the same result as binding the - * date as a value. - */ - public static Calendar parseCalendar(String source) throws ParseException { - // The contents of this method are based on Lucene's DateRangePrefixTree#parseCalendar, released - // under the Apache License, Version 2.0. - // Following is the original notice from that file: - - // Licensed to the Apache Software Foundation (ASF) under one or more - // contributor license agreements. See the NOTICE file distributed with - // this work for additional information regarding copyright ownership. - // The ASF licenses this file to You under the Apache License, Version 2.0 - // (the "License"); you may not use this file except in compliance with - // the License. You may obtain a copy of the License at - // - // http://www.apache.org/licenses/LICENSE-2.0 - // - // Unless required by applicable law or agreed to in writing, software - // distributed under the License is distributed on an "AS IS" BASIS, - // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - // See the License for the specific language governing permissions and - // limitations under the License. - - if (source == null || source.isEmpty()) { - throw new IllegalArgumentException("Can't parse a null or blank string"); - } - - Calendar calendar = newCalendar(); - if (source.equals("*")) { - return calendar; - } - int offset = 0; // a pointer - try { - // year & era: - int lastOffset = - (source.charAt(source.length() - 1) == 'Z') ? 
source.length() - 1 : source.length(); - int hyphenIdx = source.indexOf('-', 1); // look past possible leading hyphen - if (hyphenIdx < 0) { - hyphenIdx = lastOffset; - } - int year = Integer.parseInt(source.substring(offset, hyphenIdx)); - calendar.set(Calendar.ERA, year <= 0 ? 0 : 1); - calendar.set(Calendar.YEAR, year <= 0 ? -1 * year + 1 : year); - offset = hyphenIdx + 1; - if (lastOffset < offset) { - return calendar; - } - - // NOTE: We aren't validating separator chars, and we unintentionally accept leading +/-. - // The str.substring()'s hopefully get optimized to be stack-allocated. - - // month: - calendar.set( - Calendar.MONTH, - Integer.parseInt(source.substring(offset, offset + 2)) - 1); // starts at 0 - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // day: - calendar.set(Calendar.DAY_OF_MONTH, Integer.parseInt(source.substring(offset, offset + 2))); - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // hour: - calendar.set(Calendar.HOUR_OF_DAY, Integer.parseInt(source.substring(offset, offset + 2))); - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // minute: - calendar.set(Calendar.MINUTE, Integer.parseInt(source.substring(offset, offset + 2))); - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // second: - calendar.set(Calendar.SECOND, Integer.parseInt(source.substring(offset, offset + 2))); - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // ms: - calendar.set(Calendar.MILLISECOND, Integer.parseInt(source.substring(offset, offset + 3))); - offset += 3; // last one, move to next char - if (lastOffset == offset) { - return calendar; - } - } catch (Exception e) { - ParseException pe = new ParseException("Improperly formatted date: " + source, offset); - pe.initCause(e); - throw pe; - } - throw new ParseException("Improperly formatted date: " + source, offset); - } - - private static Calendar newCalendar() { - Calendar calendar = Calendar.getInstance(UTC, 
Locale.ROOT); - calendar.clear(); - return calendar; - } - - private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); - - /** - * Returns the precision of a calendar obtained through {@link #parseCalendar(String)}, or {@code - * null} if no field was set. - */ - @Nullable - public static DateRangePrecision getPrecision(Calendar calendar) { - DateRangePrecision lastPrecision = null; - for (Map.Entry entry : FIELD_BY_PRECISION.entrySet()) { - DateRangePrecision precision = entry.getKey(); - int field = entry.getValue(); - if (calendar.isSet(field)) { - lastPrecision = precision; - } else { - break; - } - } - return lastPrecision; - } - - // Note: this could be a field on DateRangePrecision, but it's only used within this class so it's - // better not to expose it. - private static final ImmutableMap FIELD_BY_PRECISION = - ImmutableMap.builder() - .put(DateRangePrecision.YEAR, Calendar.YEAR) - .put(DateRangePrecision.MONTH, Calendar.MONTH) - .put(DateRangePrecision.DAY, Calendar.DAY_OF_MONTH) - .put(DateRangePrecision.HOUR, Calendar.HOUR_OF_DAY) - .put(DateRangePrecision.MINUTE, Calendar.MINUTE) - .put(DateRangePrecision.SECOND, Calendar.SECOND) - .put(DateRangePrecision.MILLISECOND, Calendar.MILLISECOND) - .build(); - - public static ZonedDateTime toZonedDateTime(Calendar calendar) { - int year = calendar.get(Calendar.YEAR); - if (calendar.get(Calendar.ERA) == 0) { - // BC era; 1 BC == 0 AD, 0 BD == -1 AD, etc - year -= 1; - if (year > 0) { - year = -year; - } - } - LocalDateTime localDateTime = - LocalDateTime.of( - year, - calendar.get(Calendar.MONTH) + 1, - calendar.get(Calendar.DAY_OF_MONTH), - calendar.get(Calendar.HOUR_OF_DAY), - calendar.get(Calendar.MINUTE), - calendar.get(Calendar.SECOND)); - localDateTime = - localDateTime.with(ChronoField.MILLI_OF_SECOND, calendar.get(Calendar.MILLISECOND)); - return ZonedDateTime.of(localDateTime, ZoneOffset.UTC); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/session/DefaultDseSession.java b/core/src/main/java/com/datastax/dse/driver/internal/core/session/DefaultDseSession.java deleted file mode 100644 index 183f385aa4a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/session/DefaultDseSession.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.SessionWrapper; -import net.jcip.annotations.ThreadSafe; - -/** - * @deprecated DSE functionality is now exposed directly on {@link CqlSession}. This class is - * preserved for backward compatibility, but {@link DefaultSession} should be used instead. 
- */ -@ThreadSafe -@Deprecated -public class DefaultDseSession extends SessionWrapper - implements com.datastax.dse.driver.api.core.DseSession { - - public DefaultDseSession(Session delegate) { - super(delegate); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrar.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrar.java deleted file mode 100644 index 55da2a9475f..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrar.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.type.codec; - -import static com.datastax.oss.driver.internal.core.util.Dependency.ESRI; - -import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DseTypeCodecsRegistrar { - - private static final Logger LOG = LoggerFactory.getLogger(DseTypeCodecsRegistrar.class); - - public static void registerDseCodecs(MutableCodecRegistry registry) { - registry.register(DseTypeCodecs.DATE_RANGE); - if (DefaultDependencyChecker.isPresent(ESRI)) { - registry.register(DseTypeCodecs.LINE_STRING, DseTypeCodecs.POINT, DseTypeCodecs.POLYGON); - } else { - LOG.debug("ESRI was not found on the classpath: geo codecs will not be available"); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrarSubstitutions.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrarSubstitutions.java deleted file mode 100644 index afd8d6cf9f6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrarSubstitutions.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec; - -import static com.datastax.oss.driver.internal.core.util.Dependency.ESRI; - -import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import java.util.function.BooleanSupplier; - -@SuppressWarnings("unused") -public class DseTypeCodecsRegistrarSubstitutions { - - @TargetClass(value = DseTypeCodecsRegistrar.class, onlyWith = EsriMissing.class) - public static final class DseTypeCodecsRegistrarEsriMissing { - - @Substitute - public static void registerDseCodecs(MutableCodecRegistry registry) { - registry.register(DseTypeCodecs.DATE_RANGE); - } - } - - public static class EsriMissing implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return !GraalDependencyChecker.isPresent(ESRI); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodec.java deleted file mode 100644 index f6309bc1860..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodec.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import static com.datastax.oss.driver.internal.core.util.Strings.isQuoted; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** Base class for geospatial type codecs. */ -@ThreadSafe -public abstract class GeometryCodec implements TypeCodec { - - @Nullable - @Override - public T decode(@Nullable ByteBuffer bb, @NonNull ProtocolVersion protocolVersion) { - return bb == null || bb.remaining() == 0 ? null : fromWellKnownBinary(bb.slice()); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable T geometry, @NonNull ProtocolVersion protocolVersion) { - return geometry == null ? 
null : toWellKnownBinary(geometry); - } - - @Nullable - @Override - public T parse(@Nullable String s) { - if (s == null) { - return null; - } - s = s.trim(); - if (s.isEmpty() || s.equalsIgnoreCase("NULL")) { - return null; - } - if (!isQuoted(s)) { - throw new IllegalArgumentException("Geometry values must be enclosed by single quotes"); - } - return fromWellKnownText(Strings.unquote(s)); - } - - @NonNull - @Override - public String format(@Nullable T geometry) throws IllegalArgumentException { - return geometry == null ? "NULL" : Strings.quote(toWellKnownText(geometry)); - } - - /** - * Creates an instance of this codec's geospatial type from its Well-known Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. Cannot be null. - * @return A new instance of this codec's geospatial type. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - */ - @NonNull - protected abstract T fromWellKnownText(@NonNull String source); - - /** - * Creates an instance of a geospatial type from its Well-known Binary - * (WKB) representation. - * - * @param bb the Well-known Binary representation to parse. Cannot be null. - * @return A new instance of this codec's geospatial type. - * @throws IllegalArgumentException if the given {@link ByteBuffer} does not contain a valid - * Well-known Binary representation. - */ - @NonNull - protected abstract T fromWellKnownBinary(@NonNull ByteBuffer bb); - - /** - * Returns a Well-known Text (WKT) - * representation of the given geospatial object. - * - * @param geometry the geospatial object to convert. Cannot be null. - * @return A Well-known Text representation of the given object. - */ - @NonNull - protected abstract String toWellKnownText(@NonNull T geometry); - - /** - * Returns a Well-known - * Binary (WKB) representation of the given geospatial object. - * - * @param geometry the geospatial object to convert. Cannot be null. 
- * @return A Well-known Binary representation of the given object. - */ - @NonNull - protected abstract ByteBuffer toWellKnownBinary(@NonNull T geometry); -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodec.java deleted file mode 100644 index bbec99a4103..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodec.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.esri.core.geometry.ogc.OGCLineString; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * A custom type codec to use {@link LineString} instances in driver. - * - *

If you use {@link com.datastax.dse.driver.api.core.DseSessionBuilder} to build your cluster, - * it will automatically register this codec. - */ -@ThreadSafe -public class LineStringCodec extends GeometryCodec { - - private static final GenericType JAVA_TYPE = GenericType.of(LineString.class); - - @NonNull - @Override - public GenericType getJavaType() { - return JAVA_TYPE; - } - - @NonNull - @Override - protected LineString fromWellKnownText(@NonNull String source) { - return new DefaultLineString(DefaultGeometry.fromOgcWellKnownText(source, OGCLineString.class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == LineString.class; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof LineString; - } - - @NonNull - @Override - protected LineString fromWellKnownBinary(@NonNull ByteBuffer bb) { - return new DefaultLineString(DefaultGeometry.fromOgcWellKnownBinary(bb, OGCLineString.class)); - } - - @NonNull - @Override - protected String toWellKnownText(@NonNull LineString geometry) { - return geometry.asWellKnownText(); - } - - @NonNull - @Override - protected ByteBuffer toWellKnownBinary(@NonNull LineString geometry) { - return geometry.asWellKnownBinary(); - } - - @NonNull - @Override - public DataType getCqlType() { - return DseDataTypes.LINE_STRING; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodec.java deleted file mode 100644 index 5ebae64cbab..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodec.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.esri.core.geometry.ogc.OGCPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * A custom type codec to use {@link Point} instances in the driver. - * - *

If you use {@link com.datastax.dse.driver.api.core.DseSessionBuilder} to build your cluster, - * it will automatically register this codec. - */ -@ThreadSafe -public class PointCodec extends GeometryCodec { - - private static final GenericType JAVA_TYPE = GenericType.of(Point.class); - - @NonNull - @Override - public GenericType getJavaType() { - return JAVA_TYPE; - } - - @NonNull - @Override - public DataType getCqlType() { - return DseDataTypes.POINT; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Point.class; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Point; - } - - @NonNull - @Override - protected String toWellKnownText(@NonNull Point geometry) { - return geometry.asWellKnownText(); - } - - @NonNull - @Override - protected ByteBuffer toWellKnownBinary(@NonNull Point geometry) { - return geometry.asWellKnownBinary(); - } - - @NonNull - @Override - protected Point fromWellKnownText(@NonNull String source) { - return new DefaultPoint(DefaultGeometry.fromOgcWellKnownText(source, OGCPoint.class)); - } - - @NonNull - @Override - protected Point fromWellKnownBinary(@NonNull ByteBuffer source) { - return new DefaultPoint(DefaultGeometry.fromOgcWellKnownBinary(source, OGCPoint.class)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodec.java deleted file mode 100644 index 00a070a4b4a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodec.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.esri.core.geometry.ogc.OGCPolygon; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * A custom type codec to use {@link Polygon} instances in the driver. - * - *

If you use {@link com.datastax.dse.driver.api.core.DseSessionBuilder} to build your cluster, - * it will automatically register this codec. - */ -@ThreadSafe -public class PolygonCodec extends GeometryCodec { - - private static final GenericType JAVA_TYPE = GenericType.of(Polygon.class); - - @NonNull - @Override - public GenericType getJavaType() { - return JAVA_TYPE; - } - - @NonNull - @Override - public DataType getCqlType() { - return DseDataTypes.POLYGON; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Polygon.class; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Polygon; - } - - @NonNull - @Override - protected Polygon fromWellKnownText(@NonNull String source) { - return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownText(source, OGCPolygon.class)); - } - - @NonNull - @Override - protected Polygon fromWellKnownBinary(@NonNull ByteBuffer bb) { - return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownBinary(bb, OGCPolygon.class)); - } - - @NonNull - @Override - protected String toWellKnownText(@NonNull Polygon geometry) { - return geometry.asWellKnownText(); - } - - @NonNull - @Override - protected ByteBuffer toWellKnownBinary(@NonNull Polygon geometry) { - return geometry.asWellKnownBinary(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodec.java deleted file mode 100644 index e8a23e88848..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodec.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.time; - -import com.datastax.dse.driver.api.core.data.time.DateRange; -import com.datastax.dse.driver.api.core.data.time.DateRangeBound; -import com.datastax.dse.driver.api.core.data.time.DateRangePrecision; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.text.ParseException; -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.util.Optional; - -public class DateRangeCodec implements TypeCodec { - - private static final GenericType JAVA_TYPE = GenericType.of(DateRange.class); - private static final DataType CQL_TYPE = DseDataTypes.DATE_RANGE; - - // e.g. [2001-01-01] - private static final byte DATE_RANGE_TYPE_SINGLE_DATE = 0x00; - // e.g. [2001-01-01 TO 2001-01-31] - private static final byte DATE_RANGE_TYPE_CLOSED_RANGE = 0x01; - // e.g. 
[2001-01-01 TO *] - private static final byte DATE_RANGE_TYPE_OPEN_RANGE_HIGH = 0x02; - // e.g. [* TO 2001-01-01] - private static final byte DATE_RANGE_TYPE_OPEN_RANGE_LOW = 0x03; - // [* TO *] - private static final byte DATE_RANGE_TYPE_BOTH_OPEN_RANGE = 0x04; - // * - private static final byte DATE_RANGE_TYPE_SINGLE_DATE_OPEN = 0x05; - - @NonNull - @Override - public GenericType getJavaType() { - return JAVA_TYPE; - } - - @NonNull - @Override - public DataType getCqlType() { - return CQL_TYPE; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == DateRange.class; - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable DateRange dateRange, @NonNull ProtocolVersion protocolVersion) { - if (dateRange == null) { - return null; - } - byte rangeType = encodeType(dateRange); - int bufferSize = 1; - DateRangeBound lowerBound = dateRange.getLowerBound(); - Optional maybeUpperBound = dateRange.getUpperBound(); - bufferSize += lowerBound.isUnbounded() ? 0 : 9; - bufferSize += maybeUpperBound.map(upperBound -> upperBound.isUnbounded() ? 0 : 9).orElse(0); - ByteBuffer buffer = ByteBuffer.allocate(bufferSize); - buffer.put(rangeType); - if (!lowerBound.isUnbounded()) { - put(buffer, lowerBound); - } - maybeUpperBound.ifPresent( - upperBound -> { - if (!upperBound.isUnbounded()) { - put(buffer, upperBound); - } - }); - return (ByteBuffer) buffer.flip(); - } - - private static byte encodeType(DateRange dateRange) { - if (dateRange.isSingleBounded()) { - return dateRange.getLowerBound().isUnbounded() - ? DATE_RANGE_TYPE_SINGLE_DATE_OPEN - : DATE_RANGE_TYPE_SINGLE_DATE; - } else { - DateRangeBound upperBound = - dateRange - .getUpperBound() - .orElseThrow( - () -> - new IllegalStateException("Upper bound should be set if !isSingleBounded()")); - if (dateRange.getLowerBound().isUnbounded()) { - return upperBound.isUnbounded() - ? 
DATE_RANGE_TYPE_BOTH_OPEN_RANGE - : DATE_RANGE_TYPE_OPEN_RANGE_LOW; - } else { - return upperBound.isUnbounded() - ? DATE_RANGE_TYPE_OPEN_RANGE_HIGH - : DATE_RANGE_TYPE_CLOSED_RANGE; - } - } - } - - private static void put(ByteBuffer buffer, DateRangeBound bound) { - buffer.putLong(bound.getTimestamp().toInstant().toEpochMilli()); - buffer.put(bound.getPrecision().getEncoding()); - } - - @Nullable - @Override - public DateRange decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } - byte type = bytes.get(); - switch (type) { - case DATE_RANGE_TYPE_SINGLE_DATE: - return new DateRange(decodeLowerBound(bytes)); - case DATE_RANGE_TYPE_CLOSED_RANGE: - return new DateRange(decodeLowerBound(bytes), decodeUpperBound(bytes)); - case DATE_RANGE_TYPE_OPEN_RANGE_HIGH: - return new DateRange(decodeLowerBound(bytes), DateRangeBound.UNBOUNDED); - case DATE_RANGE_TYPE_OPEN_RANGE_LOW: - return new DateRange(DateRangeBound.UNBOUNDED, decodeUpperBound(bytes)); - case DATE_RANGE_TYPE_BOTH_OPEN_RANGE: - return new DateRange(DateRangeBound.UNBOUNDED, DateRangeBound.UNBOUNDED); - case DATE_RANGE_TYPE_SINGLE_DATE_OPEN: - return new DateRange(DateRangeBound.UNBOUNDED); - default: - throw new IllegalArgumentException("Unknown date range type: " + type); - } - } - - private static DateRangeBound decodeLowerBound(ByteBuffer bytes) { - long epochMilli = bytes.getLong(); - ZonedDateTime timestamp = - ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMilli), ZoneOffset.UTC); - DateRangePrecision precision = DateRangePrecision.fromEncoding(bytes.get()); - return DateRangeBound.lowerBound(timestamp, precision); - } - - private static DateRangeBound decodeUpperBound(ByteBuffer bytes) { - long epochMilli = bytes.getLong(); - ZonedDateTime timestamp = - ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMilli), ZoneOffset.UTC); - DateRangePrecision precision = DateRangePrecision.fromEncoding(bytes.get()); - 
return DateRangeBound.upperBound(timestamp, precision); - } - - @NonNull - @Override - public String format(@Nullable DateRange dateRange) { - return (dateRange == null) ? "NULL" : Strings.quote(dateRange.toString()); - } - - @Nullable - @Override - public DateRange parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - try { - return DateRange.parse(Strings.unquote(value)); - } catch (ParseException e) { - throw new IllegalArgumentException(String.format("Invalid date range literal: %s", value), e); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueue.java b/core/src/main/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueue.java deleted file mode 100644 index ea9ccd7d622..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueue.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.util.concurrent; - -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Deque; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.atomic.AtomicReference; - -/** - * A concurrent queue with a limited size. - * - *

Once the queue is full, the insertion of the next element is delayed until space becomes - * available again; in the meantime, additional insertions are not allowed (in other words, there - * can be at most one "pending" element waiting on a full queue). - */ -public class BoundedConcurrentQueue { - - private final Deque elements = new ConcurrentLinkedDeque<>(); - private final AtomicReference state; - - public BoundedConcurrentQueue(int maxSize) { - this.state = new AtomicReference<>(new State(maxSize)); - } - - /** - * @return a stage that completes when the element is inserted. If there was still space in the - * queue, it will be already complete; if the queue was full, it will complete at a later - * point in time (triggered by a call to {@link #poll()}). This method must not be invoked - * again until the stage has completed. - * @throws IllegalStateException if the method is invoked before the stage returned by the - * previous call has completed. - */ - @NonNull - public CompletionStage offer(@NonNull ElementT element) { - while (true) { - State oldState = state.get(); - State newState = oldState.increment(); - if (state.compareAndSet(oldState, newState)) { - if (newState.spaceAvailable != null) { - return newState.spaceAvailable.thenApply( - (aVoid) -> { - elements.offer(element); - return element; - }); - } else { - elements.offer(element); - return CompletableFuture.completedFuture(element); - } - } - } - } - - @Nullable - public ElementT poll() { - while (true) { - State oldState = state.get(); - if (oldState.size == 0) { - return null; - } - State newState = oldState.decrement(); - if (state.compareAndSet(oldState, newState)) { - if (oldState.spaceAvailable != null) { - oldState.spaceAvailable.complete(null); - } - return elements.poll(); - } - } - } - - @Nullable - public ElementT peek() { - return elements.peek(); - } - - /** - * Note that this does not complete a pending call to {@link #offer(Object)}. 
We only use this - * method for terminal states where we want to dereference the contained elements. - */ - public void clear() { - elements.clear(); - } - - private static class State { - - private final int maxSize; - - final int size; // Number of elements in the queue, + 1 if one is waiting to get in - final CompletableFuture spaceAvailable; // Not null iff size == maxSize + 1 - - State(int maxSize) { - this(0, null, maxSize); - } - - private State(int size, CompletableFuture spaceAvailable, int maxSize) { - this.maxSize = maxSize; - this.size = size; - this.spaceAvailable = spaceAvailable; - } - - State increment() { - if (size > maxSize) { - throw new IllegalStateException( - "Can't call offer() until the stage returned by the previous offer() call has completed"); - } - int newSize = size + 1; - CompletableFuture newFuture = - (newSize == maxSize + 1) ? new CompletableFuture<>() : null; - return new State(newSize, newFuture, maxSize); - } - - State decrement() { - return new State(size - 1, null, maxSize); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java b/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java deleted file mode 100644 index b6f1bf93838..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -/** - * Thrown when a query failed on all the coordinators it was tried on. This exception may wrap - * multiple errors, that are available either as {@linkplain #getSuppressed() suppressed - * exceptions}, or via {@link #getAllErrors()} where they are grouped by node. - */ -public class AllNodesFailedException extends DriverException { - - /** @deprecated Use {@link #fromErrors(List)} instead. 
*/ - @NonNull - @Deprecated - public static AllNodesFailedException fromErrors(@Nullable Map errors) { - if (errors == null || errors.isEmpty()) { - return new NoNodeAvailableException(); - } else { - return new AllNodesFailedException(groupByNode(errors)); - } - } - - @NonNull - public static AllNodesFailedException fromErrors(@Nullable List> errors) { - if (errors == null || errors.isEmpty()) { - return new NoNodeAvailableException(); - } else { - return new AllNodesFailedException(groupByNode(errors)); - } - } - - private final Map> errors; - - /** @deprecated Use {@link #AllNodesFailedException(String, ExecutionInfo, Iterable)} instead. */ - @Deprecated - protected AllNodesFailedException( - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - @NonNull Map errors) { - super(message, executionInfo, null, true); - this.errors = toDeepImmutableMap(groupByNode(errors)); - addSuppressedErrors(); - } - - protected AllNodesFailedException( - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - @NonNull Iterable>> errors) { - super(message, executionInfo, null, true); - this.errors = toDeepImmutableMap(errors); - addSuppressedErrors(); - } - - private void addSuppressedErrors() { - for (List errors : this.errors.values()) { - for (Throwable error : errors) { - addSuppressed(error); - } - } - } - - private AllNodesFailedException(Map> errors) { - this( - buildMessage( - String.format("All %d node(s) tried for the query failed", errors.size()), errors), - null, - errors.entrySet()); - } - - private static String buildMessage(String baseMessage, Map> errors) { - int limit = Math.min(errors.size(), 3); - Iterator>> iterator = - Iterables.limit(errors.entrySet(), limit).iterator(); - StringBuilder details = new StringBuilder(); - while (iterator.hasNext()) { - Entry> entry = iterator.next(); - details.append(entry.getKey()).append(": ").append(entry.getValue()); - if (iterator.hasNext()) { - details.append(", "); - } - } - return String.format( 
- "%s (showing first %d nodes, use getAllErrors() for more): %s", - baseMessage, limit, details); - } - - /** - * An immutable map containing the first error on each tried node. - * - * @deprecated Use {@link #getAllErrors()} instead. - */ - @NonNull - @Deprecated - public Map getErrors() { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Node node : errors.keySet()) { - List nodeErrors = errors.get(node); - if (!nodeErrors.isEmpty()) { - builder.put(node, nodeErrors.get(0)); - } - } - return builder.build(); - } - - /** An immutable map containing all errors on each tried node. */ - @NonNull - public Map> getAllErrors() { - return errors; - } - - @NonNull - @Override - public DriverException copy() { - return new AllNodesFailedException(getMessage(), getExecutionInfo(), errors.entrySet()); - } - - @NonNull - public AllNodesFailedException reword(String newMessage) { - return new AllNodesFailedException( - buildMessage(newMessage, errors), getExecutionInfo(), errors.entrySet()); - } - - private static Map> groupByNode(Map errors) { - return groupByNode(errors.entrySet()); - } - - private static Map> groupByNode(Iterable> errors) { - // no need for immutable collections here - Map> map = new LinkedHashMap<>(); - for (Entry entry : errors) { - Node node = entry.getKey(); - Throwable error = entry.getValue(); - map.compute( - node, - (k, v) -> { - if (v == null) { - v = new ArrayList<>(); - } - v.add(error); - return v; - }); - } - return map; - } - - private static Map> toDeepImmutableMap(Map> errors) { - return toDeepImmutableMap(errors.entrySet()); - } - - private static Map> toDeepImmutableMap( - Iterable>> errors) { - ImmutableMap.Builder> builder = ImmutableMap.builder(); - for (Entry> entry : errors) { - builder.put(entry.getKey(), ImmutableList.copyOf(entry.getValue())); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/AsyncAutoCloseable.java 
b/core/src/main/java/com/datastax/oss/driver/api/core/AsyncAutoCloseable.java deleted file mode 100644 index 7f8cafbc895..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/AsyncAutoCloseable.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.concurrent.CompletionStage; - -/** - * An object that can be closed in an asynchronous, non-blocking manner. - * - *

For convenience, this extends the JDK's {@code AutoCloseable} in order to be usable in - * try-with-resource blocks (in that case, the blocking {@link #close()} will be used). - */ -public interface AsyncAutoCloseable extends AutoCloseable { - - /** - * Returns a stage that will complete when {@link #close()} or {@link #forceCloseAsync()} is - * called, and the shutdown sequence completes. - */ - @NonNull - CompletionStage closeFuture(); - - /** - * Whether shutdown has completed. - * - *

This is a shortcut for {@code closeFuture().toCompletableFuture().isDone()}. - */ - default boolean isClosed() { - return closeFuture().toCompletableFuture().isDone(); - } - - /** - * Initiates an orderly shutdown: no new requests are accepted, but all pending requests are - * allowed to complete normally. - * - * @return a stage that will complete when the shutdown sequence is complete. Multiple calls to - * this method or {@link #forceCloseAsync()} always return the same instance. - */ - @NonNull - CompletionStage closeAsync(); - - /** - * Initiates a forced shutdown of this instance: no new requests are accepted, and all pending - * requests will complete with an exception. - * - * @return a stage that will complete when the shutdown sequence is complete. Multiple calls to - * this method or {@link #close()} always return the same instance. - */ - @NonNull - CompletionStage forceCloseAsync(); - - /** - * {@inheritDoc} - * - *

This method is implemented by calling {@link #closeAsync()} and blocking on the result. This - * should not be called on a driver thread. - */ - @Override - default void close() { - BlockingOperation.checkNotDriverThread(); - CompletableFutures.getUninterruptibly(closeAsync().toCompletableFuture()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/AsyncPagingIterable.java b/core/src/main/java/com/datastax/oss/driver/api/core/AsyncPagingIterable.java deleted file mode 100644 index fd7c5be6baa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/AsyncPagingIterable.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.AsyncPagingIterableWrapper; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Iterator; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; - -/** - * An iterable of elements which are fetched asynchronously by the driver, possibly in multiple - * requests. - */ -public interface AsyncPagingIterable> { - - /** Metadata about the columns returned by the CQL request that was used to build this result. */ - @NonNull - ColumnDefinitions getColumnDefinitions(); - - /** Returns {@linkplain ExecutionInfo information about the execution} of this page of results. */ - @NonNull - ExecutionInfo getExecutionInfo(); - - /** How many rows are left before the current page is exhausted. */ - int remaining(); - - /** - * The elements in the current page. To keep iterating beyond that, use {@link #hasMorePages()} - * and {@link #fetchNextPage()}. - * - *

Note that this method always returns the same object, and that that object can only be - * iterated once: elements are "consumed" as they are read. - */ - @NonNull - Iterable currentPage(); - - /** - * Returns the next element, or {@code null} if the results are exhausted. - * - *

This is convenient for queries that are known to return exactly one element, for example - * count queries. - */ - @Nullable - default ElementT one() { - Iterator iterator = currentPage().iterator(); - return iterator.hasNext() ? iterator.next() : null; - } - - /** - * Whether there are more pages of results. If so, call {@link #fetchNextPage()} to fetch the next - * one asynchronously. - */ - boolean hasMorePages(); - - /** - * Fetch the next page of results asynchronously. - * - * @throws IllegalStateException if there are no more pages. Use {@link #hasMorePages()} to check - * if you can call this method. - */ - @NonNull - CompletionStage fetchNextPage() throws IllegalStateException; - - /** - * If the query that produced this result was a CQL conditional update, indicate whether it was - * successfully applied. - * - *

For consistency, this method always returns {@code true} for non-conditional queries - * (although there is no reason to call the method in that case). This is also the case for - * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF - * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column. - * - *

Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (CASSANDRA-7337) causes this - * method to always return {@code true} for batches containing conditional queries. - */ - boolean wasApplied(); - - /** - * Creates a new instance by transforming each element of this iterable with the provided - * function. - * - *

Note that both instances share the same underlying data: consuming elements from the - * transformed iterable will also consume them from this object, and vice-versa. - */ - default MappedAsyncPagingIterable map( - Function elementMapper) { - return new AsyncPagingIterableWrapper<>(this, elementMapper); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ConsistencyLevel.java b/core/src/main/java/com/datastax/oss/driver/api/core/ConsistencyLevel.java deleted file mode 100644 index a1b6d8006df..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ConsistencyLevel.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The consistency level of a request. - * - *

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code ConsistencyLevel}s are {@link DefaultConsistencyLevel} instances. - */ -public interface ConsistencyLevel { - - ConsistencyLevel ANY = DefaultConsistencyLevel.ANY; - ConsistencyLevel ONE = DefaultConsistencyLevel.ONE; - ConsistencyLevel TWO = DefaultConsistencyLevel.TWO; - ConsistencyLevel THREE = DefaultConsistencyLevel.THREE; - ConsistencyLevel QUORUM = DefaultConsistencyLevel.QUORUM; - ConsistencyLevel ALL = DefaultConsistencyLevel.ALL; - ConsistencyLevel LOCAL_ONE = DefaultConsistencyLevel.LOCAL_ONE; - ConsistencyLevel LOCAL_QUORUM = DefaultConsistencyLevel.LOCAL_QUORUM; - ConsistencyLevel EACH_QUORUM = DefaultConsistencyLevel.EACH_QUORUM; - ConsistencyLevel SERIAL = DefaultConsistencyLevel.SERIAL; - ConsistencyLevel LOCAL_SERIAL = DefaultConsistencyLevel.LOCAL_SERIAL; - - /** The numerical value that the level is encoded to in protocol frames. */ - int getProtocolCode(); - - /** The textual representation of the level in configuration files. */ - @NonNull - String name(); - - /** Whether this consistency level applies to the local datacenter only. */ - boolean isDcLocal(); - - /** - * Whether this consistency level is serial, that is, applies only to the "paxos" phase of a lightweight - * transaction. - * - *

Serial consistency levels are only meaningful when executing conditional updates ({@code - * INSERT}, {@code UPDATE} or {@code DELETE} statements with an {@code IF} condition). - */ - boolean isSerial(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java b/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java deleted file mode 100644 index 82e4c2b30a6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.Locale; -import net.jcip.annotations.Immutable; - -/** - * The identifier of CQL element (keyspace, table, column, etc). - * - *

It has two representations: - * - *

    - *
  • the "CQL" form, which is how you would type the identifier in a CQL query. It is - * case-insensitive unless enclosed in double quotation marks; in addition, identifiers that - * contain special characters (anything other than alphanumeric and underscore), or match CQL - * keywords, must be double-quoted (with inner double quotes escaped as {@code ""}). - *
  • the "internal" form, which is how the name is stored in Cassandra system tables. It is - * lower-case for case-sensitive identifiers, and in the exact case for case-sensitive - * identifiers. - *
- * - * Examples: - * - * - * - * - * - * - * - * - * - *
Create statementCase-sensitive?CQL idInternal id
CREATE TABLE t(foo int PRIMARY KEY)Nofoofoo
CREATE TABLE t(Foo int PRIMARY KEY)Nofoofoo
CREATE TABLE t("Foo" int PRIMARY KEY)Yes"Foo"Foo
CREATE TABLE t("foo bar" int PRIMARY KEY)Yes"foo bar"foo bar
CREATE TABLE t("foo""bar" int PRIMARY KEY)Yes"foo""bar"foo"bar
CREATE TABLE t("create" int PRIMARY KEY)Yes (reserved keyword)"create"create
- * - * This class provides a common representation and avoids any ambiguity about which form the - * identifier is in. Driver clients will generally want to create instances from the CQL form with - * {@link #fromCql(String)}. - * - *

There is no internal caching; if you reuse the same identifiers often, consider caching them - * in your application. - */ -@Immutable -public class CqlIdentifier implements Serializable { - - private static final long serialVersionUID = 1; - - // IMPLEMENTATION NOTES: - // This is used internally, and for all API methods where the overhead of requiring the client to - // create an instance is acceptable (metadata, statement.getKeyspace, etc.) - // One exception is named getters, where we keep raw strings with the 3.x rules. - - /** Creates an identifier from its {@link CqlIdentifier CQL form}. */ - @NonNull - public static CqlIdentifier fromCql(@NonNull String cql) { - Preconditions.checkNotNull(cql, "cql must not be null"); - final String internal; - if (Strings.isDoubleQuoted(cql)) { - internal = Strings.unDoubleQuote(cql); - } else { - internal = cql.toLowerCase(Locale.ROOT); - Preconditions.checkArgument( - !Strings.needsDoubleQuotes(internal), "Invalid CQL form [%s]: needs double quotes", cql); - } - return fromInternal(internal); - } - - /** Creates an identifier from its {@link CqlIdentifier internal form}. */ - @NonNull - public static CqlIdentifier fromInternal(@NonNull String internal) { - Preconditions.checkNotNull(internal, "internal must not be null"); - return new CqlIdentifier(internal); - } - - /** @serial */ - private final String internal; - - private CqlIdentifier(String internal) { - this.internal = internal; - } - - /** - * Returns the identifier in the "internal" format. - * - * @return the identifier in its exact case, unquoted. - */ - @NonNull - public String asInternal() { - return this.internal; - } - - /** - * Returns the identifier in a format appropriate for concatenation in a CQL query. - * - * @param pretty if {@code true}, use the shortest possible representation: if the identifier is - * case-insensitive, an unquoted, lower-case string, otherwise the double-quoted form. 
If - * {@code false}, always use the double-quoted form (this is slightly more efficient since we - * don't need to inspect the string). - */ - @NonNull - public String asCql(boolean pretty) { - if (pretty) { - return Strings.needsDoubleQuotes(internal) ? Strings.doubleQuote(internal) : internal; - } else { - return Strings.doubleQuote(internal); - } - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CqlIdentifier) { - CqlIdentifier that = (CqlIdentifier) other; - return this.internal.equals(that.internal); - } else { - return false; - } - } - - @Override - public int hashCode() { - return internal.hashCode(); - } - - @Override - public String toString() { - return internal; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(internal, "internal must not be null"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSession.java b/core/src/main/java/com/datastax/oss/driver/api/core/CqlSession.java deleted file mode 100644 index ff096719f3e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSession.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousSession; -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveSession; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveSession; -import com.datastax.dse.driver.api.core.graph.GraphSession; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphSession; -import com.datastax.oss.driver.api.core.cql.AsyncCqlSession; -import com.datastax.oss.driver.api.core.cql.SyncCqlSession; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The default session type built by the driver. - * - *

It provides user-friendly execution methods for: - * - *

    - *
  • CQL requests: synchronous, asynchronous or reactive mode; - *
  • requests specific to DataStax Enterprise: graph and continuous paging. - *
- * - * Client applications can use this interface even if they don't need all the features. In - * particular, it can be used with a regular Apache Cassandra ® cluster, as long as you don't - * call any of the DSE-specific execute methods. If you're in that situation, you might also want to - * exclude certain dependencies from your classpath (see the "Integration" page in the user manual). - * - *

Note that the name "CQL session" is no longer really accurate since this interface can now - * execute other request types; but it was preserved for backward compatibility with previous driver - * versions. - */ -public interface CqlSession - extends Session, - SyncCqlSession, - AsyncCqlSession, - ReactiveSession, - ContinuousSession, - GraphSession, - ContinuousReactiveSession, - ReactiveGraphSession { - - /** - * Returns a builder to create a new instance. - * - *

Note that this builder is mutable and not thread-safe. - * - * @return {@code CqlSessionBuilder} to create a new instance. - */ - @NonNull - static CqlSessionBuilder builder() { - return new CqlSessionBuilder(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java deleted file mode 100644 index 4598c078dca..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.NotThreadSafe; - -/** - * Helper class to build a {@link CqlSession} instance. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class CqlSessionBuilder extends SessionBuilder { - - @Override - protected CqlSession wrap(@NonNull CqlSession defaultSession) { - return defaultSession; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java b/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java deleted file mode 100644 index 2e5a4a6f022..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** A default consistency level supported by the driver out of the box. 
*/ -public enum DefaultConsistencyLevel implements ConsistencyLevel { - ANY(ProtocolConstants.ConsistencyLevel.ANY), - ONE(ProtocolConstants.ConsistencyLevel.ONE), - TWO(ProtocolConstants.ConsistencyLevel.TWO), - THREE(ProtocolConstants.ConsistencyLevel.THREE), - QUORUM(ProtocolConstants.ConsistencyLevel.QUORUM), - ALL(ProtocolConstants.ConsistencyLevel.ALL), - LOCAL_ONE(ProtocolConstants.ConsistencyLevel.LOCAL_ONE), - LOCAL_QUORUM(ProtocolConstants.ConsistencyLevel.LOCAL_QUORUM), - EACH_QUORUM(ProtocolConstants.ConsistencyLevel.EACH_QUORUM), - - SERIAL(ProtocolConstants.ConsistencyLevel.SERIAL), - LOCAL_SERIAL(ProtocolConstants.ConsistencyLevel.LOCAL_SERIAL), - ; - // Note that, for the sake of convenience, we also expose shortcuts to these constants on the - // ConsistencyLevel interface. If you add a new enum constant, remember to update the interface as - // well. - - private final int protocolCode; - - DefaultConsistencyLevel(int protocolCode) { - this.protocolCode = protocolCode; - } - - @Override - public int getProtocolCode() { - return protocolCode; - } - - @NonNull - public static DefaultConsistencyLevel fromCode(int code) { - DefaultConsistencyLevel level = BY_CODE.get(code); - if (level == null) { - throw new IllegalArgumentException("Unknown code: " + code); - } - return level; - } - - @Override - public boolean isDcLocal() { - return this == LOCAL_ONE || this == LOCAL_QUORUM || this == LOCAL_SERIAL; - } - - @Override - public boolean isSerial() { - return this == SERIAL || this == LOCAL_SERIAL; - } - - private static final Map BY_CODE = mapByCode(values()); - - private static Map mapByCode(DefaultConsistencyLevel[] levels) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (DefaultConsistencyLevel level : levels) { - builder.put(level.protocolCode, level); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultProtocolVersion.java 
b/core/src/main/java/com/datastax/oss/driver/api/core/DefaultProtocolVersion.java deleted file mode 100644 index 91b45fc506a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultProtocolVersion.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.protocol.internal.ProtocolConstants; - -/** - * A protocol version supported by default by the driver. - * - *

Legacy versions 1 (Cassandra 1.2) and 2 (Cassandra 2.0) are not supported anymore. - */ -public enum DefaultProtocolVersion implements ProtocolVersion { - - /** Version 3, supported by Cassandra 2.1 and above. */ - V3(ProtocolConstants.Version.V3, false), - - /** Version 4, supported by Cassandra 2.2 and above. */ - V4(ProtocolConstants.Version.V4, false), - - /** Version 5, supported by Cassandra 4.0 and above. */ - V5(ProtocolConstants.Version.V5, false), - - /** - * Version 6, currently supported as a beta preview in Cassandra 4.0 and above. - * - *

Do not use this in production. - * - * @see ProtocolVersion#isBeta() - */ - V6(ProtocolConstants.Version.V6, true), - ; - // Note that, for the sake of convenience, we also expose shortcuts to these constants on the - // ProtocolVersion interface. If you add a new enum constant, remember to update the interface as - // well. - - private final int code; - private final boolean beta; - - DefaultProtocolVersion(int code, boolean beta) { - this.code = code; - this.beta = beta; - } - - @Override - public int getCode() { - return code; - } - - @Override - public boolean isBeta() { - return beta; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java b/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java deleted file mode 100644 index f5cf76e29eb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Base class for all exceptions thrown by the driver. - * - *

Note that, for obvious programming errors, the driver might throw JDK runtime exceptions, such - * as {@link IllegalArgumentException} or {@link IllegalStateException}. In all other cases, it will - * be an instance of this class. - * - *

One special case is when the driver tried multiple nodes to complete a request, and they all - * failed; the error returned to the client will be an {@link AllNodesFailedException}, which wraps - * a map of errors per node. - * - *

Some implementations make the stack trace not writable to improve performance (see {@link - * Throwable#Throwable(String, Throwable, boolean, boolean)}). This is only done when the exception - * is thrown in a small number of well-known cases, and the stack trace wouldn't add any useful - * information (for example, server error responses). Instances returned by {@link #copy()} always - * have a stack trace. - */ -public abstract class DriverException extends RuntimeException { - - private transient volatile ExecutionInfo executionInfo; - - protected DriverException( - @Nullable String message, - @Nullable ExecutionInfo executionInfo, - @Nullable Throwable cause, - boolean writableStackTrace) { - super(message, cause, true, writableStackTrace); - this.executionInfo = executionInfo; - } - - /** - * Returns execution information about the request that led to this error. - * - *

This is similar to the information returned for a successful query in {@link ResultSet}, - * except that some fields may be absent: - * - *

    - *
  • {@link ExecutionInfo#getCoordinator()} may be null if the error occurred before any node - * was contacted; - *
  • {@link ExecutionInfo#getErrors()} will contain the errors encountered for other nodes, - * but not this error itself; - *
  • {@link ExecutionInfo#getSuccessfulExecutionIndex()} may be -1 if the error occurred - * before any execution was started; - *
  • {@link ExecutionInfo#getPagingState()} and {@link ExecutionInfo#getTracingId()} will - * always be null; - *
  • {@link ExecutionInfo#getWarnings()} and {@link ExecutionInfo#getIncomingPayload()} will - * always be empty; - *
  • {@link ExecutionInfo#isSchemaInAgreement()} will always be true; - *
  • {@link ExecutionInfo#getResponseSizeInBytes()} and {@link - * ExecutionInfo#getCompressedResponseSizeInBytes()} will always be -1. - *
- * - *

Note that this is only set for exceptions that are rethrown directly to the client from a - * session call. For example, individual node errors stored in {@link - * AllNodesFailedException#getAllErrors()} or {@link ExecutionInfo#getErrors()} do not contain - * their own execution info, and therefore return null from this method. - * - *

This method will also return null for low-level exceptions thrown directly from a driver - * channel, such as {@link com.datastax.oss.driver.api.core.connection.ConnectionInitException} or - * {@link com.datastax.oss.driver.api.core.connection.ClosedConnectionException}. - * - *

It will also be null if you serialize and deserialize an exception. - */ - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - /** This is for internal use by the driver, a client application has no reason to call it. */ - public void setExecutionInfo(ExecutionInfo executionInfo) { - this.executionInfo = executionInfo; - } - - /** - * Copy the exception. - * - *

This returns a new exception, equivalent to the original one, except that because a new - * object is created in the current thread, the top-most element in the stacktrace of the - * exception will refer to the current thread. The original exception may or may not be included - * as the copy's cause, depending on whether that is deemed useful (this is left to the discretion - * of each implementation). - * - *

This is intended for the synchronous wrapper methods of the driver, in order to produce a - * more user-friendly stack trace (that includes the line in the user code where the driver - * rethrew the error). - */ - @NonNull - public abstract DriverException copy(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java deleted file mode 100644 index 90ff875e375..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Thrown by synchronous wrapper methods (such as {@link CqlSession#execute(Statement)}, when the - * underlying future was completed with a checked exception. - * - *

This exception should be rarely thrown (if ever). Most of the time, the driver uses unchecked - * exceptions, which will be rethrown directly instead of being wrapped in this class. - */ -public class DriverExecutionException extends DriverException { - public DriverExecutionException(Throwable cause) { - this(null, cause); - } - - private DriverExecutionException(ExecutionInfo executionInfo, Throwable cause) { - super(null, executionInfo, cause, true); - } - - @NonNull - @Override - public DriverException copy() { - return new DriverExecutionException(getExecutionInfo(), getCause()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java b/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java deleted file mode 100644 index 8b4cc5dc5bb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** Thrown when a driver request timed out. 
*/ -public class DriverTimeoutException extends DriverException { - public DriverTimeoutException(@NonNull String message) { - this(message, null); - } - - private DriverTimeoutException(String message, ExecutionInfo executionInfo) { - super(message, executionInfo, null, true); - } - - @NonNull - @Override - public DriverException copy() { - return new DriverTimeoutException(getMessage(), getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java b/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java deleted file mode 100644 index aa3f774800c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** Thrown when a session gets created with an invalid keyspace. 
*/ -public class InvalidKeyspaceException extends DriverException { - public InvalidKeyspaceException(@NonNull String message) { - this(message, null); - } - - private InvalidKeyspaceException(String message, ExecutionInfo executionInfo) { - super(message, executionInfo, null, true); - } - - @NonNull - @Override - public DriverException copy() { - return new InvalidKeyspaceException(getMessage(), getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java b/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java deleted file mode 100644 index b3902489a48..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import java.util.function.Function; - -/** The result of calling {@link #map(Function)} on another async iterable. 
*/ -public interface MappedAsyncPagingIterable - extends AsyncPagingIterable> {} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java b/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java deleted file mode 100644 index 3c3f18a5dc2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface MavenCoordinates { - - @NonNull - String getGroupId(); - - @NonNull - String getArtifactId(); - - @NonNull - Version getVersion(); - - @NonNull - String getName(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java b/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java deleted file mode 100644 index 9ef51fb99b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; - -/** - * Specialization of {@code AllNodesFailedException} when no coordinators were tried. - * - *

This can happen if all nodes are down, or if all the contact points provided at startup were - * invalid. - */ -public class NoNodeAvailableException extends AllNodesFailedException { - public NoNodeAvailableException() { - this(null); - } - - private NoNodeAvailableException(ExecutionInfo executionInfo) { - super("No node was available to execute the query", executionInfo, Collections.emptySet()); - } - - @NonNull - @Override - public DriverException copy() { - return new NoNodeAvailableException(getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/NodeUnavailableException.java b/core/src/main/java/com/datastax/oss/driver/api/core/NodeUnavailableException.java deleted file mode 100644 index 5303119844e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/NodeUnavailableException.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; - -/** - * Indicates that a {@link Node} was selected in a query plan, but it had no connection available. - * - *

A common reason to encounter this error is when the configured number of connections per node - * and requests per connection is not high enough to absorb the overall request rate. This can be - * mitigated by tuning the following options: - * - *

    - *
  • {@code advanced.connection.pool.local.size}; - *
  • {@code advanced.connection.pool.remote.size}; - *
  • {@code advanced.connection.max-requests-per-connection}. - *
- * - * See {@code reference.conf} for more details. - * - *

Another possibility is when you are trying to direct a request {@linkplain - * com.datastax.oss.driver.api.core.cql.Statement#setNode(Node) to a particular node}, but that node - * has no connections available. - */ -public class NodeUnavailableException extends DriverException { - - private final Node node; - - public NodeUnavailableException(Node node) { - super("No connection was available to " + node, null, null, true); - this.node = Objects.requireNonNull(node); - } - - @NonNull - public Node getNode() { - return node; - } - - @Override - @NonNull - public DriverException copy() { - return new NodeUnavailableException(node); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java b/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java deleted file mode 100644 index c2a81b554d0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.internal.core.PagingIterableWrapper; -import com.datastax.oss.driver.internal.core.cql.PagingIterableSpliterator; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Spliterator; -import java.util.function.Function; - -/** - * An iterable of elements which are fetched synchronously by the driver, possibly in multiple - * requests. - * - *

It uses asynchronous calls internally, but blocks on the results in order to provide a - * synchronous API to its clients. If the query is paged, only the first page will be fetched - * initially, and iteration will trigger background fetches of the next pages when necessary. - * - *

Note that this object can only be iterated once: elements are "consumed" as they are read, - * subsequent calls to {@code iterator()} will return the same iterator instance. - * - *

Implementations of this type are not thread-safe. They can only be iterated by the - * thread that invoked {@code session.execute}. - * - *

This is a generalization of {@link ResultSet}, replacing rows by an arbitrary element type. - */ -public interface PagingIterable extends Iterable { - - /** Metadata about the columns returned by the CQL request that was used to build this result. */ - @NonNull - ColumnDefinitions getColumnDefinitions(); - - /** - * The execution information for the last query performed for this iterable. - * - *

This is a shortcut for: - * - *

-   * getExecutionInfos().get(getExecutionInfos().size() - 1)
-   * 
- * - * @see #getExecutionInfos() - */ - @NonNull - default ExecutionInfo getExecutionInfo() { - List infos = getExecutionInfos(); - return infos.get(infos.size() - 1); - } - - /** - * The execution information for all the queries that have been performed so far to assemble this - * iterable. - * - *

This will have multiple elements if the query is paged, since the driver performs blocking - * background queries to fetch additional pages transparently as the result set is being iterated. - */ - @NonNull - List getExecutionInfos(); - - /** - * Returns the next element, or {@code null} if the iterable is exhausted. - * - *

This is convenient for queries that are known to return exactly one row, for example count - * queries. - */ - @Nullable - default ElementT one() { - Iterator iterator = iterator(); - return iterator.hasNext() ? iterator.next() : null; - } - - /** - * Returns all the remaining elements as a list; not recommended for queries that return a - * large number of elements. - * - *

Contrary to {@link #iterator()} or successive calls to {@link #one()}, this method forces - * fetching the full contents at once; in particular, this means that a large number of - * background queries might have to be run, and that all the data will be held in memory locally. - * Therefore it is crucial to only call this method for queries that are known to return a - * reasonable number of results. - */ - @NonNull - @SuppressWarnings("MixedMutabilityReturnType") - default List all() { - if (!iterator().hasNext()) { - return Collections.emptyList(); - } - // We can't know the actual size in advance since more pages could be fetched, but we can at - // least allocate for what we already have. - List result = Lists.newArrayListWithExpectedSize(getAvailableWithoutFetching()); - Iterables.addAll(result, this); - return result; - } - - /** - * Whether all pages have been fetched from the database. - * - *

If this is {@code false}, it means that more blocking background queries will be triggered - * as iteration continues. - */ - boolean isFullyFetched(); - - /** - * The number of elements that can be returned from this result set before a blocking background - * query needs to be performed to retrieve more results. In other words, this is the number of - * elements remaining in the current page. - * - *

This is useful if you use the paging state to pause the iteration and resume it later: after - * you've retrieved the state ({@link ExecutionInfo#getPagingState() - * getExecutionInfo().getPagingState()}), call this method and iterate the remaining elements; - * that way you're not leaving a gap between the last element and the position you'll restart from - * when you reinject the state in a new query. - */ - int getAvailableWithoutFetching(); - - /** - * If the query that produced this result was a CQL conditional update, indicate whether it was - * successfully applied. - * - *

For consistency, this method always returns {@code true} for non-conditional queries - * (although there is no reason to call the method in that case). This is also the case for - * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF - * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column. - * - *

Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (CASSANDRA-7337) causes this - * method to always return {@code true} for batches containing conditional queries. - */ - boolean wasApplied(); - - /** - * Creates a new instance by transforming each element of this iterable with the provided - * function. - * - *

Note that both instances share the same underlying data: consuming elements from the - * transformed iterable will also consume them from this object, and vice-versa. - */ - @NonNull - default PagingIterable map( - Function elementMapper) { - return new PagingIterableWrapper<>(this, elementMapper); - } - - /** - * {@inheritDoc} - * - *

Default spliterators created by the driver will report the following characteristics: {@link - * Spliterator#ORDERED}, {@link Spliterator#IMMUTABLE}, {@link Spliterator#NONNULL}. Single-page - * result sets will also report {@link Spliterator#SIZED} and {@link Spliterator#SUBSIZED}, since - * the result set size is known. - * - *

This method should be called at most once. Spliterators share the same underlying data but - * do not support concurrent consumption; once a spliterator for this iterable is obtained, the - * iterable should not be consumed through calls to other methods such as {@link - * #iterator()}, {@link #one()} or {@link #all()}; doing so will result in unpredictable results. - */ - @NonNull - @Override - default Spliterator spliterator() { - return new PagingIterableSpliterator<>(this); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java b/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java deleted file mode 100644 index dd69f705453..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.oss.driver.api.core.detach.Detachable; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A version of the native protocol used by the driver to communicate with the server. - * - *

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code ProtocolVersion}s are {@link DefaultProtocolVersion} instances. - */ -public interface ProtocolVersion { - - ProtocolVersion V3 = DefaultProtocolVersion.V3; - ProtocolVersion V4 = DefaultProtocolVersion.V4; - ProtocolVersion V5 = DefaultProtocolVersion.V5; - ProtocolVersion V6 = DefaultProtocolVersion.V6; - ProtocolVersion DSE_V1 = DseProtocolVersion.DSE_V1; - ProtocolVersion DSE_V2 = DseProtocolVersion.DSE_V2; - - /** The default version used for {@link Detachable detached} objects. */ - // Implementation note: we can't use the ProtocolVersionRegistry here, this has to be a - // compile-time constant. - ProtocolVersion DEFAULT = DefaultProtocolVersion.V5; - - /** - * A numeric code that uniquely identifies the version (this is the code used in network frames). - */ - int getCode(); - - /** A string representation of the version. */ - @NonNull - String name(); - - /** - * Whether the protocol version is in a beta status. - * - *

Beta versions are intended for Cassandra development. They should not be used in a regular - * application, as beta features may break at any point. - */ - boolean isBeta(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java b/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java deleted file mode 100644 index acf569d55f6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Thrown if the session uses a request throttler, and it didn't allow the current request to - * execute. - * - *

This can happen either when the session is overloaded, or at shutdown for requests that had - * been enqueued. - */ -public class RequestThrottlingException extends DriverException { - - public RequestThrottlingException(@NonNull String message) { - this(message, null); - } - - private RequestThrottlingException(String message, ExecutionInfo executionInfo) { - super(message, executionInfo, null, true); - } - - @NonNull - @Override - public DriverException copy() { - return new RequestThrottlingException(getMessage(), getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java deleted file mode 100644 index 030984dc274..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.List; - -/** - * Indicates that we've attempted to connect to a Cassandra node with a protocol version that it - * cannot handle (e.g., connecting to a C* 2.1 node with protocol version 4). - * - *

The only time when this is returned directly to the client (wrapped in a {@link - * AllNodesFailedException}) is at initialization. If it happens later when the driver is already - * connected, it is just logged an the corresponding node is forced down. - */ -public class UnsupportedProtocolVersionException extends DriverException { - private static final long serialVersionUID = 0; - - private final EndPoint endPoint; - private final List attemptedVersions; - - @NonNull - public static UnsupportedProtocolVersionException forSingleAttempt( - @NonNull EndPoint endPoint, @NonNull ProtocolVersion attemptedVersion) { - String message = - String.format("[%s] Host does not support protocol version %s", endPoint, attemptedVersion); - return new UnsupportedProtocolVersionException( - endPoint, message, Collections.singletonList(attemptedVersion), null); - } - - @NonNull - public static UnsupportedProtocolVersionException forNegotiation( - @NonNull EndPoint endPoint, @NonNull List attemptedVersions) { - String message = - String.format( - "[%s] Protocol negotiation failed: could not find a common version (attempted: %s). " - + "Note that the driver does not support Cassandra 2.0 or lower.", - endPoint, attemptedVersions); - return new UnsupportedProtocolVersionException( - endPoint, message, ImmutableList.copyOf(attemptedVersions), null); - } - - public UnsupportedProtocolVersionException( - @Nullable EndPoint endPoint, // technically nullable, but should never be in real life - @NonNull String message, - @NonNull List attemptedVersions) { - this(endPoint, message, attemptedVersions, null); - } - - private UnsupportedProtocolVersionException( - EndPoint endPoint, - String message, - List attemptedVersions, - ExecutionInfo executionInfo) { - super(message, executionInfo, null, true); - this.endPoint = endPoint; - this.attemptedVersions = attemptedVersions; - } - - /** The address of the node that threw the error. 
*/ - @Nullable - public EndPoint getEndPoint() { - return endPoint; - } - - /** The versions that were attempted. */ - @NonNull - public List getAttemptedVersions() { - return attemptedVersions; - } - - @NonNull - @Override - public DriverException copy() { - return new UnsupportedProtocolVersionException( - endPoint, getMessage(), attemptedVersions, getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/Version.java b/core/src/main/java/com/datastax/oss/driver/api/core/Version.java deleted file mode 100644 index 52751e02984..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/Version.java +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import net.jcip.annotations.Immutable; - -/** - * A structured version number. - * - *

It is in the form X.Y.Z, with optional pre-release labels and build metadata. - * - *

Version numbers compare the usual way, the major number (X) is compared first, then the minor - * one (Y) and then the patch level one (Z). Lastly, versions with pre-release sorts before the - * versions that don't have one, and labels are sorted alphabetically if necessary. Build metadata - * are ignored for sorting versions. - */ -@Immutable -public class Version implements Comparable, Serializable { - - private static final long serialVersionUID = 1; - - private static final String VERSION_REGEXP = - "(\\d+)\\.(\\d+)(\\.\\d+)?(\\.\\d+)?([~\\-]\\w[.\\w]*(?:-\\w[.\\w]*)*)?(\\+[.\\w]+)?"; - - private static final Pattern pattern = Pattern.compile(VERSION_REGEXP); - - @NonNull public static final Version V1_0_0 = Objects.requireNonNull(parse("1.0.0")); - @NonNull public static final Version V2_1_0 = Objects.requireNonNull(parse("2.1.0")); - @NonNull public static final Version V2_2_0 = Objects.requireNonNull(parse("2.2.0")); - @NonNull public static final Version V3_0_0 = Objects.requireNonNull(parse("3.0.0")); - @NonNull public static final Version V4_0_0 = Objects.requireNonNull(parse("4.0.0")); - @NonNull public static final Version V4_1_0 = Objects.requireNonNull(parse("4.1.0")); - @NonNull public static final Version V5_0_0 = Objects.requireNonNull(parse("5.0.0")); - @NonNull public static final Version V6_7_0 = Objects.requireNonNull(parse("6.7.0")); - @NonNull public static final Version V6_8_0 = Objects.requireNonNull(parse("6.8.0")); - @NonNull public static final Version V6_9_0 = Objects.requireNonNull(parse("6.9.0")); - - private final int major; - private final int minor; - private final int patch; - private final int dsePatch; - - private final String[] preReleases; - private final String build; - - private Version( - int major, int minor, int patch, int dsePatch, String[] preReleases, String build) { - this.major = major; - this.minor = minor; - this.patch = patch; - this.dsePatch = dsePatch; - this.preReleases = preReleases; - this.build = build; - } 
- - /** - * Parses a version from a string. - * - *

The version string should have primarily the form X.Y.Z to which can be appended one or more - * pre-release label after dashes (2.0.1-beta1, 2.1.4-rc1-SNAPSHOT) and an optional build label - * (2.1.0-beta1+a20ba.sha). Out of convenience, the "patch" version number, Z, can be omitted, in - * which case it is assumed to be 0. - * - * @param version the string to parse. - * @return the parsed version number. - * @throws IllegalArgumentException if the provided string does not represent a valid version. - */ - @Nullable - public static Version parse(@Nullable String version) { - if (version == null) { - return null; - } - - Matcher matcher = pattern.matcher(version); - if (!matcher.matches()) { - throw new IllegalArgumentException("Invalid version number: " + version); - } - - try { - int major = Integer.parseInt(matcher.group(1)); - int minor = Integer.parseInt(matcher.group(2)); - - String pa = matcher.group(3); - int patch = - pa == null || pa.isEmpty() - ? 0 - : Integer.parseInt( - pa.substring(1)); // dropping the initial '.' since it's included this time - - String dse = matcher.group(4); - int dsePatch = - dse == null || dse.isEmpty() - ? -1 - : Integer.parseInt( - dse.substring(1)); // dropping the initial '.' since it's included this time - - String pr = matcher.group(5); - String[] preReleases = - pr == null || pr.isEmpty() - ? null - : pr.substring(1) - .split("-"); // drop initial '-' or '~' then split on the remaining ones - - String bl = matcher.group(6); - String build = bl == null || bl.isEmpty() ? null : bl.substring(1); // drop the initial '+' - - return new Version(major, minor, patch, dsePatch, preReleases, build); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Invalid version number: " + version); - } - } - - /** - * The major version number. - * - * @return the major version number, i.e. X in X.Y.Z. - */ - public int getMajor() { - return major; - } - - /** - * The minor version number. 
- * - * @return the minor version number, i.e. Y in X.Y.Z. - */ - public int getMinor() { - return minor; - } - - /** - * The patch version number. - * - * @return the patch version number, i.e. Z in X.Y.Z. - */ - public int getPatch() { - return patch; - } - - /** - * The DSE patch version number (will only be present for version of Cassandra in DSE). - * - *

DataStax Entreprise (DSE) adds a fourth number to the version number to track potential hot - * fixes and/or DSE specific patches that may have been applied to the Cassandra version. In that - * case, this method returns that fourth number. - * - * @return the DSE patch version number, i.e. D in X.Y.Z.D, or -1 if the version number is not - * from DSE. - */ - public int getDSEPatch() { - return dsePatch; - } - - /** - * The pre-release labels if relevant, i.e. label1 and label2 in X.Y.Z-label1-lable2. - * - * @return the pre-release labels. The return list will be {@code null} if the version number - * doesn't have any. - */ - public List getPreReleaseLabels() { - return preReleases == null ? null : Collections.unmodifiableList(Arrays.asList(preReleases)); - } - - /** - * The build label if there is one. - * - * @return the build label or {@code null} if the version number doesn't have one. - */ - public String getBuildLabel() { - return build; - } - - /** - * The next stable version, i.e. the version stripped of its pre-release labels and build - * metadata. - * - *

This is mostly used during our development stage, where we test the driver against - * pre-release versions of Cassandra like 2.1.0-rc7-SNAPSHOT, but need to compare to the stable - * version 2.1.0 when testing for native protocol compatibility, etc. - * - * @return the next stable version. - */ - public Version nextStable() { - return new Version(major, minor, patch, dsePatch, null, null); - } - - @Override - public int compareTo(@NonNull Version other) { - if (major < other.major) { - return -1; - } - if (major > other.major) { - return 1; - } - - if (minor < other.minor) { - return -1; - } - if (minor > other.minor) { - return 1; - } - - if (patch < other.patch) { - return -1; - } - if (patch > other.patch) { - return 1; - } - - if (dsePatch < 0) { - if (other.dsePatch >= 0) { - return -1; - } - } else { - if (other.dsePatch < 0) { - return 1; - } - - // Both are >= 0 - if (dsePatch < other.dsePatch) { - return -1; - } - if (dsePatch > other.dsePatch) { - return 1; - } - } - - if (preReleases == null) { - return other.preReleases == null ? 0 : 1; - } - if (other.preReleases == null) { - return -1; - } - - for (int i = 0; i < Math.min(preReleases.length, other.preReleases.length); i++) { - int cmp = preReleases[i].compareTo(other.preReleases[i]); - if (cmp != 0) { - return cmp; - } - } - - return Integer.compare(preReleases.length, other.preReleases.length); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Version) { - Version that = (Version) other; - return this.major == that.major - && this.minor == that.minor - && this.patch == that.patch - && this.dsePatch == that.dsePatch - && (this.preReleases == null - ? 
that.preReleases == null - : Arrays.equals(this.preReleases, that.preReleases)) - && Objects.equals(this.build, that.build); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(major, minor, patch, dsePatch, Arrays.hashCode(preReleases), build); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(major).append('.').append(minor).append('.').append(patch); - if (dsePatch >= 0) { - sb.append('.').append(dsePatch); - } - if (preReleases != null) { - for (String preRelease : preReleases) { - sb.append('-').append(preRelease); - } - } - if (build != null) { - sb.append('+').append(build); - } - return sb.toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java deleted file mode 100644 index 47ce62f1461..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.addresstranslation; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; - -/** - * Translates IP addresses received from Cassandra nodes into locally queriable addresses. - * - *

The driver auto-detects new Cassandra nodes added to the cluster through server side pushed - * notifications and system table queries. For each node, the address the driver will receive will - * correspond to the address set as {@code broadcast_rpc_address} in the node's YAML file. In most - * cases, this is the correct address to use by the driver, and that is what is used by default. - * However, sometimes the addresses received through this mechanism will either not be reachable - * directly by the driver, or should not be the preferred address to use to reach the node (for - * instance, the {@code broadcast_rpc_address} set on Cassandra nodes might be a private IP, but - * some clients may have to use a public IP, or go through a router to reach that node). This - * interface addresses such cases, by allowing to translate an address as sent by a Cassandra node - * into another address to be used by the driver for connection. - * - *

The contact point addresses provided at driver initialization are considered translated - * already; in other words, they will be used as-is, without being processed by this component. - */ -public interface AddressTranslator extends AutoCloseable { - - /** - * Translates an address reported by a Cassandra node into the address that the driver will use to - * connect. - */ - @NonNull - InetSocketAddress translate(@NonNull InetSocketAddress address); - - /** Called when the cluster that this translator is associated with closes. */ - @Override - void close(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java deleted file mode 100644 index c73c3e4fb67..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Provides {@link Authenticator} instances to use when connecting to Cassandra nodes. - * - *

See {@link PlainTextAuthProvider} for an implementation which uses SASL PLAIN mechanism to - * authenticate using username/password strings. - */ -public interface AuthProvider extends AutoCloseable { - - /** - * The authenticator to use when connecting to {@code host}. - * - * @param endPoint the Cassandra host to connect to. - * @param serverAuthenticator the configured authenticator on the host. - * @return the authentication implementation to use. - */ - @NonNull - Authenticator newAuthenticator(@NonNull EndPoint endPoint, @NonNull String serverAuthenticator) - throws AuthenticationException; - - /** - * What to do if the server does not send back an authentication challenge (in other words, lets - * the client connect without any form of authentication). - * - *

This is suspicious because having authentication enabled on the client but not on the server - * is probably a configuration mistake. - * - *

Provider implementations are free to handle this however they want; typical approaches are: - * - *

    - *
  • ignoring; - *
  • logging a warning; - *
  • throwing an {@link AuthenticationException} to abort the connection (but note that it - * will be retried according to the {@link ReconnectionPolicy}). - *
- */ - void onMissingChallenge(@NonNull EndPoint endPoint) throws AuthenticationException; -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java deleted file mode 100644 index 28dde2123cb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates an error during the authentication phase while connecting to a node. - * - *

The only time when this is returned directly to the client (wrapped in a {@link - * AllNodesFailedException}) is at initialization. If it happens later when the driver is already - * connected, it is just logged and the connection will be reattempted. - */ -public class AuthenticationException extends RuntimeException { - private static final long serialVersionUID = 0; - - private final EndPoint endPoint; - - public AuthenticationException(@NonNull EndPoint endPoint, @NonNull String message) { - this(endPoint, message, null); - } - - public AuthenticationException( - @NonNull EndPoint endPoint, @NonNull String message, @Nullable Throwable cause) { - super(String.format("Authentication error on node %s: %s", endPoint, message), cause); - this.endPoint = endPoint; - } - - /** The address of the node that encountered the error. */ - @NonNull - public EndPoint getEndPoint() { - return endPoint; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java deleted file mode 100644 index 150a1dfb63f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.auth; - -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletionStage; - -/** - * Handles SASL authentication with Cassandra servers. - * - *

Each time a new connection is created and the server requires authentication, a new instance - * of this class will be created by the corresponding {@link AuthProvider} to handle that - * authentication. The lifecycle of that new {@code Authenticator} will be: - * - *

    - *
  1. the {@link #initialResponse} method will be called. The initial return value will be sent - * to the server to initiate the handshake. - *
  2. the server will respond to each client response by either issuing a challenge or indicating - * that the authentication is complete (successfully or not). If a new challenge is issued, - * the authenticator's {@link #evaluateChallenge} method will be called to produce a response - * that will be sent to the server. This challenge/response negotiation will continue until - * the server responds that authentication is successful (or an {@link - * AuthenticationException} is raised). - *
  3. When the server indicates that authentication is successful, the {@link - * #onAuthenticationSuccess} method will be called with the last information that the server - * may optionally have sent. - *
- * - * The exact nature of the negotiation between client and server is specific to the authentication - * mechanism configured server side. - * - *

Note that, since the methods in this interface will be invoked on a driver I/O thread, they - * all return asynchronous results. If your implementation performs heavy computations or blocking - * calls, you'll want to schedule them on a separate executor, and return a {@code CompletionStage} - * that represents their future completion. If your implementation is fast, lightweight and does not - * perform blocking operations, it might be acceptable to run it on I/O threads directly; in that - * case, implement {@link SyncAuthenticator} instead of this interface. - */ -public interface Authenticator { - - /** - * Obtain an initial response token for initializing the SASL handshake. - * - * @return a completion stage that will complete with the initial response to send to the server - * (which may be {@code null}). Note that, if the returned byte buffer is writable, the driver - * will clear its contents immediately after use (to avoid keeping sensitive - * information in memory); do not reuse the same buffer across multiple invocations. - * Alternatively, if the contents are not sensitive, you can make the buffer {@linkplain - * ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. - */ - @NonNull - CompletionStage initialResponse(); - - /** - * Evaluate a challenge received from the server. Generally, this method should return null when - * authentication is complete from the client perspective. - * - * @param challenge the server's SASL challenge. - * @return a completion stage that will complete with the updated SASL token (which may be null to - * indicate the client requires no further action). Note that, if the returned byte buffer is - * writable, the driver will clear its contents immediately after use (to avoid keeping - * sensitive information in memory); do not reuse the same buffer across multiple invocations. 
- * Alternatively, if the contents are not sensitive, you can make the buffer {@linkplain - * ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. - */ - @NonNull - CompletionStage evaluateChallenge(@Nullable ByteBuffer challenge); - - /** - * Called when authentication is successful with the last information optionally sent by the - * server. - * - * @param token the information sent by the server with the authentication successful message. - * This will be {@code null} if the server sends no particular information on authentication - * success. - * @return a completion stage that completes when the authenticator is done processing this - * response. - */ - @NonNull - CompletionStage onAuthenticationSuccess(@Nullable ByteBuffer token); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.java deleted file mode 100644 index fb85797af9e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.dse.driver.api.core.auth.BaseDseAuthenticator; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Common infrastructure for plain text auth providers. - * - *

This can be reused to write an implementation that retrieves the credentials from another - * source than the configuration. The driver offers one built-in implementation: {@link - * ProgrammaticPlainTextAuthProvider}. - */ -@ThreadSafe -public abstract class PlainTextAuthProviderBase implements AuthProvider { - - private static final Logger LOG = LoggerFactory.getLogger(PlainTextAuthProviderBase.class); - - private final String logPrefix; - - /** - * @param logPrefix a string that will get prepended to the logs (this is used for discrimination - * when you have multiple driver instances executing in the same JVM). Built-in - * implementations fill this with {@link Session#getName()}. - */ - protected PlainTextAuthProviderBase(@NonNull String logPrefix) { - this.logPrefix = Objects.requireNonNull(logPrefix); - } - - /** - * Retrieves the credentials from the underlying source. - * - *

This is invoked every time the driver opens a new connection. - * - * @param endPoint The endpoint being contacted. - * @param serverAuthenticator The authenticator class sent by the endpoint. - */ - @NonNull - protected abstract Credentials getCredentials( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator); - - @NonNull - @Override - public Authenticator newAuthenticator( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) - throws AuthenticationException { - return new PlainTextAuthenticator( - getCredentials(endPoint, serverAuthenticator), endPoint, serverAuthenticator); - } - - @Override - public void onMissingChallenge(@NonNull EndPoint endPoint) { - LOG.warn( - "[{}] {} did not send an authentication challenge; " - + "This is suspicious because the driver expects authentication", - logPrefix, - endPoint); - } - - @Override - public void close() { - // nothing to do - } - - public static class Credentials { - - private final char[] username; - private final char[] password; - private final char[] authorizationId; - - /** - * Builds an instance for username/password authentication, and proxy authentication with the - * given authorizationId. - * - *

This feature is only available with DataStax Enterprise. If the target server is Apache - * Cassandra, the authorizationId will be ignored. - */ - public Credentials( - @NonNull char[] username, @NonNull char[] password, @NonNull char[] authorizationId) { - this.username = Objects.requireNonNull(username); - this.password = Objects.requireNonNull(password); - this.authorizationId = Objects.requireNonNull(authorizationId); - } - - /** Builds an instance for simple username/password authentication. */ - public Credentials(@NonNull char[] username, @NonNull char[] password) { - this(username, password, new char[0]); - } - - @NonNull - public char[] getUsername() { - return username; - } - - /** - * @deprecated this method only exists for backward compatibility. It is a synonym for {@link - * #getUsername()}, which should be used instead. - */ - @Deprecated - @NonNull - public char[] getAuthenticationId() { - return username; - } - - @NonNull - public char[] getPassword() { - return password; - } - - @NonNull - public char[] getAuthorizationId() { - return authorizationId; - } - - /** Clears the credentials from memory when they're no longer needed. */ - protected void clear() { - // Note: this is a bit irrelevant with the built-in provider, because the config already - // caches the credentials in memory. But it might be useful for a custom implementation that - // retrieves the credentials from a different source. - Arrays.fill(getUsername(), (char) 0); - Arrays.fill(getPassword(), (char) 0); - Arrays.fill(getAuthorizationId(), (char) 0); - } - } - - // Implementation note: BaseDseAuthenticator is backward compatible with Cassandra authenticators. - // This will work with both Cassandra (as long as no authorizationId is set) and DSE. 
- protected static class PlainTextAuthenticator extends BaseDseAuthenticator { - - private static final ByteBuffer MECHANISM = - ByteBuffer.wrap("PLAIN".getBytes(StandardCharsets.UTF_8)).asReadOnlyBuffer(); - - private static final ByteBuffer SERVER_INITIAL_CHALLENGE = - ByteBuffer.wrap("PLAIN-START".getBytes(StandardCharsets.UTF_8)).asReadOnlyBuffer(); - - private static final EndPoint DUMMY_END_POINT = - new EndPoint() { - @NonNull - @Override - public SocketAddress resolve() { - return new InetSocketAddress("127.0.0.1", 9042); - } - - @NonNull - @Override - public String asMetricPrefix() { - return ""; // will never be used - } - }; - - private final ByteBuffer encodedCredentials; - private final EndPoint endPoint; - - protected PlainTextAuthenticator( - @NonNull Credentials credentials, - @NonNull EndPoint endPoint, - @NonNull String serverAuthenticator) { - super(serverAuthenticator); - - Objects.requireNonNull(credentials); - Objects.requireNonNull(endPoint); - - ByteBuffer authorizationId = toUtf8Bytes(credentials.getAuthorizationId()); - ByteBuffer username = toUtf8Bytes(credentials.getUsername()); - ByteBuffer password = toUtf8Bytes(credentials.getPassword()); - - this.encodedCredentials = - ByteBuffer.allocate( - authorizationId.remaining() + username.remaining() + password.remaining() + 2); - encodedCredentials.put(authorizationId); - encodedCredentials.put((byte) 0); - encodedCredentials.put(username); - encodedCredentials.put((byte) 0); - encodedCredentials.put(password); - encodedCredentials.flip(); - - clear(authorizationId); - clear(username); - clear(password); - - this.endPoint = endPoint; - } - - /** - * @deprecated Preserved for backward compatibility, implementors should use the 3-arg - * constructor {@code PlainTextAuthenticator(Credentials, EndPoint, String)} instead. 
- */ - @Deprecated - protected PlainTextAuthenticator(@NonNull Credentials credentials) { - this( - credentials, - // It's unlikely that this class was ever extended by third parties, but if it was, assume - // that it was not written for DSE: - // - dummy end point because we should never need to build an auth exception - DUMMY_END_POINT, - // - default OSS authenticator name (the only thing that matters is how this string - // compares to "DseAuthenticator") - "org.apache.cassandra.auth.PasswordAuthenticator"); - } - - private static ByteBuffer toUtf8Bytes(char[] charArray) { - CharBuffer charBuffer = CharBuffer.wrap(charArray); - return Charsets.UTF_8.encode(charBuffer); - } - - private static void clear(ByteBuffer buffer) { - buffer.rewind(); - while (buffer.remaining() > 0) { - buffer.put((byte) 0); - } - } - - @NonNull - @Override - public ByteBuffer getMechanism() { - return MECHANISM; - } - - @NonNull - @Override - public ByteBuffer getInitialServerChallenge() { - return SERVER_INITIAL_CHALLENGE; - } - - @Nullable - @Override - public ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer challenge) { - if (SERVER_INITIAL_CHALLENGE.equals(challenge)) { - return encodedCredentials; - } - throw new AuthenticationException(endPoint, "Incorrect challenge from server"); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.java deleted file mode 100644 index d991f5c5cb5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; - -/** - * A simple plaintext {@link AuthProvider} that receives the credentials programmatically instead of - * pulling them from the configuration. - * - *

To use this class, create an instance with the appropriate credentials to use and pass it to - * your session builder: - * - *

- * AuthProvider authProvider = new ProgrammaticPlainTextAuthProvider("...", "...");
- * CqlSession session =
- *     CqlSession.builder()
- *         .addContactEndPoints(...)
- *         .withAuthProvider(authProvider)
- *         .build();
- * 
- * - *

It also offers the possibility of changing the credentials at runtime. The new credentials - * will be used for all connections initiated after the change. - * - *

Implementation Note: this implementation is not particularly suited for highly-sensitive - * applications: it stores the credentials to use as private fields, and even if the fields are char - * arrays rather than strings to make it difficult to dump their contents, they are never cleared - * until the provider itself is garbage-collected, which typically only happens when the session is - * closed. - * - * @see SessionBuilder#withAuthProvider(AuthProvider) - * @see SessionBuilder#withAuthCredentials(String, String) - * @see SessionBuilder#withAuthCredentials(String, String, String) - */ -@ThreadSafe -public class ProgrammaticPlainTextAuthProvider extends PlainTextAuthProviderBase { - - private volatile char[] username; - private volatile char[] password; - private volatile char[] authorizationId; - - /** Builds an instance for simple username/password authentication. */ - public ProgrammaticPlainTextAuthProvider(@NonNull String username, @NonNull String password) { - this(username, password, ""); - } - - /** - * Builds an instance for username/password authentication, and proxy authentication with the - * given authorizationId. - * - *

This feature is only available with DataStax Enterprise. If the target server is Apache - * Cassandra, use {@link #ProgrammaticPlainTextAuthProvider(String, String)} instead, or set the - * authorizationId to an empty string. - */ - public ProgrammaticPlainTextAuthProvider( - @NonNull String username, @NonNull String password, @NonNull String authorizationId) { - // This will typically be built before the session so we don't know the log prefix yet. Pass an - // empty string, it's only used in one log message. - super(""); - this.username = Strings.requireNotEmpty(username, "username").toCharArray(); - this.password = Strings.requireNotEmpty(password, "password").toCharArray(); - this.authorizationId = - Objects.requireNonNull(authorizationId, "authorizationId cannot be null").toCharArray(); - } - - /** - * Changes the username. - * - *

The new credentials will be used for all connections initiated after this method was called. - * - * @param username the new name. - */ - public void setUsername(@NonNull String username) { - this.username = Strings.requireNotEmpty(username, "username").toCharArray(); - } - - /** - * Changes the password. - * - *

The new credentials will be used for all connections initiated after this method was called. - * - * @param password the new password. - */ - public void setPassword(@NonNull String password) { - this.password = Strings.requireNotEmpty(password, "password").toCharArray(); - } - - /** - * Changes the authorization id. - * - *

The new credentials will be used for all connections initiated after this method was called. - * - *

This feature is only available with DataStax Enterprise. If the target server is Apache - * Cassandra, this method should not be used. - * - * @param authorizationId the new authorization id. - */ - public void setAuthorizationId(@NonNull String authorizationId) { - this.authorizationId = - Objects.requireNonNull(authorizationId, "authorizationId cannot be null").toCharArray(); - } - - /** - * {@inheritDoc} - * - *

This implementation disregards the endpoint being connected to as well as the authenticator - * class sent by the server, and always returns the same credentials. - */ - @NonNull - @Override - protected Credentials getCredentials( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { - return new Credentials(username.clone(), password.clone(), authorizationId.clone()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java deleted file mode 100644 index 016ac25680b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletionStage; - -/** - * An authenticator that performs all of its operations synchronously, on the calling thread. - * - *

This is intended for simple implementations that are fast and lightweight enough, and do not - * perform any blocking operations. - */ -public interface SyncAuthenticator extends Authenticator { - - /** - * Obtain an initial response token for initializing the SASL handshake. - * - *

{@link #initialResponse()} calls this and wraps the result in an immediately completed - * future. - * - * @return The initial response to send to the server (which may be {@code null}). Note that, if - * the returned byte buffer is writable, the driver will clear its contents immediately - * after use (to avoid keeping sensitive information in memory); do not reuse the same buffer - * across multiple invocations. Alternatively, if the contents are not sensitive, you can make - * the buffer {@linkplain ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. - */ - @Nullable - ByteBuffer initialResponseSync(); - - /** - * Evaluate a challenge received from the server. - * - *

{@link #evaluateChallenge(ByteBuffer)} calls this and wraps the result in an immediately - * completed future. - * - * @param challenge the server's SASL challenge; may be {@code null}. - * @return The updated SASL token (which may be {@code null} to indicate the client requires no - * further action). Note that, if the returned byte buffer is writable, the driver will - * clear its contents immediately after use (to avoid keeping sensitive information in - * memory); do not reuse the same buffer across multiple invocations. Alternatively, if the - * contents are not sensitive, you can make the buffer {@linkplain - * ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. - */ - @Nullable - ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer challenge); - - /** - * Called when authentication is successful with the last information optionally sent by the - * server. - * - *

{@link #onAuthenticationSuccess(ByteBuffer)} calls this, and then returns an immediately - * completed future. - * - * @param token the information sent by the server with the authentication successful message. - * This will be {@code null} if the server sends no particular information on authentication - * success. - */ - void onAuthenticationSuccessSync(@Nullable ByteBuffer token); - - @NonNull - @Override - default CompletionStage initialResponse() { - return CompletableFutures.wrap(this::initialResponseSync); - } - - @NonNull - @Override - default CompletionStage evaluateChallenge(@Nullable ByteBuffer challenge) { - return CompletableFutures.wrap(() -> evaluateChallengeSync(challenge)); - } - - @NonNull - @Override - default CompletionStage onAuthenticationSuccess(@Nullable ByteBuffer token) { - return CompletableFutures.wrap( - () -> { - onAuthenticationSuccessSync(token); - return null; - }); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java deleted file mode 100644 index b265b9ba463..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Support for authentication between the driver and Cassandra nodes. - * - *

Authentication is performed on each newly open connection. It is customizable via the {@link - * com.datastax.oss.driver.api.core.auth.AuthProvider} interface. - */ -package com.datastax.oss.driver.api.core.auth; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java deleted file mode 100644 index 60c44193577..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java +++ /dev/null @@ -1,1057 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Built-in driver options for the core driver. - * - *

Refer to {@code reference.conf} in the driver codebase for a full description of each option. - */ -public enum DefaultDriverOption implements DriverOption { - /** - * The contact points to use for the initial connection to the cluster. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - CONTACT_POINTS("basic.contact-points"), - /** - * A name that uniquely identifies the driver instance. - * - *

Value-type: {@link String} - */ - SESSION_NAME("basic.session-name"), - /** - * The name of the keyspace that the session should initially be connected to. - * - *

Value-type: {@link String} - */ - SESSION_KEYSPACE("basic.session-keyspace"), - /** - * How often the driver tries to reload the configuration. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONFIG_RELOAD_INTERVAL("basic.config-reload-interval"), - - /** - * How long the driver waits for a request to complete. - * - *

Value-type: {@link java.time.Duration Duration} - */ - REQUEST_TIMEOUT("basic.request.timeout"), - /** - * The consistency level. - * - *

Value-Type: {@link String} - */ - REQUEST_CONSISTENCY("basic.request.consistency"), - /** - * The page size. - * - *

Value-Type: int - */ - REQUEST_PAGE_SIZE("basic.request.page-size"), - /** - * The serial consistency level. - * - *

Value-type: {@link String} - */ - REQUEST_SERIAL_CONSISTENCY("basic.request.serial-consistency"), - /** - * The default idempotence of a request. - * - *

Value-type: boolean - */ - REQUEST_DEFAULT_IDEMPOTENCE("basic.request.default-idempotence"), - - // LOAD_BALANCING_POLICY is a collection of sub-properties - LOAD_BALANCING_POLICY("basic.load-balancing-policy"), - /** - * The class of the load balancing policy. - * - *

Value-type: {@link String} - */ - LOAD_BALANCING_POLICY_CLASS("basic.load-balancing-policy.class"), - /** - * The datacenter that is considered "local". - * - *

Value-type: {@link String} - */ - LOAD_BALANCING_LOCAL_DATACENTER("basic.load-balancing-policy.local-datacenter"), - /** - * A custom filter to include/exclude nodes. - * - *

Value-Type: {@link String} - * - * @deprecated use {@link #LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS} instead. - */ - @Deprecated - LOAD_BALANCING_FILTER_CLASS("basic.load-balancing-policy.filter.class"), - - /** - * The timeout to use for internal queries that run as part of the initialization process - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONNECTION_INIT_QUERY_TIMEOUT("advanced.connection.init-query-timeout"), - /** - * The timeout to use when the driver changes the keyspace on a connection at runtime. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONNECTION_SET_KEYSPACE_TIMEOUT("advanced.connection.set-keyspace-timeout"), - /** - * The maximum number of requests that can be executed concurrently on a connection - * - *

Value-type: int - */ - CONNECTION_MAX_REQUESTS("advanced.connection.max-requests-per-connection"), - /** - * The maximum number of "orphaned" requests before a connection gets closed automatically. - * - *

Value-type: int - */ - CONNECTION_MAX_ORPHAN_REQUESTS("advanced.connection.max-orphan-requests"), - /** - * Whether to log non-fatal errors when the driver tries to open a new connection. - * - *

Value-type: boolean - */ - CONNECTION_WARN_INIT_ERROR("advanced.connection.warn-on-init-error"), - /** - * The number of connections in the LOCAL pool. - * - *

Value-type: int - */ - CONNECTION_POOL_LOCAL_SIZE("advanced.connection.pool.local.size"), - /** - * The number of connections in the REMOTE pool. - * - *

Value-type: int - */ - CONNECTION_POOL_REMOTE_SIZE("advanced.connection.pool.remote.size"), - - /** - * Whether to schedule reconnection attempts if all contact points are unreachable on the first - * initialization attempt. - * - *

Value-type: boolean - */ - RECONNECT_ON_INIT("advanced.reconnect-on-init"), - - /** - * The class of the reconnection policy. - * - *

Value-type: {@link String} - */ - RECONNECTION_POLICY_CLASS("advanced.reconnection-policy.class"), - /** - * Base delay for computing time between reconnection attempts. - * - *

Value-type: {@link java.time.Duration Duration} - */ - RECONNECTION_BASE_DELAY("advanced.reconnection-policy.base-delay"), - /** - * Maximum delay between reconnection attempts. - * - *

Value-type: {@link java.time.Duration Duration} - */ - RECONNECTION_MAX_DELAY("advanced.reconnection-policy.max-delay"), - - // RETRY_POLICY is a collection of sub-properties - RETRY_POLICY("advanced.retry-policy"), - /** - * The class of the retry policy. - * - *

Value-type: {@link String} - */ - RETRY_POLICY_CLASS("advanced.retry-policy.class"), - - // SPECULATIVE_EXECUTION_POLICY is a collection of sub-properties - SPECULATIVE_EXECUTION_POLICY("advanced.speculative-execution-policy"), - /** - * The class of the speculative execution policy. - * - *

Value-type: {@link String} - */ - SPECULATIVE_EXECUTION_POLICY_CLASS("advanced.speculative-execution-policy.class"), - /** - * The maximum number of executions. - * - *

Value-type: int - */ - SPECULATIVE_EXECUTION_MAX("advanced.speculative-execution-policy.max-executions"), - /** - * The delay between each execution. - * - *

Value-type: {@link java.time.Duration Duration} - */ - SPECULATIVE_EXECUTION_DELAY("advanced.speculative-execution-policy.delay"), - - /** - * The class of the authentication provider. - * - *

Value-type: {@link String} - */ - AUTH_PROVIDER_CLASS("advanced.auth-provider.class"), - /** - * Plain text auth provider username. - * - *

Value-type: {@link String} - */ - AUTH_PROVIDER_USER_NAME("advanced.auth-provider.username"), - /** - * Plain text auth provider password. - * - *

Value-type: {@link String} - */ - AUTH_PROVIDER_PASSWORD("advanced.auth-provider.password"), - - /** - * The class of the SSL Engine Factory. - * - *

Value-type: {@link String} - */ - SSL_ENGINE_FACTORY_CLASS("advanced.ssl-engine-factory.class"), - /** - * The cipher suites to enable when creating an SSLEngine for a connection. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - SSL_CIPHER_SUITES("advanced.ssl-engine-factory.cipher-suites"), - /** - * Whether or not to require validation that the hostname of the server certificate's common name - * matches the hostname of the server being connected to. - * - *

Value-type: boolean - */ - SSL_HOSTNAME_VALIDATION("advanced.ssl-engine-factory.hostname-validation"), - /** - * The location of the keystore file. - * - *

Value-type: {@link String} - */ - SSL_KEYSTORE_PATH("advanced.ssl-engine-factory.keystore-path"), - /** - * The keystore password. - * - *

Value-type: {@link String} - */ - SSL_KEYSTORE_PASSWORD("advanced.ssl-engine-factory.keystore-password"), - /** - * The location of the truststore file. - * - *

Value-type: {@link String} - */ - SSL_TRUSTSTORE_PATH("advanced.ssl-engine-factory.truststore-path"), - /** - * The truststore password. - * - *

Value-type: {@link String} - */ - SSL_TRUSTSTORE_PASSWORD("advanced.ssl-engine-factory.truststore-password"), - - /** - * The class of the generator that assigns a microsecond timestamp to each request. - * - *

Value-type: {@link String} - */ - TIMESTAMP_GENERATOR_CLASS("advanced.timestamp-generator.class"), - /** - * Whether to force the driver to use Java's millisecond-precision system clock. - * - *

Value-type: boolean - */ - TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK("advanced.timestamp-generator.force-java-clock"), - /** - * How far in the future timestamps are allowed to drift before the warning is logged. - * - *

Value-type: {@link java.time.Duration Duration} - */ - TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD( - "advanced.timestamp-generator.drift-warning.threshold"), - /** - * How often the warning will be logged if timestamps keep drifting above the threshold. - * - *

Value-type: {@link java.time.Duration Duration} - */ - TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL("advanced.timestamp-generator.drift-warning.interval"), - - /** - * The class of a session-wide component that tracks the outcome of requests. - * - *

Value-type: {@link String} - * - * @deprecated Use {@link #REQUEST_TRACKER_CLASSES} instead. - */ - @Deprecated - REQUEST_TRACKER_CLASS("advanced.request-tracker.class"), - /** - * Whether to log successful requests. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_SUCCESS_ENABLED("advanced.request-tracker.logs.success.enabled"), - /** - * The threshold to classify a successful request as "slow". - * - *

Value-type: {@link java.time.Duration Duration} - */ - REQUEST_LOGGER_SLOW_THRESHOLD("advanced.request-tracker.logs.slow.threshold"), - /** - * Whether to log slow requests. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_SLOW_ENABLED("advanced.request-tracker.logs.slow.enabled"), - /** - * Whether to log failed requests. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_ERROR_ENABLED("advanced.request-tracker.logs.error.enabled"), - /** - * The maximum length of the query string in the log message. - * - *

Value-type: int - */ - REQUEST_LOGGER_MAX_QUERY_LENGTH("advanced.request-tracker.logs.max-query-length"), - /** - * Whether to log bound values in addition to the query string. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_VALUES("advanced.request-tracker.logs.show-values"), - /** - * The maximum length for bound values in the log message. - * - *

Value-type: int - */ - REQUEST_LOGGER_MAX_VALUE_LENGTH("advanced.request-tracker.logs.max-value-length"), - /** - * The maximum number of bound values to log. - * - *

Value-type: int - */ - REQUEST_LOGGER_MAX_VALUES("advanced.request-tracker.logs.max-values"), - /** - * Whether to log stack traces for failed queries. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_STACK_TRACES("advanced.request-tracker.logs.show-stack-traces"), - - /** - * The class of a session-wide component that controls the rate at which requests are executed. - * - *

Value-type: {@link String} - */ - REQUEST_THROTTLER_CLASS("advanced.throttler.class"), - /** - * The maximum number of requests that are allowed to execute in parallel. - * - *

Value-type: int - */ - REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS("advanced.throttler.max-concurrent-requests"), - /** - * The maximum allowed request rate. - * - *

Value-type: int - */ - REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND("advanced.throttler.max-requests-per-second"), - /** - * The maximum number of requests that can be enqueued when the throttling threshold is exceeded. - * - *

Value-type: int - */ - REQUEST_THROTTLER_MAX_QUEUE_SIZE("advanced.throttler.max-queue-size"), - /** - * How often the throttler attempts to dequeue requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - REQUEST_THROTTLER_DRAIN_INTERVAL("advanced.throttler.drain-interval"), - - /** - * The class of a session-wide component that listens for node state changes. - * - *

Value-type: {@link String} - * - * @deprecated Use {@link #METADATA_NODE_STATE_LISTENER_CLASSES} instead. - */ - @Deprecated - METADATA_NODE_STATE_LISTENER_CLASS("advanced.node-state-listener.class"), - - /** - * The class of a session-wide component that listens for schema changes. - * - *

Value-type: {@link String} - * - * @deprecated Use {@link #METADATA_SCHEMA_CHANGE_LISTENER_CLASSES} instead. - */ - @Deprecated - METADATA_SCHEMA_CHANGE_LISTENER_CLASS("advanced.schema-change-listener.class"), - - /** - * The class of the address translator to use to convert the addresses sent by Cassandra nodes - * into ones that the driver uses to connect. - * - *

Value-type: {@link String} - */ - ADDRESS_TRANSLATOR_CLASS("advanced.address-translator.class"), - - /** - * The native protocol version to use. - * - *

Value-type: {@link String} - */ - PROTOCOL_VERSION("advanced.protocol.version"), - /** - * The name of the algorithm used to compress protocol frames. - * - *

Value-type: {@link String} - */ - PROTOCOL_COMPRESSION("advanced.protocol.compression"), - /** - * The maximum length, in bytes, of the frames supported by the driver. - * - *

Value-type: long - */ - PROTOCOL_MAX_FRAME_LENGTH("advanced.protocol.max-frame-length"), - - /** - * Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active - * keyspace. - * - *

Value-type: boolean - */ - REQUEST_WARN_IF_SET_KEYSPACE("advanced.request.warn-if-set-keyspace"), - /** - * How many times the driver will attempt to fetch the query trace if it is not ready yet. - * - *

Value-type: int - */ - REQUEST_TRACE_ATTEMPTS("advanced.request.trace.attempts"), - /** - * The interval between each attempt. - * - *

Value-type: {@link java.time.Duration Duration} - */ - REQUEST_TRACE_INTERVAL("advanced.request.trace.interval"), - /** - * The consistency level to use for trace queries. - * - *

Value-type: {@link String} - */ - REQUEST_TRACE_CONSISTENCY("advanced.request.trace.consistency"), - - /** - * List of enabled session-level metrics. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - METRICS_SESSION_ENABLED("advanced.metrics.session.enabled"), - /** - * List of enabled node-level metrics. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - METRICS_NODE_ENABLED("advanced.metrics.node.enabled"), - /** - * The largest latency that we expect to record for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_CQL_REQUESTS_HIGHEST("advanced.metrics.session.cql-requests.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for - * requests. - * - *

Value-type: int - */ - METRICS_SESSION_CQL_REQUESTS_DIGITS("advanced.metrics.session.cql-requests.significant-digits"), - /** - * The interval at which percentile data is refreshed for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_CQL_REQUESTS_INTERVAL("advanced.metrics.session.cql-requests.refresh-interval"), - /** - * The largest latency that we expect to record for throttling. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_THROTTLING_HIGHEST("advanced.metrics.session.throttling.delay.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for - * throttling. - * - *

Value-type: int - */ - METRICS_SESSION_THROTTLING_DIGITS("advanced.metrics.session.throttling.delay.significant-digits"), - /** - * The interval at which percentile data is refreshed for throttling. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_THROTTLING_INTERVAL("advanced.metrics.session.throttling.delay.refresh-interval"), - /** - * The largest latency that we expect to record for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_CQL_MESSAGES_HIGHEST("advanced.metrics.node.cql-messages.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for - * requests. - * - *

Value-type: int - */ - METRICS_NODE_CQL_MESSAGES_DIGITS("advanced.metrics.node.cql-messages.significant-digits"), - /** - * The interval at which percentile data is refreshed for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_CQL_MESSAGES_INTERVAL("advanced.metrics.node.cql-messages.refresh-interval"), - - /** - * Whether or not to disable the Nagle algorithm. - * - *

Value-type: boolean - */ - SOCKET_TCP_NODELAY("advanced.socket.tcp-no-delay"), - /** - * Whether or not to enable TCP keep-alive probes. - * - *

Value-type: boolean - */ - SOCKET_KEEP_ALIVE("advanced.socket.keep-alive"), - /** - * Whether or not to allow address reuse. - * - *

Value-type: boolean - */ - SOCKET_REUSE_ADDRESS("advanced.socket.reuse-address"), - /** - * Sets the linger interval. - * - *

Value-type: int - */ - SOCKET_LINGER_INTERVAL("advanced.socket.linger-interval"), - /** - * Sets a hint to the size of the underlying buffers for incoming network I/O. - * - *

Value-type: int - */ - SOCKET_RECEIVE_BUFFER_SIZE("advanced.socket.receive-buffer-size"), - /** - * Sets a hint to the size of the underlying buffers for outgoing network I/O. - * - *

Value-type: int - */ - SOCKET_SEND_BUFFER_SIZE("advanced.socket.send-buffer-size"), - - /** - * The connection heartbeat interval. - * - *

Value-type: {@link java.time.Duration Duration} - */ - HEARTBEAT_INTERVAL("advanced.heartbeat.interval"), - /** - * How long the driver waits for the response to a heartbeat. - * - *

Value-type: {@link java.time.Duration Duration} - */ - HEARTBEAT_TIMEOUT("advanced.heartbeat.timeout"), - - /** - * How long the driver waits to propagate a Topology event. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METADATA_TOPOLOGY_WINDOW("advanced.metadata.topology-event-debouncer.window"), - /** - * The maximum number of events that can accumulate. - * - *

Value-type: int - */ - METADATA_TOPOLOGY_MAX_EVENTS("advanced.metadata.topology-event-debouncer.max-events"), - /** - * Whether schema metadata is enabled. - * - *

Value-type: boolean - */ - METADATA_SCHEMA_ENABLED("advanced.metadata.schema.enabled"), - /** - * The timeout for the requests to the schema tables. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METADATA_SCHEMA_REQUEST_TIMEOUT("advanced.metadata.schema.request-timeout"), - /** - * The page size for the requests to the schema tables. - * - *

Value-type: int - */ - METADATA_SCHEMA_REQUEST_PAGE_SIZE("advanced.metadata.schema.request-page-size"), - /** - * The list of keyspaces for which schema and token metadata should be maintained. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - METADATA_SCHEMA_REFRESHED_KEYSPACES("advanced.metadata.schema.refreshed-keyspaces"), - /** - * How long the driver waits to apply a refresh. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METADATA_SCHEMA_WINDOW("advanced.metadata.schema.debouncer.window"), - /** - * The maximum number of refreshes that can accumulate. - * - *

Value-type: int - */ - METADATA_SCHEMA_MAX_EVENTS("advanced.metadata.schema.debouncer.max-events"), - /** - * Whether token metadata is enabled. - * - *

Value-type: boolean - */ - METADATA_TOKEN_MAP_ENABLED("advanced.metadata.token-map.enabled"), - - /** - * How long the driver waits for responses to control queries. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTROL_CONNECTION_TIMEOUT("advanced.control-connection.timeout"), - /** - * The interval between each schema agreement check attempt. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTROL_CONNECTION_AGREEMENT_INTERVAL("advanced.control-connection.schema-agreement.interval"), - /** - * The timeout after which schema agreement fails. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTROL_CONNECTION_AGREEMENT_TIMEOUT("advanced.control-connection.schema-agreement.timeout"), - /** - * Whether to log a warning if schema agreement fails. - * - *

Value-type: boolean - */ - CONTROL_CONNECTION_AGREEMENT_WARN("advanced.control-connection.schema-agreement.warn-on-failure"), - - /** - * Whether `Session.prepare` calls should be sent to all nodes in the cluster. - * - *

Value-type: boolean - */ - PREPARE_ON_ALL_NODES("advanced.prepared-statements.prepare-on-all-nodes"), - /** - * Whether the driver tries to prepare on new nodes at all. - * - *

Value-type: boolean - */ - REPREPARE_ENABLED("advanced.prepared-statements.reprepare-on-up.enabled"), - /** - * Whether to check `system.prepared_statements` on the target node before repreparing. - * - *

Value-type: boolean - */ - REPREPARE_CHECK_SYSTEM_TABLE("advanced.prepared-statements.reprepare-on-up.check-system-table"), - /** - * The maximum number of statements that should be reprepared. - * - *

Value-type: int - */ - REPREPARE_MAX_STATEMENTS("advanced.prepared-statements.reprepare-on-up.max-statements"), - /** - * The maximum number of concurrent requests when repreparing. - * - *

Value-type: int - */ - REPREPARE_MAX_PARALLELISM("advanced.prepared-statements.reprepare-on-up.max-parallelism"), - /** - * The request timeout when repreparing. - * - *

Value-type: {@link java.time.Duration Duration} - */ - REPREPARE_TIMEOUT("advanced.prepared-statements.reprepare-on-up.timeout"), - - /** - * The number of threads in the I/O group. - * - *

Value-type: int - */ - NETTY_IO_SIZE("advanced.netty.io-group.size"), - /** - * Quiet period for I/O group shutdown. - * - *

Value-type: int - */ - NETTY_IO_SHUTDOWN_QUIET_PERIOD("advanced.netty.io-group.shutdown.quiet-period"), - /** - * Max time to wait for I/O group shutdown. - * - *

Value-type: int - */ - NETTY_IO_SHUTDOWN_TIMEOUT("advanced.netty.io-group.shutdown.timeout"), - /** - * Units for I/O group quiet period and timeout. - * - *

Value-type: {@link String} - */ - NETTY_IO_SHUTDOWN_UNIT("advanced.netty.io-group.shutdown.unit"), - /** - * The number of threads in the Admin group. - * - *

Value-type: int - */ - NETTY_ADMIN_SIZE("advanced.netty.admin-group.size"), - /** - * Quiet period for admin group shutdown. - * - *

Value-type: int - */ - NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD("advanced.netty.admin-group.shutdown.quiet-period"), - /** - * Max time to wait for admin group shutdown. - * - *

Value-type: {@link String} - */ - NETTY_ADMIN_SHUTDOWN_TIMEOUT("advanced.netty.admin-group.shutdown.timeout"), - /** - * Units for admin group quite period and timeout. - * - *

Value-type: {@link String} - */ - NETTY_ADMIN_SHUTDOWN_UNIT("advanced.netty.admin-group.shutdown.unit"), - - /** @deprecated This option was removed in version 4.6.1. */ - @Deprecated - COALESCER_MAX_RUNS("advanced.coalescer.max-runs-with-no-work"), - /** - * The coalescer reschedule interval. - * - *

Value-type: {@link java.time.Duration Duration} - */ - COALESCER_INTERVAL("advanced.coalescer.reschedule-interval"), - - /** - * Whether to resolve the addresses passed to `basic.contact-points`. - * - *

Value-type: boolean - */ - RESOLVE_CONTACT_POINTS("advanced.resolve-contact-points"), - - /** - * This is how frequent the timer should wake up to check for timed-out tasks or speculative - * executions. - * - *

Value-type: {@link java.time.Duration Duration} - */ - NETTY_TIMER_TICK_DURATION("advanced.netty.timer.tick-duration"), - /** - * Number of ticks in the Timer wheel. - * - *

Value-type: int - */ - NETTY_TIMER_TICKS_PER_WHEEL("advanced.netty.timer.ticks-per-wheel"), - - /** - * Whether logging of server warnings generated during query execution should be disabled by the - * driver. - * - *

Value-type: boolean - */ - REQUEST_LOG_WARNINGS("advanced.request.log-warnings"), - - /** - * Whether the threads created by the driver should be daemon threads. - * - *

Value-type: boolean - */ - NETTY_DAEMON("advanced.netty.daemon"), - - /** - * The location of the cloud secure bundle used to connect to DataStax Apache Cassandra as a - * service. - * - *

Value-type: {@link String} - */ - CLOUD_SECURE_CONNECT_BUNDLE("basic.cloud.secure-connect-bundle"), - - /** - * Whether the slow replica avoidance should be enabled in the default LBP. - * - *

Value-type: boolean - */ - LOAD_BALANCING_POLICY_SLOW_AVOIDANCE("basic.load-balancing-policy.slow-replica-avoidance"), - - /** - * The timeout to use when establishing driver connections. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONNECTION_CONNECT_TIMEOUT("advanced.connection.connect-timeout"), - - /** - * The maximum number of live sessions that are allowed to coexist in a given VM. - * - *

Value-type: int - */ - SESSION_LEAK_THRESHOLD("advanced.session-leak.threshold"), - /** - * The period of inactivity after which the node level metrics will be evicted. The eviction will - * happen only if none of the enabled node-level metrics is updated for a given node within this - * time window. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_EXPIRE_AFTER("advanced.metrics.node.expire-after"), - - /** - * The classname of the desired MetricsFactory implementation. - * - *

Value-type: {@link String} - */ - METRICS_FACTORY_CLASS("advanced.metrics.factory.class"), - - /** - * The maximum number of nodes from remote DCs to include in query plans. - * - *

Value-Type: int - */ - LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC( - "advanced.load-balancing-policy.dc-failover.max-nodes-per-remote-dc"), - /** - * Whether to consider nodes from remote DCs if the request's consistency level is local. - * - *

Value-Type: boolean - */ - LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS( - "advanced.load-balancing-policy.dc-failover.allow-for-local-consistency-levels"), - - /** - * The classname of the desired {@code MetricIdGenerator} implementation. - * - *

Value-type: {@link String} - */ - METRICS_ID_GENERATOR_CLASS("advanced.metrics.id-generator.class"), - - /** - * The value of the prefix to prepend to all metric names. - * - *

Value-type: {@link String} - */ - METRICS_ID_GENERATOR_PREFIX("advanced.metrics.id-generator.prefix"), - - /** - * The class name of a custom {@link - * com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator}. - * - *

Value-Type: {@link String} - */ - LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS("basic.load-balancing-policy.evaluator.class"), - - /** - * The shortest latency that we expect to record for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_CQL_REQUESTS_LOWEST("advanced.metrics.session.cql-requests.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: List of {@link java.time.Duration Duration} - */ - METRICS_SESSION_CQL_REQUESTS_SLO("advanced.metrics.session.cql-requests.slo"), - - /** - * The shortest latency that we expect to record for throttling. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_THROTTLING_LOWEST("advanced.metrics.session.throttling.delay.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: List of {@link java.time.Duration Duration} - */ - METRICS_SESSION_THROTTLING_SLO("advanced.metrics.session.throttling.delay.slo"), - - /** - * The shortest latency that we expect to record for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_CQL_MESSAGES_LOWEST("advanced.metrics.node.cql-messages.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: List of {@link java.time.Duration Duration} - */ - METRICS_NODE_CQL_MESSAGES_SLO("advanced.metrics.node.cql-messages.slo"), - - /** - * Whether the prepared statements cache use weak values. - * - *

Value-type: boolean - */ - PREPARED_CACHE_WEAK_VALUES("advanced.prepared-statements.prepared-cache.weak-values"), - - /** - * The classes of session-wide components that track the outcome of requests. - * - *

Value-type: List of {@link String} - */ - REQUEST_TRACKER_CLASSES("advanced.request-tracker.classes"), - - /** - * The classes of session-wide components that listen for node state changes. - * - *

Value-type: List of {@link String} - */ - METADATA_NODE_STATE_LISTENER_CLASSES("advanced.node-state-listener.classes"), - - /** - * The classes of session-wide components that listen for schema changes. - * - *

Value-type: List of {@link String} - */ - METADATA_SCHEMA_CHANGE_LISTENER_CLASSES("advanced.schema-change-listener.classes"), - /** - * Optional list of percentiles to publish for cql-requests metric. Produces an additional time - * series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES( - "advanced.metrics.session.cql-requests.publish-percentiles"), - /** - * Optional list of percentiles to publish for node cql-messages metric. Produces an additional - * time series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES( - "advanced.metrics.node.cql-messages.publish-percentiles"), - /** - * Optional list of percentiles to publish for throttling delay metric.Produces an additional time - * series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES( - "advanced.metrics.session.throttling.delay.publish-percentiles"), - /** - * Adds histogram buckets used to generate aggregable percentile approximations in monitoring - * systems that have query facilities to do so (e.g. Prometheus histogram_quantile, Atlas - * percentiles). - * - *

Value-type: boolean - */ - METRICS_GENERATE_AGGREGABLE_HISTOGRAMS("advanced.metrics.histograms.generate-aggregable"), - /** - * The duration between attempts to reload the keystore. - * - *

Value-type: {@link java.time.Duration} - */ - SSL_KEYSTORE_RELOAD_INTERVAL("advanced.ssl-engine-factory.keystore-reload-interval"), - /** - * Ordered preference list of remote dcs optionally supplied for automatic failover. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS( - "advanced.load-balancing-policy.dc-failover.preferred-remote-dcs"), - /** - * Whether or not to do a DNS reverse-lookup of provided server addresses for SAN addresses. - * - *

Value-type: boolean - */ - SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN("advanced.ssl-engine-factory.allow-dns-reverse-lookup-san"), - /** - * The class of session-wide component that generates request IDs. - * - *

Value-type: {@link String} - */ - REQUEST_ID_GENERATOR_CLASS("advanced.request-id.generator.class"), - /** - * An address to always translate all node addresses to that same proxy hostname no matter what IP - * address a node has, but still using its native transport port. - * - *

Value-Type: {@link String} - */ - ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME("advanced.address-translator.advertised-hostname"), - /** - * A map of Cassandra node subnets (CIDR notations) to target addresses, for example (note quoted - * keys): - * - *

-   * advanced.address-translator.subnet-addresses {
-   *   "100.64.0.0/15" = "cassandra.datacenter1.com:9042"
-   *   "100.66.0.0/15" = "cassandra.datacenter2.com:9042"
-   *   # IPv6 example:
-   *   # "::ffff:6440:0/111" = "cassandra.datacenter1.com:9042"
-   *   # "::ffff:6442:0/111" = "cassandra.datacenter2.com:9042"
-   * }
-   * 
- * - * Note: subnets must be represented as prefix blocks, see {@link - * inet.ipaddr.Address#isPrefixBlock()}. - * - *

Value type: {@link java.util.Map Map}<{@link String},{@link String}> - */ - ADDRESS_TRANSLATOR_SUBNET_ADDRESSES("advanced.address-translator.subnet-addresses"), - /** - * A default address to fallback to if Cassandra node IP isn't contained in any of the configured - * subnets. - * - *

Value-Type: {@link String} - */ - ADDRESS_TRANSLATOR_DEFAULT_ADDRESS("advanced.address-translator.default-address"), - /** - * Whether to resolve the addresses on initialization (if true) or on each node (re-)connection - * (if false). Defaults to false. - * - *

Value-Type: boolean - */ - ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES("advanced.address-translator.resolve-addresses"); - - private final String path; - - DefaultDriverOption(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java deleted file mode 100644 index 88519c82a22..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** - * The configuration of the driver. - * - *

It is composed of options, that are organized into profiles. There is a default profile that - * is always present, and additional, named profiles, that can override part of the options. - * Profiles can be used to categorize queries that use the same parameters (for example, an - * "analytics" profile vs. a "transactional" profile). - */ -public interface DriverConfig { - - /** - * Alias to get the default profile, which is stored under the name {@link - * DriverExecutionProfile#DEFAULT_NAME} and always present. - */ - @NonNull - default DriverExecutionProfile getDefaultProfile() { - return getProfile(DriverExecutionProfile.DEFAULT_NAME); - } - - /** @throws IllegalArgumentException if there is no profile with this name. */ - @NonNull - DriverExecutionProfile getProfile(@NonNull String profileName); - - /** Returns an immutable view of all named profiles (including the default profile). */ - @NonNull - Map getProfiles(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java deleted file mode 100644 index 15fae232d17..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java +++ /dev/null @@ -1,386 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.internal.core.config.composite.CompositeDriverConfigLoader; -import com.datastax.oss.driver.internal.core.config.map.MapBasedDriverConfigLoader; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultProgrammaticDriverConfigLoaderBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.File; -import java.net.URL; -import java.nio.file.Path; -import java.util.concurrent.CompletionStage; - -/** - * Manages the initialization, and optionally the periodic reloading, of the driver configuration. - * - * @see SessionBuilder#withConfigLoader(DriverConfigLoader) - */ -public interface DriverConfigLoader extends AutoCloseable { - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config) except - * that application-specific classpath resources will be located using the provided {@link - * ClassLoader} instead of {@linkplain Thread#getContextClassLoader() the current thread's context - * class loader}. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromDefaults(@NonNull ClassLoader appClassLoader) { - return new DefaultDriverConfigLoader(appClassLoader); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are loaded from a classpath resource with a custom name. - * - *

The class loader used to locate application-specific classpath resources is {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. This might not be - * suitable for OSGi deployments, which should use {@link #fromClasspath(String, ClassLoader)} - * instead. - * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • {@code .conf} (all resources on classpath with this name) - *
  • {@code .json} (all resources on classpath with this name) - *
  • {@code .properties} (all resources on classpath with this name) - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromClasspath(@NonNull String resourceBaseName) { - return fromClasspath(resourceBaseName, Thread.currentThread().getContextClassLoader()); - } - - /** - * Just like {@link #fromClasspath(java.lang.String)} except that application-specific classpath - * resources will be located using the provided {@link ClassLoader} instead of {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. - */ - @NonNull - static DriverConfigLoader fromClasspath( - @NonNull String resourceBaseName, @NonNull ClassLoader appClassLoader) { - return DefaultDriverConfigLoader.fromClasspath(resourceBaseName, appClassLoader); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are loaded from the given path. - * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • the contents of {@code file} - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromPath(@NonNull Path file) { - return fromFile(file.toFile()); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are loaded from the given file. - * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • the contents of {@code file} - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromFile(@NonNull File file) { - return DefaultDriverConfigLoader.fromFile(file); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are loaded from the given URL. - * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • the contents of {@code url} - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromUrl(@NonNull URL url) { - return DefaultDriverConfigLoader.fromUrl(url); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are parsed from the given string. - * - *

The string must be in HOCON format and contain a {@code datastax-java-driver} section. - * Options must be separated by line breaks: - * - *

-   * DriverConfigLoader.fromString(
-   *         "datastax-java-driver.basic { session-name = my-app\nrequest.timeout = 1 millisecond }")
-   * 
- * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • the config in {@code contents} - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - *

This loader does not support runtime reloading. - */ - @NonNull - static DriverConfigLoader fromString(@NonNull String contents) { - return DefaultDriverConfigLoader.fromString(contents); - } - - /** - * Starts a builder that allows configuration options to be overridden programmatically. - * - *

Note that {@link #fromMap(OptionsMap)} provides an alternative approach for programmatic - * configuration, that might be more convenient if you wish to completely bypass Typesafe config. - * - *

For example: - * - *

{@code
-   * DriverConfigLoader loader =
-   *     DriverConfigLoader.programmaticBuilder()
-   *         .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5))
-   *         .startProfile("slow")
-   *         .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30))
-   *         .endProfile()
-   *         .build();
-   * }
- * - * produces the same overrides as: - * - *
-   * datastax-java-driver {
-   *   basic.request.timeout = 5 seconds
-   *   profiles {
-   *     slow {
-   *       basic.request.timeout = 30 seconds
-   *     }
-   *   }
-   * }
-   * 
- * - * The resulting loader still uses the driver's default implementation (based on Typesafe config), - * except that the programmatic configuration takes precedence. More precisely, configuration - * properties are loaded and merged from the following (first-listed are higher priority): - * - *
    - *
  • system properties - *
  • properties that were provided programmatically - *
  • {@code application.conf} (all resources on classpath with this name) - *
  • {@code application.json} (all resources on classpath with this name) - *
  • {@code application.properties} (all resources on classpath with this name) - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * Note that {@code application.*} is entirely optional, you may choose to only rely on the - * driver's built-in {@code reference.conf} and programmatic overrides. - * - *

The class loader used to locate application-specific classpath resources is {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. This might not be - * suitable for OSGi deployments, which should use {@link #programmaticBuilder(ClassLoader)} - * instead. - * - *

The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - * - *

Note that the returned builder is not thread-safe. - * - * @see #fromMap(OptionsMap) - */ - @NonNull - static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder() { - return new DefaultProgrammaticDriverConfigLoaderBuilder(); - } - - /** - * Just like {@link #programmaticBuilder()} except that application-specific classpath resources - * will be located using the provided {@link ClassLoader} instead of {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. - */ - @NonNull - static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder( - @NonNull ClassLoader appClassLoader) { - return new DefaultProgrammaticDriverConfigLoaderBuilder(appClassLoader); - } - - /** - * Builds an instance backed by an {@link OptionsMap}, which holds all options in memory. - * - *

This is the simplest implementation. It is intended for clients who wish to completely - * bypass Typesafe config, and instead manage the configuration programmatically. A typical - * example is a third-party tool that already has its own configuration file, and doesn't want to - * introduce a separate mechanism for driver options. - * - *

With this loader, the driver's built-in {@code reference.conf} file is ignored, the provided - * {@link OptionsMap} must explicitly provide all mandatory options. Note however that {@link - * OptionsMap#driverDefaults()} allows you to initialize an instance with the same default values - * as {@code reference.conf}. - * - *

-   * // This creates a configuration equivalent to the built-in reference.conf:
-   * OptionsMap map = OptionsMap.driverDefaults();
-   *
-   * // Customize an option:
-   * map.put(TypedDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5));
-   *
-   * DriverConfigLoader loader = DriverConfigLoader.fromMap(map);
-   * CqlSession session = CqlSession.builder()
-   *     .withConfigLoader(loader)
-   *     .build();
-   * 
- * - *

If the {@link OptionsMap} is modified at runtime, this will be reflected immediately in the - * configuration, you don't need to call {@link #reload()}. Note however that, depending on the - * option, the driver might not react to a configuration change immediately, or ever (this is - * documented in {@code reference.conf}). - * - * @since 4.6.0 - */ - @NonNull - static DriverConfigLoader fromMap(@NonNull OptionsMap source) { - return new MapBasedDriverConfigLoader(source, source.asRawMap()); - } - - /** - * Composes two existing config loaders to form a new one. - * - *

When the driver reads an option, the "primary" config will be queried first. If the option - * is missing, then it will be looked up in the "fallback" config. - * - *

All execution profiles will be surfaced in the new config. If a profile is defined both in - * the primary and the fallback config, its options will be merged using the same precedence rules - * as described above. - * - *

The new config is reloadable if at least one of the input configs is. If you invoke {@link - * DriverConfigLoader#reload()} on the new loader, it will reload whatever is reloadable, or fail - * if nothing is. If the input loaders have periodic reloading built-in, each one will reload at - * its own pace, and the changes will be reflected in the new config. - */ - @NonNull - static DriverConfigLoader compose( - @NonNull DriverConfigLoader primaryConfigLoader, - @NonNull DriverConfigLoader fallbackConfigLoader) { - return new CompositeDriverConfigLoader(primaryConfigLoader, fallbackConfigLoader); - } - - /** - * Loads the first configuration that will be used to initialize the driver. - * - *

If this loader {@linkplain #supportsReloading() supports reloading}, this object should be - * mutable and reflect later changes when the configuration gets reloaded. - */ - @NonNull - DriverConfig getInitialConfig(); - - /** - * Called when the driver initializes. For loaders that periodically check for configuration - * updates, this is a good time to grab an internal executor and schedule a recurring task. - */ - void onDriverInit(@NonNull DriverContext context); - - /** - * Triggers an immediate reload attempt and returns a stage that completes once the attempt is - * finished, with a boolean indicating whether the configuration changed as a result of this - * reload. - * - *

If so, it's also guaranteed that internal driver components have been notified by that time; - * note however that some react to the notification asynchronously, so they may not have - * completely applied all resulting changes yet. - * - *

If this loader does not support programmatic reloading — which you can check by - * calling {@link #supportsReloading()} before this method — the returned stage should fail - * immediately with an {@link UnsupportedOperationException}. The default implementation of this - * interface does support programmatic reloading however, and never returns a failed stage. - */ - @NonNull - CompletionStage reload(); - - /** - * Whether this implementation supports programmatic reloading with the {@link #reload()} method. - * - *

The default implementation of this interface does support programmatic reloading and always - * returns true. - */ - boolean supportsReloading(); - - /** - * Called when the session closes. This is a good time to release any external resource, for - * example cancel a scheduled reloading task. - */ - @Override - void close(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java deleted file mode 100644 index 89c28f0f521..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import com.datastax.oss.driver.internal.core.config.DerivedExecutionProfile; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.SortedSet; - -/** - * A profile in the driver's configuration. - * - *

It is a collection of typed options. - * - *

Getters (such as {@link #getBoolean(DriverOption)}) are self-explanatory. - * - *

{@code withXxx} methods (such as {@link #withBoolean(DriverOption, boolean)}) create a - * "derived" profile, which is an on-the-fly copy of the profile with the new value (which - * might be a new option, or overwrite an existing one). If the original configuration is reloaded, - * all derived profiles get updated as well. For best performance, such derived profiles should be - * used sparingly; it is better to have built-in profiles for common scenarios. - * - * @see DriverConfig - */ -public interface DriverExecutionProfile extends OngoingConfigOptions { - - /** - * The name of the default profile (the string {@value}). - * - *

Named profiles can't use this name. If you try to declare such a profile, a runtime error - * will be thrown. - */ - String DEFAULT_NAME = "default"; - - /** - * The name of the profile in the configuration. - * - *

Derived profiles inherit the name of their parent. - */ - @NonNull - String getName(); - - boolean isDefined(@NonNull DriverOption option); - - boolean getBoolean(@NonNull DriverOption option); - - default boolean getBoolean(@NonNull DriverOption option, boolean defaultValue) { - return isDefined(option) ? getBoolean(option) : defaultValue; - } - - @NonNull - List getBooleanList(@NonNull DriverOption option); - - @Nullable - default List getBooleanList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getBooleanList(option) : defaultValue; - } - - int getInt(@NonNull DriverOption option); - - default int getInt(@NonNull DriverOption option, int defaultValue) { - return isDefined(option) ? getInt(option) : defaultValue; - } - - @NonNull - List getIntList(@NonNull DriverOption option); - - @Nullable - default List getIntList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getIntList(option) : defaultValue; - } - - long getLong(@NonNull DriverOption option); - - default long getLong(@NonNull DriverOption option, long defaultValue) { - return isDefined(option) ? getLong(option) : defaultValue; - } - - @NonNull - List getLongList(@NonNull DriverOption option); - - @Nullable - default List getLongList(@NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getLongList(option) : defaultValue; - } - - double getDouble(@NonNull DriverOption option); - - default double getDouble(@NonNull DriverOption option, double defaultValue) { - return isDefined(option) ? getDouble(option) : defaultValue; - } - - @NonNull - List getDoubleList(@NonNull DriverOption option); - - @Nullable - default List getDoubleList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? 
getDoubleList(option) : defaultValue; - } - - @NonNull - String getString(@NonNull DriverOption option); - - @Nullable - default String getString(@NonNull DriverOption option, @Nullable String defaultValue) { - return isDefined(option) ? getString(option) : defaultValue; - } - - @NonNull - List getStringList(@NonNull DriverOption option); - - @Nullable - default List getStringList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getStringList(option) : defaultValue; - } - - @NonNull - Map getStringMap(@NonNull DriverOption option); - - @Nullable - default Map getStringMap( - @NonNull DriverOption option, @Nullable Map defaultValue) { - return isDefined(option) ? getStringMap(option) : defaultValue; - } - - /** - * @return a size in bytes. This is separate from {@link #getLong(DriverOption)}, in case - * implementations want to allow users to provide sizes in a more human-readable way, for - * example "256 MB". - */ - long getBytes(@NonNull DriverOption option); - - default long getBytes(@NonNull DriverOption option, long defaultValue) { - return isDefined(option) ? getBytes(option) : defaultValue; - } - - /** @see #getBytes(DriverOption) */ - @NonNull - List getBytesList(DriverOption option); - - @Nullable - default List getBytesList(DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getBytesList(option) : defaultValue; - } - - @NonNull - Duration getDuration(@NonNull DriverOption option); - - @Nullable - default Duration getDuration(@NonNull DriverOption option, @Nullable Duration defaultValue) { - return isDefined(option) ? getDuration(option) : defaultValue; - } - - @NonNull - List getDurationList(@NonNull DriverOption option); - - @Nullable - default List getDurationList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getDurationList(option) : defaultValue; - } - - /** - * Returns a representation of all the child options under a given option. 
- * - *

This is used by the driver at initialization time, to compare profiles and determine if it - * must create per-profile policies. For example, if two profiles have the same options in the - * {@code basic.load-balancing-policy} section, they will share the same policy instance. But if - * their options differ, two separate instances will be created. - * - *

The runtime return type does not matter, as long as identical sections (same options with - * same values, regardless of order) compare as equal and have the same {@code hashCode()}. The - * default implementation builds a map based on the entries from {@link #entrySet()}, it should be - * good for most cases. - */ - @NonNull - default Object getComparisonKey(@NonNull DriverOption option) { - // This method is only used during driver initialization, performance is not crucial - String prefix = option.getPath(); - ImmutableMap.Builder childOptions = ImmutableMap.builder(); - for (Map.Entry entry : entrySet()) { - if (entry.getKey().startsWith(prefix)) { - childOptions.put(entry.getKey(), entry.getValue()); - } - } - return childOptions.build(); - } - - /** - * Enumerates all the entries in this profile, including those that were inherited from another - * profile. - * - *

The keys are raw strings that match {@link DriverOption#getPath()}. - * - *

The values are implementation-dependent. With the driver's default implementation, the - * possible types are {@code String}, {@code Number}, {@code Boolean}, {@code Map}, - * {@code List}, or {@code null}. - */ - @NonNull - SortedSet> entrySet(); - - @NonNull - @Override - default DriverExecutionProfile withBoolean(@NonNull DriverOption option, boolean value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withBooleanList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withInt(@NonNull DriverOption option, int value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withIntList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withLong(@NonNull DriverOption option, long value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withLongList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withDouble(@NonNull DriverOption option, double value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withDoubleList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withString(@NonNull DriverOption option, @NonNull String value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withStringList( - @NonNull 
DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withStringMap( - @NonNull DriverOption option, @NonNull Map value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withBytes(@NonNull DriverOption option, long value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withBytesList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withDuration( - @NonNull DriverOption option, @NonNull Duration value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withDurationList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile without(@NonNull DriverOption option) { - return DerivedExecutionProfile.without(this, option); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java deleted file mode 100644 index 2f15b701f36..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Describes an option in the driver's configuration. - * - *

This is just a thin wrapper around the option's path, to make it easier to find where it is - * referenced in the code. We recommend using enums for implementations. - */ -public interface DriverOption { - - /** - * The option's path. Paths are hierarchical and each segment is separated by a dot, e.g. {@code - * metadata.schema.enabled}. - */ - @NonNull - String getPath(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java deleted file mode 100644 index 2c931bbfa91..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** An object where config options can be set programmatically. 
*/ -public interface OngoingConfigOptions> { - - @NonNull - SelfT withBoolean(@NonNull DriverOption option, boolean value); - - @NonNull - SelfT withBooleanList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withInt(@NonNull DriverOption option, int value); - - @NonNull - SelfT withIntList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withLong(@NonNull DriverOption option, long value); - - @NonNull - SelfT withLongList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withDouble(@NonNull DriverOption option, double value); - - @NonNull - SelfT withDoubleList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withString(@NonNull DriverOption option, @NonNull String value); - - /** - * Note that this is just a shortcut to call {@link #withString(DriverOption, String)} with {@code - * value.getName()}. - */ - @NonNull - default SelfT withClass(@NonNull DriverOption option, @NonNull Class value) { - return withString(option, value.getName()); - } - - /** - * Note that this is just a shortcut to call {@link #withStringList(DriverOption, List)} with - * class names obtained from {@link Class#getName()}. 
- */ - @NonNull - default SelfT withClassList(@NonNull DriverOption option, @NonNull List> values) { - return withStringList(option, values.stream().map(Class::getName).collect(Collectors.toList())); - } - - @NonNull - SelfT withStringList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withStringMap(@NonNull DriverOption option, @NonNull Map value); - - @NonNull - SelfT withBytes(@NonNull DriverOption option, long value); - - @NonNull - SelfT withBytesList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withDuration(@NonNull DriverOption option, @NonNull Duration value); - - @NonNull - SelfT withDurationList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT without(@NonNull DriverOption option); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/OptionsMap.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/OptionsMap.java deleted file mode 100644 index 98faf3e590c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/OptionsMap.java +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.config; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.function.Consumer; -import net.jcip.annotations.Immutable; -import net.jcip.annotations.ThreadSafe; - -/** - * An in-memory repository of config options, for use with {@link - * DriverConfigLoader#fromMap(OptionsMap)}. - * - *

This class is intended for clients who wish to assemble the driver configuration in memory, - * instead of loading it from configuration files. Note that {@link #driverDefaults()} can be used - * to pre-initialize the map with the driver's built-in defaults. - * - *

It functions like a two-dimensional map indexed by execution profile and option. All methods - * have a profile-less variant that applies to the default profile, for example {@link #get(String, - * TypedDriverOption)} and {@link #get(TypedDriverOption)}. Options are represented by {@link - * TypedDriverOption}, which allows this class to enforce additional type-safety guarantees (an - * option can only be set to a value of its intended type). - * - *

This class is mutable and thread-safe. Live changes are reflected in real time to the driver - * session(s) that use this configuration. - * - * @since 4.6.0 - */ -@ThreadSafe -public class OptionsMap implements Serializable { - - private static final long serialVersionUID = 1; - - /** - * Creates a new instance that contains the driver's default configuration. - * - *

This will produce a configuration that is equivalent to the {@code reference.conf} file - * bundled with the driver (however, this method does not load any file, and doesn't require - * Typesafe config in the classpath). - */ - @NonNull - public static OptionsMap driverDefaults() { - OptionsMap source = new OptionsMap(); - fillWithDriverDefaults(source); - return source; - } - - private final ConcurrentHashMap> map; - - private final List> changeListeners = new CopyOnWriteArrayList<>(); - - public OptionsMap() { - this(new ConcurrentHashMap<>()); - } - - private OptionsMap(ConcurrentHashMap> map) { - this.map = map; - } - - /** - * Associates the specified value for the specified option, in the specified execution profile. - * - * @return the previous value associated with {@code option}, or {@code null} if the option was - * not defined. - */ - @Nullable - public ValueT put( - @NonNull String profile, @NonNull TypedDriverOption option, @NonNull ValueT value) { - Objects.requireNonNull(option, "option"); - Objects.requireNonNull(value, "value"); - Object previous = getProfileMap(profile).put(option.getRawOption(), value); - if (!value.equals(previous)) { - for (Consumer listener : changeListeners) { - listener.accept(this); - } - } - return cast(previous); - } - - /** - * Associates the specified value for the specified option, in the default execution profile. - * - * @return the previous value associated with {@code option}, or {@code null} if the option was - * not defined. - */ - @Nullable - public ValueT put(@NonNull TypedDriverOption option, @NonNull ValueT value) { - return put(DriverExecutionProfile.DEFAULT_NAME, option, value); - } - - /** - * Returns the value to which the specified option is mapped in the specified profile, or {@code - * null} if the option is not defined. 
- */ - @Nullable - public ValueT get(@NonNull String profile, @NonNull TypedDriverOption option) { - Objects.requireNonNull(option, "option"); - Object result = getProfileMap(profile).get(option.getRawOption()); - return cast(result); - } - - /** - * Returns the value to which the specified option is mapped in the default profile, or {@code - * null} if the option is not defined. - */ - @Nullable - public ValueT get(@NonNull TypedDriverOption option) { - return get(DriverExecutionProfile.DEFAULT_NAME, option); - } - - /** - * Removes the specified option from the specified profile. - * - * @return the previous value associated with {@code option}, or {@code null} if the option was - * not defined. - */ - @Nullable - public ValueT remove( - @NonNull String profile, @NonNull TypedDriverOption option) { - Objects.requireNonNull(option, "option"); - Object previous = getProfileMap(profile).remove(option.getRawOption()); - if (previous != null) { - for (Consumer listener : changeListeners) { - listener.accept(this); - } - } - return cast(previous); - } - - /** - * Removes the specified option from the default profile. - * - * @return the previous value associated with {@code option}, or {@code null} if the option was - * not defined. - */ - @Nullable - public ValueT remove(@NonNull TypedDriverOption option) { - return remove(DriverExecutionProfile.DEFAULT_NAME, option); - } - - /** - * Registers a listener that will get notified when this object changes. - * - *

This is mostly for internal use by the driver. Note that listeners are transient, and not - * taken into account by {@link #equals(Object)} and {@link #hashCode()}. - */ - public void addChangeListener(@NonNull Consumer listener) { - changeListeners.add(Objects.requireNonNull(listener)); - } - - /** - * Unregisters a listener that was previously registered with {@link - * #addChangeListener(Consumer)}. - * - * @return {@code true} if the listener was indeed registered for this object. - */ - public boolean removeChangeListener(@NonNull Consumer listener) { - return changeListeners.remove(Objects.requireNonNull(listener)); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof OptionsMap) { - OptionsMap that = (OptionsMap) other; - return this.map.equals(that.map); - } else { - return false; - } - } - - @Override - public int hashCode() { - return map.hashCode(); - } - - /** - * Returns a live view of this object, using the driver's untyped {@link DriverOption}. - * - *

This is intended for internal usage by the driver. Modifying the resulting map is strongly - * discouraged, as it could break the type-safety guarantees provided by the public methods. - */ - @NonNull - protected Map> asRawMap() { - return map; - } - - @NonNull - private Map getProfileMap(@NonNull String profile) { - Objects.requireNonNull(profile, "profile"); - return map.computeIfAbsent(profile, p -> new ConcurrentHashMap<>()); - } - - // Isolate the suppressed warning for retrieval. The cast should always succeed unless the user - // messes with asMap() directly. - @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) - @Nullable - private ValueT cast(@Nullable Object value) { - return (ValueT) value; - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData the serialized form of the {@code Map>} used to - * store options internally (listeners are transient). - */ - private Object writeReplace() { - return new SerializationProxy(this.map); - } - - // Should never be called since we serialize a proxy - @SuppressWarnings("UnusedVariable") - private void readObject(ObjectInputStream stream) throws InvalidObjectException { - throw new InvalidObjectException("Proxy required"); - } - - protected static void fillWithDriverDefaults(OptionsMap map) { - Duration initQueryTimeout = Duration.ofSeconds(5); - Duration requestTimeout = Duration.ofSeconds(2); - int requestPageSize = 5000; - int continuousMaxPages = 0; - int continuousMaxPagesPerSecond = 0; - int continuousMaxEnqueuedPages = 4; - - // Sorted by order of appearance in reference.conf: - - // Skip CONFIG_RELOAD_INTERVAL because the map-based config doesn't need periodic reloading - map.put(TypedDriverOption.REQUEST_TIMEOUT, requestTimeout); - map.put(TypedDriverOption.REQUEST_CONSISTENCY, "LOCAL_ONE"); - map.put(TypedDriverOption.REQUEST_PAGE_SIZE, requestPageSize); - map.put(TypedDriverOption.REQUEST_SERIAL_CONSISTENCY, "SERIAL"); - 
map.put(TypedDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, false); - map.put(TypedDriverOption.GRAPH_TRAVERSAL_SOURCE, "g"); - map.put(TypedDriverOption.LOAD_BALANCING_POLICY_CLASS, "DefaultLoadBalancingPolicy"); - map.put(TypedDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, true); - map.put(TypedDriverOption.SESSION_LEAK_THRESHOLD, 4); - map.put(TypedDriverOption.CONNECTION_CONNECT_TIMEOUT, Duration.ofSeconds(5)); - map.put(TypedDriverOption.CONNECTION_INIT_QUERY_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - map.put(TypedDriverOption.CONNECTION_POOL_REMOTE_SIZE, 1); - map.put(TypedDriverOption.CONNECTION_MAX_REQUESTS, 1024); - map.put(TypedDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS, 256); - map.put(TypedDriverOption.CONNECTION_WARN_INIT_ERROR, true); - map.put(TypedDriverOption.RECONNECT_ON_INIT, false); - map.put(TypedDriverOption.RECONNECTION_POLICY_CLASS, "ExponentialReconnectionPolicy"); - map.put(TypedDriverOption.RECONNECTION_BASE_DELAY, Duration.ofSeconds(1)); - map.put(TypedDriverOption.RECONNECTION_MAX_DELAY, Duration.ofSeconds(60)); - map.put(TypedDriverOption.RETRY_POLICY_CLASS, "DefaultRetryPolicy"); - map.put(TypedDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, "NoSpeculativeExecutionPolicy"); - map.put(TypedDriverOption.TIMESTAMP_GENERATOR_CLASS, "AtomicTimestampGenerator"); - map.put(TypedDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, Duration.ofSeconds(1)); - map.put(TypedDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL, Duration.ofSeconds(10)); - map.put(TypedDriverOption.TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK, false); - map.put(TypedDriverOption.REQUEST_THROTTLER_CLASS, "PassThroughRequestThrottler"); - map.put(TypedDriverOption.ADDRESS_TRANSLATOR_CLASS, "PassThroughAddressTranslator"); - map.put(TypedDriverOption.RESOLVE_CONTACT_POINTS, true); - map.put(TypedDriverOption.PROTOCOL_MAX_FRAME_LENGTH, 256L * 
1024 * 1024); - map.put(TypedDriverOption.REQUEST_WARN_IF_SET_KEYSPACE, true); - map.put(TypedDriverOption.REQUEST_TRACE_ATTEMPTS, 5); - map.put(TypedDriverOption.REQUEST_TRACE_INTERVAL, Duration.ofMillis(3)); - map.put(TypedDriverOption.REQUEST_TRACE_CONSISTENCY, "ONE"); - map.put(TypedDriverOption.REQUEST_LOG_WARNINGS, true); - map.put(TypedDriverOption.GRAPH_PAGING_ENABLED, "AUTO"); - map.put(TypedDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, requestPageSize); - map.put(TypedDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES, continuousMaxPages); - map.put( - TypedDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, - continuousMaxPagesPerSecond); - map.put( - TypedDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, continuousMaxEnqueuedPages); - map.put(TypedDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, requestPageSize); - map.put(TypedDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES, false); - map.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES, continuousMaxPages); - map.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, continuousMaxPagesPerSecond); - map.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, continuousMaxEnqueuedPages); - map.put(TypedDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, Duration.ofSeconds(2)); - map.put(TypedDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofSeconds(1)); - map.put(TypedDriverOption.MONITOR_REPORTING_ENABLED, true); - map.put(TypedDriverOption.METRICS_SESSION_ENABLED, Collections.emptyList()); - map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, Duration.ofSeconds(3)); - map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST, Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, 3); - map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_INTERVAL, Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, Duration.ofSeconds(3)); - map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_LOWEST, 
Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_DIGITS, 3); - map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_INTERVAL, Duration.ofMinutes(5)); - map.put( - TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, - Duration.ofMinutes(2)); - map.put( - TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST, - Duration.ofMillis(10)); - map.put(TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, 3); - map.put( - TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL, - Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_FACTORY_CLASS, "DefaultMetricsFactory"); - map.put(TypedDriverOption.METRICS_ID_GENERATOR_CLASS, "DefaultMetricIdGenerator"); - map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, Duration.ofSeconds(12)); - map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST, Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, 3); - map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_INTERVAL, Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_NODE_ENABLED, Collections.emptyList()); - map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, Duration.ofSeconds(3)); - map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST, Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, 3); - map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_INTERVAL, Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, Duration.ofSeconds(3)); - map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST, Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, 3); - map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_INTERVAL, Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_NODE_EXPIRE_AFTER, Duration.ofHours(1)); - map.put(TypedDriverOption.SOCKET_TCP_NODELAY, true); - 
map.put(TypedDriverOption.HEARTBEAT_INTERVAL, Duration.ofSeconds(30)); - map.put(TypedDriverOption.HEARTBEAT_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.METADATA_TOPOLOGY_WINDOW, Duration.ofSeconds(1)); - map.put(TypedDriverOption.METADATA_TOPOLOGY_MAX_EVENTS, 20); - map.put(TypedDriverOption.METADATA_SCHEMA_ENABLED, true); - map.put( - TypedDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, - ImmutableList.of("!system", "!/^system_.*/", "!/^dse_.*/", "!solr_admin", "!OpsCenter")); - map.put(TypedDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT, requestTimeout); - map.put(TypedDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE, requestPageSize); - map.put(TypedDriverOption.METADATA_SCHEMA_WINDOW, Duration.ofSeconds(1)); - map.put(TypedDriverOption.METADATA_SCHEMA_MAX_EVENTS, 20); - map.put(TypedDriverOption.METADATA_TOKEN_MAP_ENABLED, true); - map.put(TypedDriverOption.CONTROL_CONNECTION_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL, Duration.ofMillis(200)); - map.put(TypedDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT, Duration.ofSeconds(10)); - map.put(TypedDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN, true); - map.put(TypedDriverOption.PREPARE_ON_ALL_NODES, true); - map.put(TypedDriverOption.REPREPARE_ENABLED, true); - map.put(TypedDriverOption.REPREPARE_CHECK_SYSTEM_TABLE, false); - map.put(TypedDriverOption.REPREPARE_MAX_STATEMENTS, 0); - map.put(TypedDriverOption.REPREPARE_MAX_PARALLELISM, 100); - map.put(TypedDriverOption.REPREPARE_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.NETTY_DAEMON, false); - map.put(TypedDriverOption.NETTY_IO_SIZE, 0); - map.put(TypedDriverOption.NETTY_IO_SHUTDOWN_QUIET_PERIOD, 2); - map.put(TypedDriverOption.NETTY_IO_SHUTDOWN_TIMEOUT, 15); - map.put(TypedDriverOption.NETTY_IO_SHUTDOWN_UNIT, "SECONDS"); - map.put(TypedDriverOption.NETTY_ADMIN_SIZE, 2); - map.put(TypedDriverOption.NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD, 2); - 
map.put(TypedDriverOption.NETTY_ADMIN_SHUTDOWN_TIMEOUT, 15); - map.put(TypedDriverOption.NETTY_ADMIN_SHUTDOWN_UNIT, "SECONDS"); - map.put(TypedDriverOption.NETTY_TIMER_TICK_DURATION, Duration.ofMillis(100)); - map.put(TypedDriverOption.NETTY_TIMER_TICKS_PER_WHEEL, 2048); - map.put(TypedDriverOption.COALESCER_INTERVAL, Duration.of(10, ChronoUnit.MICROS)); - map.put(TypedDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC, 0); - map.put(TypedDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS, false); - map.put(TypedDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS, true); - map.put( - TypedDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS, ImmutableList.of("")); - } - - @Immutable - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1L; - - private final ConcurrentHashMap> map; - - private SerializationProxy(ConcurrentHashMap> map) { - this.map = map; - } - - private Object readResolve() { - return new OptionsMap(map); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java deleted file mode 100644 index c3ae1d1bf5b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A builder that allows the creation of a config loader where options are overridden - * programmatically. - * - * @see DriverConfigLoader#programmaticBuilder() - */ -public interface ProgrammaticDriverConfigLoaderBuilder - extends OngoingConfigOptions { - - /** - * Starts the definition of a new profile. - * - *

All options set after this call, and before the next call to this method or {@link - * #endProfile()}, will apply to the given profile. - */ - @NonNull - ProgrammaticDriverConfigLoaderBuilder startProfile(@NonNull String profileName); - - /** - * Ends the definition of a profile. - * - *

All options set after this call, and before the next call to {@link #startProfile(String)}, - * will apply to the default profile. - */ - @NonNull - ProgrammaticDriverConfigLoaderBuilder endProfile(); - - @NonNull - DriverConfigLoader build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/TypedDriverOption.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/TypedDriverOption.java deleted file mode 100644 index 182753300e7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/TypedDriverOption.java +++ /dev/null @@ -1,944 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.config; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.StringJoiner; - -/** - * A type-safe wrapper around {@link DriverOption}, that encodes the intended value type of each - * option. - * - *

This type was introduced in conjunction with {@link DriverConfigLoader#fromMap(OptionsMap)}. - * Unfortunately, for backward compatibility reasons, it wasn't possible to retrofit the rest of the - * driver to use it; therefore the APIs used to read the configuration, such as {@link DriverConfig} - * and {@link DriverExecutionProfile}, still use the untyped {@link DriverOption}. - * - * @since 4.6.0 - */ -public class TypedDriverOption { - - private static volatile Iterable> builtInValues; - - /** - * Returns the list of all built-in options known to the driver codebase; in other words, all the - * {@link TypedDriverOption} constants defined on this class. - * - *

Note that 3rd-party driver extensions might define their own {@link TypedDriverOption} - * constants for custom options. - * - *

This method uses reflection to introspect all the constants on this class; the result is - * computed lazily on the first invocation, and then cached for future calls. - */ - public static Iterable> builtInValues() { - if (builtInValues == null) { - builtInValues = introspectBuiltInValues(); - } - return builtInValues; - } - - private final DriverOption rawOption; - private final GenericType expectedType; - - public TypedDriverOption( - @NonNull DriverOption rawOption, @NonNull GenericType expectedType) { - this.rawOption = Objects.requireNonNull(rawOption); - this.expectedType = Objects.requireNonNull(expectedType); - } - - @NonNull - public DriverOption getRawOption() { - return rawOption; - } - - @NonNull - public GenericType getExpectedType() { - return expectedType; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TypedDriverOption) { - TypedDriverOption that = (TypedDriverOption) other; - return this.rawOption.equals(that.rawOption) && this.expectedType.equals(that.expectedType); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(rawOption, expectedType); - } - - @Override - public String toString() { - return new StringJoiner(", ", TypedDriverOption.class.getSimpleName() + "[", "]") - .add("rawOption=" + rawOption) - .add("expectedType=" + expectedType) - .toString(); - } - - /** The contact points to use for the initial connection to the cluster. */ - public static final TypedDriverOption> CONTACT_POINTS = - new TypedDriverOption<>(DefaultDriverOption.CONTACT_POINTS, GenericType.listOf(String.class)); - /** A name that uniquely identifies the driver instance. */ - public static final TypedDriverOption SESSION_NAME = - new TypedDriverOption<>(DefaultDriverOption.SESSION_NAME, GenericType.STRING); - /** The name of the keyspace that the session should initially be connected to. 
*/ - public static final TypedDriverOption SESSION_KEYSPACE = - new TypedDriverOption<>(DefaultDriverOption.SESSION_KEYSPACE, GenericType.STRING); - /** How often the driver tries to reload the configuration. */ - public static final TypedDriverOption CONFIG_RELOAD_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.CONFIG_RELOAD_INTERVAL, GenericType.DURATION); - /** How long the driver waits for a request to complete. */ - public static final TypedDriverOption REQUEST_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TIMEOUT, GenericType.DURATION); - /** The consistency level. */ - public static final TypedDriverOption REQUEST_CONSISTENCY = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_CONSISTENCY, GenericType.STRING); - /** The page size. */ - public static final TypedDriverOption REQUEST_PAGE_SIZE = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_PAGE_SIZE, GenericType.INTEGER); - /** The serial consistency level. */ - public static final TypedDriverOption REQUEST_SERIAL_CONSISTENCY = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY, GenericType.STRING); - /** The default idempotence of a request. */ - public static final TypedDriverOption REQUEST_DEFAULT_IDEMPOTENCE = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, GenericType.BOOLEAN); - /** The class of the load balancing policy. */ - public static final TypedDriverOption LOAD_BALANCING_POLICY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, GenericType.STRING); - /** The datacenter that is considered "local". */ - public static final TypedDriverOption LOAD_BALANCING_LOCAL_DATACENTER = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, GenericType.STRING); - /** - * A custom filter to include/exclude nodes. - * - * @deprecated Use {@link #LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS} instead. 
- */ - @Deprecated - public static final TypedDriverOption LOAD_BALANCING_FILTER_CLASS = - new TypedDriverOption<>(DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS, GenericType.STRING); - /** - * The class name of a custom {@link - * com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator}. - */ - public static final TypedDriverOption LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS, GenericType.STRING); - /** The timeout to use for internal queries that run as part of the initialization process. */ - public static final TypedDriverOption CONNECTION_INIT_QUERY_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT, GenericType.DURATION); - /** The timeout to use when the driver changes the keyspace on a connection at runtime. */ - public static final TypedDriverOption CONNECTION_SET_KEYSPACE_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT, GenericType.DURATION); - /** The maximum number of requests that can be executed concurrently on a connection. */ - public static final TypedDriverOption CONNECTION_MAX_REQUESTS = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_MAX_REQUESTS, GenericType.INTEGER); - /** The maximum number of "orphaned" requests before a connection gets closed automatically. */ - public static final TypedDriverOption CONNECTION_MAX_ORPHAN_REQUESTS = - new TypedDriverOption<>( - DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS, GenericType.INTEGER); - /** Whether to log non-fatal errors when the driver tries to open a new connection. */ - public static final TypedDriverOption CONNECTION_WARN_INIT_ERROR = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_WARN_INIT_ERROR, GenericType.BOOLEAN); - /** The number of connections in the LOCAL pool. 
*/ - public static final TypedDriverOption CONNECTION_POOL_LOCAL_SIZE = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, GenericType.INTEGER); - /** The number of connections in the REMOTE pool. */ - public static final TypedDriverOption CONNECTION_POOL_REMOTE_SIZE = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE, GenericType.INTEGER); - /** - * Whether to schedule reconnection attempts if all contact points are unreachable on the first - * initialization attempt. - */ - public static final TypedDriverOption RECONNECT_ON_INIT = - new TypedDriverOption<>(DefaultDriverOption.RECONNECT_ON_INIT, GenericType.BOOLEAN); - /** The class of the reconnection policy. */ - public static final TypedDriverOption RECONNECTION_POLICY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.RECONNECTION_POLICY_CLASS, GenericType.STRING); - /** Base delay for computing time between reconnection attempts. */ - public static final TypedDriverOption RECONNECTION_BASE_DELAY = - new TypedDriverOption<>(DefaultDriverOption.RECONNECTION_BASE_DELAY, GenericType.DURATION); - /** Maximum delay between reconnection attempts. */ - public static final TypedDriverOption RECONNECTION_MAX_DELAY = - new TypedDriverOption<>(DefaultDriverOption.RECONNECTION_MAX_DELAY, GenericType.DURATION); - /** The class of the retry policy. */ - public static final TypedDriverOption RETRY_POLICY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.RETRY_POLICY_CLASS, GenericType.STRING); - /** The class of the speculative execution policy. */ - public static final TypedDriverOption SPECULATIVE_EXECUTION_POLICY_CLASS = - new TypedDriverOption<>( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, GenericType.STRING); - /** The maximum number of executions. */ - public static final TypedDriverOption SPECULATIVE_EXECUTION_MAX = - new TypedDriverOption<>(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, GenericType.INTEGER); - /** The delay between each execution. 
*/ - public static final TypedDriverOption SPECULATIVE_EXECUTION_DELAY = - new TypedDriverOption<>( - DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, GenericType.DURATION); - /** The class of the authentication provider. */ - public static final TypedDriverOption AUTH_PROVIDER_CLASS = - new TypedDriverOption<>(DefaultDriverOption.AUTH_PROVIDER_CLASS, GenericType.STRING); - /** Plain text auth provider username. */ - public static final TypedDriverOption AUTH_PROVIDER_USER_NAME = - new TypedDriverOption<>(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, GenericType.STRING); - /** Plain text auth provider password. */ - public static final TypedDriverOption AUTH_PROVIDER_PASSWORD = - new TypedDriverOption<>(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, GenericType.STRING); - /** The class of the SSL Engine Factory. */ - public static final TypedDriverOption SSL_ENGINE_FACTORY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, GenericType.STRING); - /** The cipher suites to enable when creating an SSLEngine for a connection. */ - public static final TypedDriverOption> SSL_CIPHER_SUITES = - new TypedDriverOption<>( - DefaultDriverOption.SSL_CIPHER_SUITES, GenericType.listOf(String.class)); - /** - * Whether or not to require validation that the hostname of the server certificate's common name - * matches the hostname of the server being connected to. - */ - public static final TypedDriverOption SSL_HOSTNAME_VALIDATION = - new TypedDriverOption<>(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, GenericType.BOOLEAN); - - public static final TypedDriverOption SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN = - new TypedDriverOption<>( - DefaultDriverOption.SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN, GenericType.BOOLEAN); - /** The location of the keystore file. */ - public static final TypedDriverOption SSL_KEYSTORE_PATH = - new TypedDriverOption<>(DefaultDriverOption.SSL_KEYSTORE_PATH, GenericType.STRING); - /** The keystore password. 
*/ - public static final TypedDriverOption SSL_KEYSTORE_PASSWORD = - new TypedDriverOption<>(DefaultDriverOption.SSL_KEYSTORE_PASSWORD, GenericType.STRING); - - /** The duration between attempts to reload the keystore. */ - public static final TypedDriverOption SSL_KEYSTORE_RELOAD_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.SSL_KEYSTORE_RELOAD_INTERVAL, GenericType.DURATION); - - /** The location of the truststore file. */ - public static final TypedDriverOption SSL_TRUSTSTORE_PATH = - new TypedDriverOption<>(DefaultDriverOption.SSL_TRUSTSTORE_PATH, GenericType.STRING); - /** The truststore password. */ - public static final TypedDriverOption SSL_TRUSTSTORE_PASSWORD = - new TypedDriverOption<>(DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, GenericType.STRING); - /** The class of the generator that assigns a microsecond timestamp to each request. */ - public static final TypedDriverOption TIMESTAMP_GENERATOR_CLASS = - new TypedDriverOption<>(DefaultDriverOption.TIMESTAMP_GENERATOR_CLASS, GenericType.STRING); - /** Whether to force the driver to use Java's millisecond-precision system clock. */ - public static final TypedDriverOption TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK = - new TypedDriverOption<>( - DefaultDriverOption.TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK, GenericType.BOOLEAN); - /** How far in the future timestamps are allowed to drift before the warning is logged. */ - public static final TypedDriverOption TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD = - new TypedDriverOption<>( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, GenericType.DURATION); - /** How often the warning will be logged if timestamps keep drifting above the threshold. */ - public static final TypedDriverOption TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL, GenericType.DURATION); - - /** - * The class of a session-wide component that tracks the outcome of requests. 
- * - * @deprecated Use {@link #REQUEST_TRACKER_CLASSES} instead. - */ - @Deprecated - public static final TypedDriverOption REQUEST_TRACKER_CLASS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACKER_CLASS, GenericType.STRING); - - /** The classes of session-wide components that track the outcome of requests. */ - public static final TypedDriverOption> REQUEST_TRACKER_CLASSES = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_TRACKER_CLASSES, GenericType.listOf(String.class)); - - /** The class of a session-wide component that generates request IDs. */ - public static final TypedDriverOption REQUEST_ID_GENERATOR_CLASS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, GenericType.STRING); - - /** Whether to log successful requests. */ - public static final TypedDriverOption REQUEST_LOGGER_SUCCESS_ENABLED = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, GenericType.BOOLEAN); - /** The threshold to classify a successful request as "slow". */ - public static final TypedDriverOption REQUEST_LOGGER_SLOW_THRESHOLD = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD, GenericType.DURATION); - /** Whether to log slow requests. */ - public static final TypedDriverOption REQUEST_LOGGER_SLOW_ENABLED = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, GenericType.BOOLEAN); - /** Whether to log failed requests. */ - public static final TypedDriverOption REQUEST_LOGGER_ERROR_ENABLED = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, GenericType.BOOLEAN); - /** The maximum length of the query string in the log message. */ - public static final TypedDriverOption REQUEST_LOGGER_MAX_QUERY_LENGTH = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, GenericType.INTEGER); - /** Whether to log bound values in addition to the query string. 
*/ - public static final TypedDriverOption REQUEST_LOGGER_VALUES = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_VALUES, GenericType.BOOLEAN); - /** The maximum length for bound values in the log message. */ - public static final TypedDriverOption REQUEST_LOGGER_MAX_VALUE_LENGTH = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, GenericType.INTEGER); - /** The maximum number of bound values to log. */ - public static final TypedDriverOption REQUEST_LOGGER_MAX_VALUES = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, GenericType.INTEGER); - /** Whether to log stack traces for failed queries. */ - public static final TypedDriverOption REQUEST_LOGGER_STACK_TRACES = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, GenericType.BOOLEAN); - /** - * The class of a session-wide component that controls the rate at which requests are executed. - */ - public static final TypedDriverOption REQUEST_THROTTLER_CLASS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_THROTTLER_CLASS, GenericType.STRING); - /** The maximum number of requests that are allowed to execute in parallel. */ - public static final TypedDriverOption REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS, GenericType.INTEGER); - /** The maximum allowed request rate. */ - public static final TypedDriverOption REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND, GenericType.INTEGER); - /** - * The maximum number of requests that can be enqueued when the throttling threshold is exceeded. - */ - public static final TypedDriverOption REQUEST_THROTTLER_MAX_QUEUE_SIZE = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE, GenericType.INTEGER); - /** How often the throttler attempts to dequeue requests. 
*/ - public static final TypedDriverOption REQUEST_THROTTLER_DRAIN_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_THROTTLER_DRAIN_INTERVAL, GenericType.DURATION); - - /** - * The class of a session-wide component that listens for node state changes. - * - * @deprecated Use {@link #METADATA_NODE_STATE_LISTENER_CLASSES} instead. - */ - @Deprecated - public static final TypedDriverOption METADATA_NODE_STATE_LISTENER_CLASS = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASS, GenericType.STRING); - - /** - * The class of a session-wide component that listens for schema changes. - * - * @deprecated Use {@link #METADATA_SCHEMA_CHANGE_LISTENER_CLASSES} instead. - */ - @Deprecated - public static final TypedDriverOption METADATA_SCHEMA_CHANGE_LISTENER_CLASS = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASS, GenericType.STRING); - - /** The classes of session-wide components that listen for node state changes. */ - public static final TypedDriverOption> METADATA_NODE_STATE_LISTENER_CLASSES = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES, - GenericType.listOf(String.class)); - - /** The classes of session-wide components that listen for schema changes. */ - public static final TypedDriverOption> METADATA_SCHEMA_CHANGE_LISTENER_CLASSES = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES, - GenericType.listOf(String.class)); - - /** - * The class of the address translator to use to convert the addresses sent by Cassandra nodes - * into ones that the driver uses to connect. - */ - public static final TypedDriverOption ADDRESS_TRANSLATOR_CLASS = - new TypedDriverOption<>(DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS, GenericType.STRING); - /** The native protocol version to use. 
*/ - public static final TypedDriverOption PROTOCOL_VERSION = - new TypedDriverOption<>(DefaultDriverOption.PROTOCOL_VERSION, GenericType.STRING); - /** The name of the algorithm used to compress protocol frames. */ - public static final TypedDriverOption PROTOCOL_COMPRESSION = - new TypedDriverOption<>(DefaultDriverOption.PROTOCOL_COMPRESSION, GenericType.STRING); - /** The maximum length, in bytes, of the frames supported by the driver. */ - public static final TypedDriverOption PROTOCOL_MAX_FRAME_LENGTH = - new TypedDriverOption<>(DefaultDriverOption.PROTOCOL_MAX_FRAME_LENGTH, GenericType.LONG); - /** - * Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active - * keyspace. - */ - public static final TypedDriverOption REQUEST_WARN_IF_SET_KEYSPACE = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE, GenericType.BOOLEAN); - /** How many times the driver will attempt to fetch the query trace if it is not ready yet. */ - public static final TypedDriverOption REQUEST_TRACE_ATTEMPTS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACE_ATTEMPTS, GenericType.INTEGER); - /** The interval between each attempt. */ - public static final TypedDriverOption REQUEST_TRACE_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACE_INTERVAL, GenericType.DURATION); - /** The consistency level to use for trace queries. */ - public static final TypedDriverOption REQUEST_TRACE_CONSISTENCY = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACE_CONSISTENCY, GenericType.STRING); - /** Whether or not to publish aggregable histogram for metrics */ - public static final TypedDriverOption METRICS_GENERATE_AGGREGABLE_HISTOGRAMS = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS, GenericType.BOOLEAN); - /** List of enabled session-level metrics. 
*/ - public static final TypedDriverOption> METRICS_SESSION_ENABLED = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_ENABLED, GenericType.listOf(String.class)); - /** List of enabled node-level metrics. */ - public static final TypedDriverOption> METRICS_NODE_ENABLED = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_ENABLED, GenericType.listOf(String.class)); - /** The largest latency that we expect to record for requests. */ - public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_HIGHEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for requests. */ - public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_LOWEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> METRICS_SESSION_CQL_REQUESTS_SLO = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_SLO, - GenericType.listOf(GenericType.DURATION)); - /** Optional pre-defined percentile of cql requests to publish, as a list of percentiles . */ - public static final TypedDriverOption> - METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for - * requests. - */ - public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_DIGITS = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for requests. 
*/ - public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_INTERVAL, GenericType.DURATION); - /** The largest latency that we expect to record for throttling. */ - public static final TypedDriverOption METRICS_SESSION_THROTTLING_HIGHEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for throttling. */ - public static final TypedDriverOption METRICS_SESSION_THROTTLING_LOWEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> METRICS_SESSION_THROTTLING_SLO = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_SLO, - GenericType.listOf(GenericType.DURATION)); - /** Optional pre-defined percentile of throttling delay to publish, as a list of percentiles . */ - public static final TypedDriverOption> - METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for - * throttling. - */ - public static final TypedDriverOption METRICS_SESSION_THROTTLING_DIGITS = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for throttling. */ - public static final TypedDriverOption METRICS_SESSION_THROTTLING_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_INTERVAL, GenericType.DURATION); - /** The largest latency that we expect to record for requests. 
*/ - public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_HIGHEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for requests. */ - public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_LOWEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> METRICS_NODE_CQL_MESSAGES_SLO = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_SLO, - GenericType.listOf(GenericType.DURATION)); - /** Optional pre-defined percentile of node cql messages to publish, as a list of percentiles . */ - public static final TypedDriverOption> - METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for - * requests. - */ - public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_DIGITS = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for requests. */ - public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_INTERVAL, GenericType.DURATION); - /** Whether or not to disable the Nagle algorithm. */ - public static final TypedDriverOption SOCKET_TCP_NODELAY = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_TCP_NODELAY, GenericType.BOOLEAN); - /** Whether or not to enable TCP keep-alive probes. 
*/ - public static final TypedDriverOption SOCKET_KEEP_ALIVE = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_KEEP_ALIVE, GenericType.BOOLEAN); - /** Whether or not to allow address reuse. */ - public static final TypedDriverOption SOCKET_REUSE_ADDRESS = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_REUSE_ADDRESS, GenericType.BOOLEAN); - /** Sets the linger interval. */ - public static final TypedDriverOption SOCKET_LINGER_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_LINGER_INTERVAL, GenericType.INTEGER); - /** Sets a hint to the size of the underlying buffers for incoming network I/O. */ - public static final TypedDriverOption SOCKET_RECEIVE_BUFFER_SIZE = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE, GenericType.INTEGER); - /** Sets a hint to the size of the underlying buffers for outgoing network I/O. */ - public static final TypedDriverOption SOCKET_SEND_BUFFER_SIZE = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE, GenericType.INTEGER); - /** The connection heartbeat interval. */ - public static final TypedDriverOption HEARTBEAT_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.HEARTBEAT_INTERVAL, GenericType.DURATION); - /** How long the driver waits for the response to a heartbeat. */ - public static final TypedDriverOption HEARTBEAT_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.HEARTBEAT_TIMEOUT, GenericType.DURATION); - /** How long the driver waits to propagate a Topology event. */ - public static final TypedDriverOption METADATA_TOPOLOGY_WINDOW = - new TypedDriverOption<>(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW, GenericType.DURATION); - /** The maximum number of events that can accumulate. */ - public static final TypedDriverOption METADATA_TOPOLOGY_MAX_EVENTS = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS, GenericType.INTEGER); - /** Whether schema metadata is enabled. 
*/ - public static final TypedDriverOption METADATA_SCHEMA_ENABLED = - new TypedDriverOption<>(DefaultDriverOption.METADATA_SCHEMA_ENABLED, GenericType.BOOLEAN); - /** The timeout for the requests to the schema tables. */ - public static final TypedDriverOption METADATA_SCHEMA_REQUEST_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT, GenericType.DURATION); - /** The page size for the requests to the schema tables. */ - public static final TypedDriverOption METADATA_SCHEMA_REQUEST_PAGE_SIZE = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE, GenericType.INTEGER); - /** The list of keyspaces for which schema and token metadata should be maintained. */ - public static final TypedDriverOption> METADATA_SCHEMA_REFRESHED_KEYSPACES = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, - GenericType.listOf(String.class)); - /** How long the driver waits to apply a refresh. */ - public static final TypedDriverOption METADATA_SCHEMA_WINDOW = - new TypedDriverOption<>(DefaultDriverOption.METADATA_SCHEMA_WINDOW, GenericType.DURATION); - /** The maximum number of refreshes that can accumulate. */ - public static final TypedDriverOption METADATA_SCHEMA_MAX_EVENTS = - new TypedDriverOption<>(DefaultDriverOption.METADATA_SCHEMA_MAX_EVENTS, GenericType.INTEGER); - /** Whether token metadata is enabled. */ - public static final TypedDriverOption METADATA_TOKEN_MAP_ENABLED = - new TypedDriverOption<>(DefaultDriverOption.METADATA_TOKEN_MAP_ENABLED, GenericType.BOOLEAN); - /** How long the driver waits for responses to control queries. */ - public static final TypedDriverOption CONTROL_CONNECTION_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT, GenericType.DURATION); - /** The interval between each schema agreement check attempt. 
*/ - public static final TypedDriverOption CONTROL_CONNECTION_AGREEMENT_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL, GenericType.DURATION); - /** The timeout after which schema agreement fails. */ - public static final TypedDriverOption CONTROL_CONNECTION_AGREEMENT_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT, GenericType.DURATION); - /** Whether to log a warning if schema agreement fails. */ - public static final TypedDriverOption CONTROL_CONNECTION_AGREEMENT_WARN = - new TypedDriverOption<>( - DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN, GenericType.BOOLEAN); - /** Whether `Session.prepare` calls should be sent to all nodes in the cluster. */ - public static final TypedDriverOption PREPARE_ON_ALL_NODES = - new TypedDriverOption<>(DefaultDriverOption.PREPARE_ON_ALL_NODES, GenericType.BOOLEAN); - /** Whether the driver tries to prepare on new nodes at all. */ - public static final TypedDriverOption REPREPARE_ENABLED = - new TypedDriverOption<>(DefaultDriverOption.REPREPARE_ENABLED, GenericType.BOOLEAN); - /** Whether to check `system.prepared_statements` on the target node before repreparing. */ - public static final TypedDriverOption REPREPARE_CHECK_SYSTEM_TABLE = - new TypedDriverOption<>( - DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE, GenericType.BOOLEAN); - /** The maximum number of statements that should be reprepared. */ - public static final TypedDriverOption REPREPARE_MAX_STATEMENTS = - new TypedDriverOption<>(DefaultDriverOption.REPREPARE_MAX_STATEMENTS, GenericType.INTEGER); - /** The maximum number of concurrent requests when repreparing. */ - public static final TypedDriverOption REPREPARE_MAX_PARALLELISM = - new TypedDriverOption<>(DefaultDriverOption.REPREPARE_MAX_PARALLELISM, GenericType.INTEGER); - /** The request timeout when repreparing. 
*/ - public static final TypedDriverOption REPREPARE_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.REPREPARE_TIMEOUT, GenericType.DURATION); - /** Whether the prepared statements cache use weak values. */ - public static final TypedDriverOption PREPARED_CACHE_WEAK_VALUES = - new TypedDriverOption<>(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, GenericType.BOOLEAN); - /** The number of threads in the I/O group. */ - public static final TypedDriverOption NETTY_IO_SIZE = - new TypedDriverOption<>(DefaultDriverOption.NETTY_IO_SIZE, GenericType.INTEGER); - /** Quiet period for I/O group shutdown. */ - public static final TypedDriverOption NETTY_IO_SHUTDOWN_QUIET_PERIOD = - new TypedDriverOption<>( - DefaultDriverOption.NETTY_IO_SHUTDOWN_QUIET_PERIOD, GenericType.INTEGER); - /** Max time to wait for I/O group shutdown. */ - public static final TypedDriverOption NETTY_IO_SHUTDOWN_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.NETTY_IO_SHUTDOWN_TIMEOUT, GenericType.INTEGER); - /** Units for I/O group quiet period and timeout. */ - public static final TypedDriverOption NETTY_IO_SHUTDOWN_UNIT = - new TypedDriverOption<>(DefaultDriverOption.NETTY_IO_SHUTDOWN_UNIT, GenericType.STRING); - /** The number of threads in the Admin group. */ - public static final TypedDriverOption NETTY_ADMIN_SIZE = - new TypedDriverOption<>(DefaultDriverOption.NETTY_ADMIN_SIZE, GenericType.INTEGER); - /** Quiet period for admin group shutdown. */ - public static final TypedDriverOption NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD = - new TypedDriverOption<>( - DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD, GenericType.INTEGER); - /** Max time to wait for admin group shutdown. */ - public static final TypedDriverOption NETTY_ADMIN_SHUTDOWN_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_TIMEOUT, GenericType.INTEGER); - /** Units for admin group quiet period and timeout. 
*/ - public static final TypedDriverOption NETTY_ADMIN_SHUTDOWN_UNIT = - new TypedDriverOption<>(DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_UNIT, GenericType.STRING); - /** @deprecated This option was removed in version 4.6.1. */ - @Deprecated - public static final TypedDriverOption COALESCER_MAX_RUNS = - new TypedDriverOption<>(DefaultDriverOption.COALESCER_MAX_RUNS, GenericType.INTEGER); - /** The coalescer reschedule interval. */ - public static final TypedDriverOption COALESCER_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.COALESCER_INTERVAL, GenericType.DURATION); - /** Whether to resolve the addresses passed to `basic.contact-points`. */ - public static final TypedDriverOption RESOLVE_CONTACT_POINTS = - new TypedDriverOption<>(DefaultDriverOption.RESOLVE_CONTACT_POINTS, GenericType.BOOLEAN); - /** - * This is how frequent the timer should wake up to check for timed-out tasks or speculative - * executions. - */ - public static final TypedDriverOption NETTY_TIMER_TICK_DURATION = - new TypedDriverOption<>(DefaultDriverOption.NETTY_TIMER_TICK_DURATION, GenericType.DURATION); - /** Number of ticks in the Timer wheel. */ - public static final TypedDriverOption NETTY_TIMER_TICKS_PER_WHEEL = - new TypedDriverOption<>(DefaultDriverOption.NETTY_TIMER_TICKS_PER_WHEEL, GenericType.INTEGER); - /** - * Whether logging of server warnings generated during query execution should be disabled by the - * driver. - */ - public static final TypedDriverOption REQUEST_LOG_WARNINGS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOG_WARNINGS, GenericType.BOOLEAN); - /** Whether the threads created by the driver should be daemon threads. */ - public static final TypedDriverOption NETTY_DAEMON = - new TypedDriverOption<>(DefaultDriverOption.NETTY_DAEMON, GenericType.BOOLEAN); - /** - * The location of the cloud secure bundle used to connect to DataStax Apache Cassandra as a - * service. 
- */ - public static final TypedDriverOption CLOUD_SECURE_CONNECT_BUNDLE = - new TypedDriverOption<>(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, GenericType.STRING); - /** Whether the slow replica avoidance should be enabled in the default LBP. */ - public static final TypedDriverOption LOAD_BALANCING_POLICY_SLOW_AVOIDANCE = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, GenericType.BOOLEAN); - /** The timeout to use when establishing driver connections. */ - public static final TypedDriverOption CONNECTION_CONNECT_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT, GenericType.DURATION); - /** The maximum number of live sessions that are allowed to coexist in a given VM. */ - public static final TypedDriverOption SESSION_LEAK_THRESHOLD = - new TypedDriverOption<>(DefaultDriverOption.SESSION_LEAK_THRESHOLD, GenericType.INTEGER); - - /** The name of the application using the session. */ - public static final TypedDriverOption APPLICATION_NAME = - new TypedDriverOption<>(DseDriverOption.APPLICATION_NAME, GenericType.STRING); - /** The version of the application using the session. */ - public static final TypedDriverOption APPLICATION_VERSION = - new TypedDriverOption<>(DseDriverOption.APPLICATION_VERSION, GenericType.STRING); - /** Proxy authentication for GSSAPI authentication: allows to login as another user or role. */ - public static final TypedDriverOption AUTH_PROVIDER_AUTHORIZATION_ID = - new TypedDriverOption<>(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, GenericType.STRING); - /** Service name for GSSAPI authentication. */ - public static final TypedDriverOption AUTH_PROVIDER_SERVICE = - new TypedDriverOption<>(DseDriverOption.AUTH_PROVIDER_SERVICE, GenericType.STRING); - /** Login configuration for GSSAPI authentication. 
*/ - public static final TypedDriverOption AUTH_PROVIDER_LOGIN_CONFIGURATION = - new TypedDriverOption<>( - DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, GenericType.STRING); - /** Internal SASL properties, if any, such as QOP, for GSSAPI authentication. */ - public static final TypedDriverOption> AUTH_PROVIDER_SASL_PROPERTIES = - new TypedDriverOption<>( - DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES, - GenericType.mapOf(GenericType.STRING, GenericType.STRING)); - /** The page size for continuous paging. */ - public static final TypedDriverOption CONTINUOUS_PAGING_PAGE_SIZE = - new TypedDriverOption<>(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, GenericType.INTEGER); - /** - * Whether {@link #CONTINUOUS_PAGING_PAGE_SIZE} should be interpreted in number of rows or bytes. - */ - public static final TypedDriverOption CONTINUOUS_PAGING_PAGE_SIZE_BYTES = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES, GenericType.BOOLEAN); - /** The maximum number of continuous pages to return. */ - public static final TypedDriverOption CONTINUOUS_PAGING_MAX_PAGES = - new TypedDriverOption<>(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES, GenericType.INTEGER); - /** The maximum number of continuous pages per second. */ - public static final TypedDriverOption CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, GenericType.INTEGER); - /** The maximum number of continuous pages that can be stored in the local queue. */ - public static final TypedDriverOption CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, GenericType.INTEGER); - /** How long to wait for the coordinator to send the first continuous page. 
*/ - public static final TypedDriverOption CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, GenericType.DURATION); - /** How long to wait for the coordinator to send subsequent continuous pages. */ - public static final TypedDriverOption CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, GenericType.DURATION); - /** The largest latency that we expect to record for continuous requests. */ - public static final TypedDriverOption - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, - GenericType.DURATION); - /** The shortest latency that we expect to record for continuous requests. */ - public static final TypedDriverOption - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST, - GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO, - GenericType.listOf(GenericType.DURATION)); - /** - * Optional pre-defined percentile of continuous paging cql requests to publish, as a list of - * percentiles . - */ - public static final TypedDriverOption> - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for - * continuous requests. 
- */ - public static final TypedDriverOption - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, - GenericType.INTEGER); - /** The interval at which percentile data is refreshed for continuous requests. */ - public static final TypedDriverOption - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL, - GenericType.DURATION); - /** The read consistency level to use for graph statements. */ - public static final TypedDriverOption GRAPH_READ_CONSISTENCY_LEVEL = - new TypedDriverOption<>(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, GenericType.STRING); - /** The write consistency level to use for graph statements. */ - public static final TypedDriverOption GRAPH_WRITE_CONSISTENCY_LEVEL = - new TypedDriverOption<>(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, GenericType.STRING); - /** The traversal source to use for graph statements. */ - public static final TypedDriverOption GRAPH_TRAVERSAL_SOURCE = - new TypedDriverOption<>(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, GenericType.STRING); - /** - * The sub-protocol the driver will use to communicate with DSE Graph, on top of the Cassandra - * native protocol. - */ - public static final TypedDriverOption GRAPH_SUB_PROTOCOL = - new TypedDriverOption<>(DseDriverOption.GRAPH_SUB_PROTOCOL, GenericType.STRING); - /** Whether a script statement represents a system query. */ - public static final TypedDriverOption GRAPH_IS_SYSTEM_QUERY = - new TypedDriverOption<>(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, GenericType.BOOLEAN); - /** The name of the graph targeted by graph statements. */ - public static final TypedDriverOption GRAPH_NAME = - new TypedDriverOption<>(DseDriverOption.GRAPH_NAME, GenericType.STRING); - /** How long the driver waits for a graph request to complete. 
*/ - public static final TypedDriverOption GRAPH_TIMEOUT = - new TypedDriverOption<>(DseDriverOption.GRAPH_TIMEOUT, GenericType.DURATION); - /** Whether to send events for Insights monitoring. */ - public static final TypedDriverOption MONITOR_REPORTING_ENABLED = - new TypedDriverOption<>(DseDriverOption.MONITOR_REPORTING_ENABLED, GenericType.BOOLEAN); - /** Whether to enable paging for Graph queries. */ - public static final TypedDriverOption GRAPH_PAGING_ENABLED = - new TypedDriverOption<>(DseDriverOption.GRAPH_PAGING_ENABLED, GenericType.STRING); - /** The page size for Graph continuous paging. */ - public static final TypedDriverOption GRAPH_CONTINUOUS_PAGING_PAGE_SIZE = - new TypedDriverOption<>( - DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, GenericType.INTEGER); - /** The maximum number of Graph continuous pages to return. */ - public static final TypedDriverOption GRAPH_CONTINUOUS_PAGING_MAX_PAGES = - new TypedDriverOption<>( - DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES, GenericType.INTEGER); - /** The maximum number of Graph continuous pages per second. */ - public static final TypedDriverOption GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND = - new TypedDriverOption<>( - DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, GenericType.INTEGER); - /** The maximum number of Graph continuous pages that can be stored in the local queue. */ - public static final TypedDriverOption GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES = - new TypedDriverOption<>( - DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, GenericType.INTEGER); - /** The largest latency that we expect to record for graph requests. */ - public static final TypedDriverOption METRICS_SESSION_GRAPH_REQUESTS_HIGHEST = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for graph requests. 
*/ - public static final TypedDriverOption METRICS_SESSION_GRAPH_REQUESTS_LOWEST = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> METRICS_SESSION_GRAPH_REQUESTS_SLO = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_SLO, - GenericType.listOf(GenericType.DURATION)); - /** Optional pre-defined percentile of graph requests to publish, as a list of percentiles . */ - public static final TypedDriverOption> - METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for graph - * requests. - */ - public static final TypedDriverOption METRICS_SESSION_GRAPH_REQUESTS_DIGITS = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for graph requests. */ - public static final TypedDriverOption METRICS_SESSION_GRAPH_REQUESTS_INTERVAL = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_INTERVAL, GenericType.DURATION); - /** The largest latency that we expect to record for graph requests. */ - public static final TypedDriverOption METRICS_NODE_GRAPH_MESSAGES_HIGHEST = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for graph requests. */ - public static final TypedDriverOption METRICS_NODE_GRAPH_MESSAGES_LOWEST = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. 
*/ - public static final TypedDriverOption> METRICS_NODE_GRAPH_MESSAGES_SLO = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_SLO, - GenericType.listOf(GenericType.DURATION)); - /** - * Optional pre-defined percentile of node graph requests to publish, as a list of percentiles . - */ - public static final TypedDriverOption> - METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for graph - * requests. - */ - public static final TypedDriverOption METRICS_NODE_GRAPH_MESSAGES_DIGITS = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for graph requests. */ - public static final TypedDriverOption METRICS_NODE_GRAPH_MESSAGES_INTERVAL = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_INTERVAL, GenericType.DURATION); - - /** The time after which the node level metrics will be evicted. */ - public static final TypedDriverOption METRICS_NODE_EXPIRE_AFTER = - new TypedDriverOption<>(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER, GenericType.DURATION); - - /** The classname of the desired MetricsFactory implementation. */ - public static final TypedDriverOption METRICS_FACTORY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.METRICS_FACTORY_CLASS, GenericType.STRING); - - /** The classname of the desired {@code MetricIdGenerator} implementation. */ - public static final TypedDriverOption METRICS_ID_GENERATOR_CLASS = - new TypedDriverOption<>(DefaultDriverOption.METRICS_ID_GENERATOR_CLASS, GenericType.STRING); - - /** The value of the prefix to prepend to all metric names. 
*/ - public static final TypedDriverOption METRICS_ID_GENERATOR_PREFIX = - new TypedDriverOption<>(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, GenericType.STRING); - - /** The maximum number of nodes from remote DCs to include in query plans. */ - public static final TypedDriverOption - LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC, - GenericType.INTEGER); - /** Whether to consider nodes from remote DCs if the request's consistency level is local. */ - public static final TypedDriverOption - LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS, - GenericType.BOOLEAN); - - public static final TypedDriverOption ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME = - new TypedDriverOption<>( - DefaultDriverOption.ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME, GenericType.STRING); - public static final TypedDriverOption> ADDRESS_TRANSLATOR_SUBNET_ADDRESSES = - new TypedDriverOption<>( - DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES, - GenericType.mapOf(GenericType.STRING, GenericType.STRING)); - public static final TypedDriverOption ADDRESS_TRANSLATOR_DEFAULT_ADDRESS = - new TypedDriverOption<>( - DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, GenericType.STRING); - public static final TypedDriverOption ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES = - new TypedDriverOption<>( - DefaultDriverOption.ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES, GenericType.BOOLEAN); - - /** - * Ordered preference list of remote dcs optionally supplied for automatic failover and included - * in query plan. This feature is enabled only when max-nodes-per-remote-dc is greater than 0. 
- */ - public static final TypedDriverOption> - LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS, - GenericType.listOf(String.class)); - - private static Iterable> introspectBuiltInValues() { - try { - ImmutableList.Builder> result = ImmutableList.builder(); - for (Field field : TypedDriverOption.class.getFields()) { - if ((field.getModifiers() & PUBLIC_STATIC_FINAL) == PUBLIC_STATIC_FINAL - && field.getType() == TypedDriverOption.class) { - TypedDriverOption typedOption = (TypedDriverOption) field.get(null); - result.add(typedOption); - } - } - return result.build(); - } catch (IllegalAccessException e) { - throw new IllegalStateException("Unexpected error while introspecting built-in values", e); - } - } - - private static final int PUBLIC_STATIC_FINAL = Modifier.PUBLIC | Modifier.STATIC | Modifier.FINAL; -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java deleted file mode 100644 index a751d983e70..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * The configuration of the driver. - * - *

The public API is completely agnostic to the underlying implementation (where the - * configuration is loaded from, what framework is used...). - */ -package com.datastax.oss.driver.api.core.config; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java deleted file mode 100644 index 8069474612a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Indicates that a write was attempted on a connection that already handles too many simultaneous - * requests. - * - *

This might happen under heavy load. The driver will automatically try the next node in the - * query plan. Therefore, the only way that the client can observe this exception is as part of a - * {@link AllNodesFailedException}. - */ -public class BusyConnectionException extends DriverException { - - // Note: the driver doesn't use this constructor anymore, it is preserved only for backward - // compatibility. - @SuppressWarnings("unused") - public BusyConnectionException(int maxAvailableIds) { - this( - String.format( - "Connection has exceeded its maximum of %d simultaneous requests", maxAvailableIds), - null, - false); - } - - public BusyConnectionException(String message) { - this(message, null, false); - } - - private BusyConnectionException( - String message, ExecutionInfo executionInfo, boolean writableStackTrace) { - super(message, executionInfo, null, writableStackTrace); - } - - @Override - @NonNull - public DriverException copy() { - return new BusyConnectionException(getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java deleted file mode 100644 index a192e2c5efc..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Thrown when the connection on which a request was executing is closed due to an unrelated event. - * - *

For example, this can happen if the node is unresponsive and a heartbeat query failed, or if - * the node was forced down. - * - *

The driver will retry these requests on the next node transparently, unless the request is not - * idempotent. Therefore, this exception is usually observed as part of an {@link - * AllNodesFailedException}. - */ -public class ClosedConnectionException extends DriverException { - - public ClosedConnectionException(@NonNull String message) { - this(message, null, false); - } - - public ClosedConnectionException(@NonNull String message, @Nullable Throwable cause) { - this(message, cause, false); - } - - private ClosedConnectionException( - @NonNull String message, @Nullable Throwable cause, boolean writableStackTrace) { - super(message, null, cause, writableStackTrace); - } - - @Override - @NonNull - public DriverException copy() { - return new ClosedConnectionException(getMessage(), getCause(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java deleted file mode 100644 index 519624e8d5d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates a generic error while initializing a connection. - * - *

The only time when this is returned directly to the client (wrapped in a {@link - * AllNodesFailedException}) is at initialization. If it happens later when the driver is already - * connected, it is just logged and the connection is reattempted. - */ -public class ConnectionInitException extends DriverException { - public ConnectionInitException(@NonNull String message, @Nullable Throwable cause) { - super(message, null, cause, true); - } - - private ConnectionInitException(String message, ExecutionInfo executionInfo, Throwable cause) { - super(message, executionInfo, cause, true); - } - - @NonNull - @Override - public DriverException copy() { - return new ConnectionInitException(getMessage(), getExecutionInfo(), getCause()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/CrcMismatchException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/CrcMismatchException.java deleted file mode 100644 index d0fc8fc3b73..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/CrcMismatchException.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.DriverException; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Thrown when the checksums in a server response don't match (protocol v5 or above). - * - *

This indicates a data corruption issue, either due to a hardware issue on the client, or on - * the network between the server and the client. It is not recoverable: the driver will drop the - * connection. - */ -public class CrcMismatchException extends DriverException { - - public CrcMismatchException(@NonNull String message) { - super(message, null, null, true); - } - - @NonNull - @Override - public DriverException copy() { - return new CrcMismatchException(getMessage()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java deleted file mode 100644 index 9954aefb3d4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.SocketAddress; - -/** - * Thrown when an incoming or outgoing protocol frame exceeds the limit defined by {@code - * protocol.max-frame-length} in the configuration. - * - *

This error is always rethrown directly to the client, without any retry attempt. - */ -public class FrameTooLongException extends DriverException { - - private final SocketAddress address; - - public FrameTooLongException(@NonNull SocketAddress address, @NonNull String message) { - this(address, message, null); - } - - private FrameTooLongException( - SocketAddress address, String message, ExecutionInfo executionInfo) { - super(message, executionInfo, null, false); - this.address = address; - } - - /** The address of the node that encountered the error. */ - @NonNull - public SocketAddress getAddress() { - return address; - } - - @NonNull - @Override - public DriverException copy() { - return new FrameTooLongException(address, getMessage(), getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java deleted file mode 100644 index 60c3d60a69d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.SocketAddress; - -/** - * Thrown when a heartbeat query fails. - * - *

Heartbeat queries are sent automatically on idle connections, to ensure that they are still - * alive. If a heartbeat query fails, the connection is closed, and all pending queries are aborted. - * The exception will be passed to {@link RetryPolicy#onRequestAbortedVerdict(Request, Throwable, - * int)}, which decides what to do next (the default policy retries the query on the next node). - */ -public class HeartbeatException extends DriverException { - - private final SocketAddress address; - - public HeartbeatException( - @NonNull SocketAddress address, @Nullable String message, @Nullable Throwable cause) { - this(address, message, null, cause); - } - - public HeartbeatException( - SocketAddress address, String message, ExecutionInfo executionInfo, Throwable cause) { - super(message, executionInfo, cause, true); - this.address = address; - } - - /** The address of the node that encountered the error. */ - @NonNull - public SocketAddress getAddress() { - return address; - } - - @NonNull - @Override - public DriverException copy() { - return new HeartbeatException(address, getMessage(), getExecutionInfo(), getCause()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java deleted file mode 100644 index 9f81843c9c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; - -/** - * Decides how often the driver tries to re-establish lost connections. - * - *

When a reconnection starts, the driver invokes this policy to create a {@link - * ReconnectionSchedule ReconnectionSchedule} instance. That schedule's {@link - * ReconnectionSchedule#nextDelay() nextDelay()} method will get called each time the driver needs - * to program the next connection attempt. When the reconnection succeeds, the schedule is - * discarded; if the connection is lost again later, the next reconnection attempt will query the - * policy again to obtain a new schedule. - * - *

There are two types of reconnection: - * - *

    - *
  • {@linkplain #newNodeSchedule(Node) for regular node connections}: when the connection pool - * for a node does not have its configured number of connections (see {@code - * advanced.connection.pool.*.size} in the configuration), a reconnection starts for that - * pool. - *
  • {@linkplain #newControlConnectionSchedule(boolean) for the control connection}: when the - * control node goes down, a reconnection starts to find another node to replace it. This is - * also used if the configuration option {@code advanced.reconnect-on-init} is set and the - * driver has to retry the initial connection. - *
- * - * This interface defines separate methods for those two cases, but implementations are free to - * delegate to the same method internally if the same type of schedule can be used. - */ -public interface ReconnectionPolicy extends AutoCloseable { - - /** Creates a new schedule for the given node. */ - @NonNull - ReconnectionSchedule newNodeSchedule(@NonNull Node node); - - /** - * Creates a new schedule for the control connection. - * - * @param isInitialConnection whether this schedule is generated for the driver's initial attempt - * to connect to the cluster. - *
    - *
  • {@code true} means that the configuration option {@code advanced.reconnect-on-init} - * is set, the driver failed to reach any contact point, and it is now scheduling - * reattempts. - *
  • {@code false} means that the driver was already initialized, lost connection to the - * control node, and is now scheduling attempts to connect to another node. - *
- */ - @NonNull - ReconnectionSchedule newControlConnectionSchedule(boolean isInitialConnection); - - /** Called when the cluster that this policy is associated with closes. */ - @Override - void close(); - - /** - * The reconnection schedule from the time a connection is lost, to the time all connections to - * this node have been restored. - */ - interface ReconnectionSchedule { - /** How long to wait before the next reconnection attempt. */ - @NonNull - Duration nextDelay(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java deleted file mode 100644 index 737f985ad1d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Types related to a connection to a Cassandra node. - * - *

The driver generally connects to multiple nodes, and may keep multiple connections to each - * node. - */ -package com.datastax.oss.driver.api.core.connection; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java b/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java deleted file mode 100644 index 6f0afd3df8a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.context; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** Holds common components that are shared throughout a driver instance. */ -public interface DriverContext extends AttachmentPoint { - - /** - * This is the same as {@link Session#getName()}, it's exposed here for components that only have - * a reference to the context. - */ - @NonNull - String getSessionName(); - - /** @return The driver's configuration; never {@code null}. */ - @NonNull - DriverConfig getConfig(); - - /** @return The driver's configuration loader; never {@code null}. 
*/ - @NonNull - DriverConfigLoader getConfigLoader(); - - /** - * @return The driver's load balancing policies, keyed by profile name; the returned map is - * guaranteed to never be {@code null} and to always contain an entry for the {@value - * DriverExecutionProfile#DEFAULT_NAME} profile. - */ - @NonNull - Map getLoadBalancingPolicies(); - - /** - * @param profileName the profile name; never {@code null}. - * @return The driver's load balancing policy for the given profile; never {@code null}. - */ - @NonNull - default LoadBalancingPolicy getLoadBalancingPolicy(@NonNull String profileName) { - LoadBalancingPolicy policy = getLoadBalancingPolicies().get(profileName); - // Protect against a non-existent name - return (policy != null) - ? policy - : getLoadBalancingPolicies().get(DriverExecutionProfile.DEFAULT_NAME); - } - - /** - * @return The driver's retry policies, keyed by profile name; the returned map is guaranteed to - * never be {@code null} and to always contain an entry for the {@value - * DriverExecutionProfile#DEFAULT_NAME} profile. - */ - @NonNull - Map getRetryPolicies(); - - /** - * @param profileName the profile name; never {@code null}. - * @return The driver's retry policy for the given profile; never {@code null}. - */ - @NonNull - default RetryPolicy getRetryPolicy(@NonNull String profileName) { - RetryPolicy policy = getRetryPolicies().get(profileName); - return (policy != null) ? policy : getRetryPolicies().get(DriverExecutionProfile.DEFAULT_NAME); - } - - /** - * @return The driver's speculative execution policies, keyed by profile name; the returned map is - * guaranteed to never be {@code null} and to always contain an entry for the {@value - * DriverExecutionProfile#DEFAULT_NAME} profile. - */ - @NonNull - Map getSpeculativeExecutionPolicies(); - - /** - * @param profileName the profile name; never {@code null}. - * @return The driver's speculative execution policy for the given profile; never {@code null}. 
- */ - @NonNull - default SpeculativeExecutionPolicy getSpeculativeExecutionPolicy(@NonNull String profileName) { - SpeculativeExecutionPolicy policy = getSpeculativeExecutionPolicies().get(profileName); - return (policy != null) - ? policy - : getSpeculativeExecutionPolicies().get(DriverExecutionProfile.DEFAULT_NAME); - } - - /** @return The driver's timestamp generator; never {@code null}. */ - @NonNull - TimestampGenerator getTimestampGenerator(); - - /** @return The driver's reconnection policy; never {@code null}. */ - @NonNull - ReconnectionPolicy getReconnectionPolicy(); - - /** @return The driver's address translator; never {@code null}. */ - @NonNull - AddressTranslator getAddressTranslator(); - - /** @return The authentication provider, if authentication was configured. */ - @NonNull - Optional getAuthProvider(); - - /** @return The SSL engine factory, if SSL was configured. */ - @NonNull - Optional getSslEngineFactory(); - - /** @return The driver's request tracker; never {@code null}. */ - @NonNull - RequestTracker getRequestTracker(); - - /** @return The driver's request ID generator; never {@code null}. */ - @NonNull - Optional getRequestIdGenerator(); - - /** @return The driver's request throttler; never {@code null}. */ - @NonNull - RequestThrottler getRequestThrottler(); - - /** @return The driver's node state listener; never {@code null}. */ - @NonNull - NodeStateListener getNodeStateListener(); - - /** @return The driver's schema change listener; never {@code null}. 
*/ - @NonNull - SchemaChangeListener getSchemaChangeListener(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.java deleted file mode 100644 index 7b56bd61a09..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.cql.DefaultPrepareRequest; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.CompletionStage; - -/** - * A session that offers user-friendly methods to execute CQL requests asynchronously. - * - * @since 4.4.0 - */ -public interface AsyncCqlSession extends Session { - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - * @param statement the CQL query to execute (that can be any {@code Statement}). 
- * @return a {@code CompletionStage} that, once complete, will produce the async result set. - */ - @NonNull - default CompletionStage executeAsync(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, Statement.ASYNC), "The CQL processor should never return a null result"); - } - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - *

This is an alias for {@link #executeAsync(Statement)} - * executeAsync(SimpleStatement.newInstance(query))}. - * - * @param query the CQL query to execute. - * @return a {@code CompletionStage} that, once complete, will produce the async result set. - * @see SimpleStatement#newInstance(String) - */ - @NonNull - default CompletionStage executeAsync(@NonNull String query) { - return executeAsync(SimpleStatement.newInstance(query)); - } - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - *

This is an alias for {@link #executeAsync(Statement)} - * executeAsync(SimpleStatement.newInstance(query, values))}. - * - * @param query the CQL query to execute. - * @param values the values for placeholders in the query string. Individual values can be {@code - * null}, but the vararg array itself can't. - * @return a {@code CompletionStage} that, once complete, will produce the async result set. - * @see SimpleStatement#newInstance(String, Object...) - */ - @NonNull - default CompletionStage executeAsync( - @NonNull String query, @NonNull Object... values) { - return executeAsync(SimpleStatement.newInstance(query, values)); - } - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - *

This is an alias for {@link #executeAsync(Statement)} - * executeAsync(SimpleStatement.newInstance(query, values))}. - * - * @param query the CQL query to execute. - * @param values the values for named placeholders in the query string. Individual values can be - * {@code null}, but the map itself can't. - * @return a {@code CompletionStage} that, once complete, will produce the async result set. - * @see SimpleStatement#newInstance(String, Map) - */ - @NonNull - default CompletionStage executeAsync( - @NonNull String query, @NonNull Map values) { - return executeAsync(SimpleStatement.newInstance(query, values)); - } - - /** - * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was - * sent, generally before the statement is prepared). - * - *

Note that the bound statements created from the resulting prepared statement will inherit - * some of the attributes of {@code query}; see {@link SyncCqlSession#prepare(SimpleStatement)} - * for more details. - * - *

The result of this method is cached (see {@link SyncCqlSession#prepare(SimpleStatement)} for - * more explanations). - * - * @param statement the CQL query to prepare (that can be any {@code SimpleStatement}). - * @return a {@code CompletionStage} that, once complete, will produce the prepared statement. - */ - @NonNull - default CompletionStage prepareAsync(@NonNull SimpleStatement statement) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(statement), PrepareRequest.ASYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was - * sent, generally before the statement is prepared). - * - *

The result of this method is cached (see {@link SyncCqlSession#prepare(SimpleStatement)} for - * more explanations). - * - * @param query the CQL query string to prepare. - * @return a {@code CompletionStage} that, once complete, will produce the prepared statement. - */ - @NonNull - default CompletionStage prepareAsync(@NonNull String query) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(query), PrepareRequest.ASYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was - * sent, generally before the statement is prepared). - * - *

This variant is exposed in case you use an ad hoc {@link PrepareRequest} implementation to - * customize how attributes are propagated when you prepare a {@link SimpleStatement} (see {@link - * SyncCqlSession#prepare(SimpleStatement)} for more explanations). Otherwise, you should rarely - * have to deal with {@link PrepareRequest} directly. - * - *

The result of this method is cached (see {@link SyncCqlSession#prepare(SimpleStatement)} for - * more explanations). - * - * @param request the {@code PrepareRequest} to prepare. - * @return a {@code CompletionStage} that, once complete, will produce the prepared statement. - */ - @NonNull - default CompletionStage prepareAsync(PrepareRequest request) { - return Objects.requireNonNull( - execute(request, PrepareRequest.ASYNC), - "The CQL prepare processor should never return a null result"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java deleted file mode 100644 index 05a292ccbd0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.CqlSession; - -/** - * The result of an asynchronous CQL query. 
- * - * @see CqlSession#executeAsync(Statement) - * @see CqlSession#executeAsync(String) - */ -public interface AsyncResultSet extends AsyncPagingIterable { - - // overridden to amend the javadocs: - /** - * {@inheritDoc} - * - *

This is equivalent to calling: - * - *

-   *   this.iterator().next().getBoolean("[applied]")
-   * 
- * - * Except that this method peeks at the next row without consuming it. - */ - @Override - boolean wasApplied(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java deleted file mode 100644 index 9deb33c6007..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.cql.DefaultBatchStatement; -import com.datastax.oss.driver.internal.core.time.ServerSideTimestampGenerator; -import com.datastax.oss.driver.internal.core.util.Sizes; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.PrimitiveSizes; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; - -/** - * A statement that groups a number of other statements, so that they can be executed as a batch - * (i.e. sent together as a single protocol frame). - * - *

The default implementation returned by the driver is immutable and thread-safe. - * All mutating methods return a new instance. See also the static factory methods and builders in - * this interface. - */ -public interface BatchStatement extends Statement, Iterable> { - - /** - * Creates an instance of the default implementation for the given batch type. - * - *

Note that the returned object is immutable. - */ - @NonNull - static BatchStatement newInstance(@NonNull BatchType batchType) { - return new DefaultBatchStatement( - batchType, - new ArrayList<>(), - null, - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Creates an instance of the default implementation for the given batch type, containing the - * given statements. - * - *

Note that the returned object is immutable. - */ - @NonNull - static BatchStatement newInstance( - @NonNull BatchType batchType, @NonNull Iterable> statements) { - return new DefaultBatchStatement( - batchType, - ImmutableList.copyOf(statements), - null, - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Creates an instance of the default implementation for the given batch type, containing the - * given statements. - * - *

Note that the returned object is immutable. - */ - @NonNull - static BatchStatement newInstance( - @NonNull BatchType batchType, @NonNull BatchableStatement... statements) { - return new DefaultBatchStatement( - batchType, - ImmutableList.copyOf(statements), - null, - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Returns a builder to create an instance of the default implementation. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static BatchStatementBuilder builder(@NonNull BatchType batchType) { - return new BatchStatementBuilder(batchType); - } - - /** - * Returns a builder to create an instance of the default implementation, copying the fields of - * the given statement. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static BatchStatementBuilder builder(@NonNull BatchStatement template) { - return new BatchStatementBuilder(template); - } - - @NonNull - BatchType getBatchType(); - - /** - * Sets the batch type. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - BatchStatement setBatchType(@NonNull BatchType newBatchType); - - /** - * Sets the CQL keyspace to associate with this batch. - * - *

If the keyspace is not set explicitly with this method, it will be inferred from the first - * simple statement in the batch that has a keyspace set (or will be null if no such statement - * exists). - * - *

This feature is only available with {@link DefaultProtocolVersion#V5 native protocol v5} or - * higher. Specifying a per-request keyspace with lower protocol versions will cause a runtime - * error. - * - * @see Request#getKeyspace() - */ - @NonNull - @CheckReturnValue - BatchStatement setKeyspace(@Nullable CqlIdentifier newKeyspace); - - /** - * Shortcut for {@link #setKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(newKeyspaceName))}. - */ - @NonNull - @CheckReturnValue - default BatchStatement setKeyspace(@NonNull String newKeyspaceName) { - return setKeyspace(CqlIdentifier.fromCql(newKeyspaceName)); - } - - /** - * Adds a new statement to the batch. - * - *

Note that, due to protocol limitations, simple statements with named values are currently - * not supported. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - BatchStatement add(@NonNull BatchableStatement statement); - - /** - * Adds new statements to the batch. - * - *

Note that, due to protocol limitations, simple statements with named values are currently - * not supported. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - BatchStatement addAll(@NonNull Iterable> statements); - - /** @see #addAll(Iterable) */ - @NonNull - @CheckReturnValue - default BatchStatement addAll(@NonNull BatchableStatement... statements) { - return addAll(Arrays.asList(statements)); - } - - /** @return The number of child statements in this batch. */ - int size(); - - /** - * Clears the batch, removing all the statements added so far. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - BatchStatement clear(); - - @Override - default int computeSizeInBytes(@NonNull DriverContext context) { - int size = Sizes.minimumStatementSize(this, context); - - // BatchStatement's additional elements to take into account are: - // - batch type - // - inner statements (simple or bound) - // - per-query keyspace - // - timestamp - - // batch type - size += PrimitiveSizes.BYTE; - - // inner statements - size += PrimitiveSizes.SHORT; // number of statements - - for (BatchableStatement batchableStatement : this) { - size += - Sizes.sizeOfInnerBatchStatementInBytes( - batchableStatement, context.getProtocolVersion(), context.getCodecRegistry()); - } - - // per-query keyspace - if (getKeyspace() != null) { - size += PrimitiveSizes.sizeOfString(getKeyspace().asInternal()); - } - - // timestamp - if (!(context.getTimestampGenerator() instanceof ServerSideTimestampGenerator) - || getQueryTimestamp() != Statement.NO_DEFAULT_TIMESTAMP) { - - size += PrimitiveSizes.LONG; - } - - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java deleted file mode 100644 index a8e2b8ab659..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.cql.DefaultBatchStatement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; -import net.jcip.annotations.NotThreadSafe; - -/** - * A builder to create a batch statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class BatchStatementBuilder extends StatementBuilder { - - @NonNull private BatchType batchType; - @Nullable private CqlIdentifier keyspace; - @NonNull private ImmutableList.Builder> statementsBuilder; - private int statementsCount; - - public BatchStatementBuilder(@NonNull BatchType batchType) { - this.batchType = batchType; - this.statementsBuilder = ImmutableList.builder(); - } - - public BatchStatementBuilder(@NonNull BatchStatement template) { - super(template); - this.batchType = template.getBatchType(); - this.statementsBuilder = ImmutableList.>builder().addAll(template); - this.statementsCount = template.size(); - } - - /** - * Sets the CQL keyspace to execute this batch in. - * - * @return this builder; never {@code null}. - * @see BatchStatement#getKeyspace() - */ - @NonNull - public BatchStatementBuilder setKeyspace(@NonNull CqlIdentifier keyspace) { - this.keyspace = keyspace; - return this; - } - - /** - * Sets the CQL keyspace to execute this batch in. Shortcut for {@link #setKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(keyspaceName))}. - * - * @return this builder; never {@code null}. - */ - @NonNull - public BatchStatementBuilder setKeyspace(@NonNull String keyspaceName) { - return setKeyspace(CqlIdentifier.fromCql(keyspaceName)); - } - - /** - * Adds a new statement to the batch. - * - * @return this builder; never {@code null}. - * @see BatchStatement#add(BatchableStatement) - */ - @NonNull - public BatchStatementBuilder addStatement(@NonNull BatchableStatement statement) { - if (statementsCount >= 0xFFFF) { - throw new IllegalStateException( - "Batch statement cannot contain more than " + 0xFFFF + " statements."); - } - statementsCount += 1; - statementsBuilder.add(statement); - return this; - } - - /** - * Adds new statements to the batch. - * - * @return this builder; never {@code null}. 
- * @see BatchStatement#addAll(Iterable) - */ - @NonNull - public BatchStatementBuilder addStatements(@NonNull Iterable> statements) { - int delta = Iterables.size(statements); - if (statementsCount + delta > 0xFFFF) { - throw new IllegalStateException( - "Batch statement cannot contain more than " + 0xFFFF + " statements."); - } - statementsCount += delta; - statementsBuilder.addAll(statements); - return this; - } - - /** - * Adds new statements to the batch. - * - * @return this builder; never {@code null}. - * @see BatchStatement#addAll(BatchableStatement[]) - */ - @NonNull - public BatchStatementBuilder addStatements(@NonNull BatchableStatement... statements) { - return addStatements(Arrays.asList(statements)); - } - - /** - * Clears all the statements in this batch. - * - * @return this builder; never {@code null}. - */ - @NonNull - public BatchStatementBuilder clearStatements() { - statementsBuilder = ImmutableList.builder(); - statementsCount = 0; - return this; - } - - /** @return a newly-allocated {@linkplain BatchStatement batch}; never {@code null}.. 
*/ - @Override - @NonNull - public BatchStatement build() { - return new DefaultBatchStatement( - batchType, - statementsBuilder.build(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - buildCustomPayload(), - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - public int getStatementsCount() { - return this.statementsCount; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java deleted file mode 100644 index 6b0a7f09688..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -/** - * The type of a batch. - * - *

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code BatchType}s are {@link DefaultBatchType} instances. - */ -public interface BatchType { - - BatchType LOGGED = DefaultBatchType.LOGGED; - BatchType UNLOGGED = DefaultBatchType.UNLOGGED; - BatchType COUNTER = DefaultBatchType.COUNTER; - - /** The numerical value that the batch type is encoded to. */ - byte getProtocolCode(); - - // Implementation note: we don't have a "BatchTypeRegistry" because we never decode batch types. - // This can be added later if needed (see ConsistencyLevelRegistry for an example). -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java deleted file mode 100644 index a25f625bae9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -/** - * A statement that can be added to a CQL batch. 
- * - * @param the "self type" used for covariant returns in subtypes. - */ -public interface BatchableStatement> - extends Statement {} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java deleted file mode 100644 index 64f0f22a051..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.AccessibleByName; -import com.datastax.oss.driver.api.core.data.GettableById; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableById; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A data container with the ability to unset values. 
*/ -public interface Bindable> - extends GettableById, GettableByName, SettableById, SettableByName { - /** - * Whether the {@code i}th value has been set. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @SuppressWarnings("ReferenceEquality") - default boolean isSet(int i) { - return getBytesUnsafe(i) != ProtocolConstants.UNSET_VALUE; - } - - /** - * Whether the value for the first occurrence of {@code id} has been set. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IndexOutOfBoundsException if the id is invalid. - */ - @SuppressWarnings("ReferenceEquality") - default boolean isSet(@NonNull CqlIdentifier id) { - return getBytesUnsafe(id) != ProtocolConstants.UNSET_VALUE; - } - - /** - * Whether the value for the first occurrence of {@code name} has been set. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IndexOutOfBoundsException if the name is invalid. - */ - @SuppressWarnings("ReferenceEquality") - default boolean isSet(@NonNull String name) { - return getBytesUnsafe(name) != ProtocolConstants.UNSET_VALUE; - } - - /** - * Unsets the {@code i}th value. This will leave the statement in the same state as if no setter - * was ever called for this value. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT unset(int i) { - return setBytesUnsafe(i, ProtocolConstants.UNSET_VALUE); - } - - /** - * Unsets the value for the first occurrence of {@code id}. This will leave the statement in the - * same state as if no setter was ever called for this value. - * - * @throws IndexOutOfBoundsException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT unset(@NonNull CqlIdentifier id) { - return setBytesUnsafe(id, ProtocolConstants.UNSET_VALUE); - } - - /** - * Unsets the value for the first occurrence of {@code name}. This will leave the statement in the - * same state as if no setter was ever called for this value. - * - * @throws IndexOutOfBoundsException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT unset(@NonNull String name) { - return setBytesUnsafe(name, ProtocolConstants.UNSET_VALUE); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java deleted file mode 100644 index bd7c142907f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.time.ServerSideTimestampGenerator; -import com.datastax.oss.driver.internal.core.util.Sizes; -import com.datastax.oss.protocol.internal.PrimitiveSizes; -import com.datastax.oss.protocol.internal.request.query.Values; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * A prepared statement in its executable form, with values bound to the variables. - * - *

The default implementation returned by the driver is immutable and thread-safe. - * All mutating methods return a new instance. - */ -public interface BoundStatement - extends BatchableStatement, Bindable { - - /** The prepared statement that was used to create this statement. */ - @NonNull - PreparedStatement getPreparedStatement(); - - /** The values to bind, in their serialized form. */ - @NonNull - List getValues(); - - /** - * Always returns {@code null} (bound statements can't have a per-request keyspace, they always - * inherit the one of the statement that was initially prepared). - */ - @Override - @Nullable - default CqlIdentifier getKeyspace() { - return null; - } - - @Override - default int computeSizeInBytes(@NonNull DriverContext context) { - int size = Sizes.minimumStatementSize(this, context); - - // BoundStatement's additional elements to take into account are: - // - prepared ID - // - result metadata ID - // - parameters - // - page size - // - paging state - // - timestamp - - // prepared ID - size += PrimitiveSizes.sizeOfShortBytes(getPreparedStatement().getId()); - - // result metadata ID - if (getPreparedStatement().getResultMetadataId() != null) { - size += PrimitiveSizes.sizeOfShortBytes(getPreparedStatement().getResultMetadataId()); - } - - // parameters (always sent as positional values for bound statements) - size += Values.sizeOfPositionalValues(getValues()); - - // page size - size += PrimitiveSizes.INT; - - // paging state - if (getPagingState() != null) { - size += PrimitiveSizes.sizeOfBytes(getPagingState()); - } - - // timestamp - if (!(context.getTimestampGenerator() instanceof ServerSideTimestampGenerator) - || getQueryTimestamp() != Statement.NO_DEFAULT_TIMESTAMP) { - size += PrimitiveSizes.LONG; - } - - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java deleted file mode 100644 
index 7e8f8723e1b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.cql.DefaultBoundStatement; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * A builder to create a bound statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class BoundStatementBuilder extends StatementBuilder - implements Bindable { - - @NonNull private final PreparedStatement preparedStatement; - @NonNull private final ColumnDefinitions variableDefinitions; - @NonNull private final ByteBuffer[] values; - @NonNull private final CodecRegistry codecRegistry; - @NonNull private final ProtocolVersion protocolVersion; - - public BoundStatementBuilder( - @NonNull PreparedStatement preparedStatement, - @NonNull ColumnDefinitions variableDefinitions, - @NonNull ByteBuffer[] values, - @Nullable String executionProfileName, - @Nullable DriverExecutionProfile executionProfile, - @Nullable CqlIdentifier routingKeyspace, - @Nullable ByteBuffer routingKey, - @Nullable Token routingToken, - @NonNull Map customPayload, - @Nullable Boolean idempotent, - boolean tracing, - long timestamp, - @Nullable ByteBuffer pagingState, - int pageSize, - @Nullable ConsistencyLevel consistencyLevel, - @Nullable ConsistencyLevel serialConsistencyLevel, - @Nullable Duration timeout, - @NonNull CodecRegistry codecRegistry, - @NonNull ProtocolVersion protocolVersion) { - this.preparedStatement = preparedStatement; - this.variableDefinitions = variableDefinitions; - this.values = values; - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - this.routingKeyspace = routingKeyspace; - this.routingKey = routingKey; - this.routingToken = routingToken; - for (Map.Entry entry : customPayload.entrySet()) { - this.addCustomPayload(entry.getKey(), entry.getValue()); - } - this.idempotent = idempotent; - this.tracing = tracing; - this.timestamp = timestamp; - this.pagingState = pagingState; - this.pageSize = pageSize; - this.consistencyLevel = consistencyLevel; - this.serialConsistencyLevel = serialConsistencyLevel; - this.timeout = timeout; - this.codecRegistry = codecRegistry; - this.protocolVersion = protocolVersion; - } - - public 
BoundStatementBuilder(@NonNull BoundStatement template) { - super(template); - this.preparedStatement = template.getPreparedStatement(); - this.variableDefinitions = template.getPreparedStatement().getVariableDefinitions(); - this.values = template.getValues().toArray(new ByteBuffer[this.variableDefinitions.size()]); - this.codecRegistry = template.codecRegistry(); - this.protocolVersion = template.protocolVersion(); - this.node = template.getNode(); - } - - /** The prepared statement that was used to create this statement. */ - @NonNull - public PreparedStatement getPreparedStatement() { - return preparedStatement; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - List indices = variableDefinitions.allIndicesOf(id); - if (indices.isEmpty()) { - throw new IllegalArgumentException(id + " is not a variable in this bound statement"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - int indexOf = variableDefinitions.firstIndexOf(id); - if (indexOf == -1) { - throw new IllegalArgumentException(id + " is not a variable in this bound statement"); - } - return indexOf; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - List indices = variableDefinitions.allIndicesOf(name); - if (indices.isEmpty()) { - throw new IllegalArgumentException(name + " is not a variable in this bound statement"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull String name) { - int indexOf = variableDefinitions.firstIndexOf(name); - if (indexOf == -1) { - throw new IllegalArgumentException(name + " is not a variable in this bound statement"); - } - return indexOf; - } - - @NonNull - @Override - public BoundStatementBuilder setBytesUnsafe(int i, ByteBuffer v) { - values[i] = v; - return this; - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return values[i]; - } - - @Override - public int size() { - return values.length; - } - - @NonNull - 
@Override - public DataType getType(int i) { - return variableDefinitions.get(i).getType(); - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return codecRegistry; - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return protocolVersion; - } - - @NonNull - @Override - public BoundStatement build() { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - buildCustomPayload(), - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java deleted file mode 100644 index cb48f058be4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.DataType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Metadata about a CQL column. - * - *

The default implementation returned by the driver is immutable and serializable. If you write - * your own implementation, it should at least be thread-safe; serializability is not mandatory, but - * recommended for use with some 3rd-party tools like Apache Spark ™. - */ -public interface ColumnDefinition extends Detachable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - CqlIdentifier getTable(); - - @NonNull - CqlIdentifier getName(); - - @NonNull - DataType getType(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java deleted file mode 100644 index 7a775064317..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.AccessibleByName; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.List; - -/** - * Metadata about a set of CQL columns. - * - *

The default implementation returned by the driver is immutable and serializable. If you write - * your own implementation, it should at least be thread-safe; serializability is not mandatory, but - * recommended for use with some 3rd-party tools like Apache Spark ™. - */ -public interface ColumnDefinitions extends Iterable, Detachable { - - /** @return the number of definitions contained in this metadata. */ - int size(); - - /** - * @param i the index to check. - * @throws IndexOutOfBoundsException if the index is invalid. - * @return the {@code i}th {@link ColumnDefinition} in this metadata. - */ - @NonNull - ColumnDefinition get(int i); - - /** - * Get a definition by name. - * - *

This is the equivalent of: - * - *

-   *   get(firstIndexOf(name))
-   * 
- * - * @throws IllegalArgumentException if the name does not exist (in other words, if {@code - * !contains(name))}). - * @see #contains(String) - * @see #firstIndexOf(String) - */ - @NonNull - default ColumnDefinition get(@NonNull String name) { - if (!contains(name)) { - throw new IllegalArgumentException("No definition named " + name); - } else { - return get(firstIndexOf(name)); - } - } - - /** - * Get a definition by name. - * - *

This is the equivalent of: - * - *

-   *   get(firstIndexOf(name))
-   * 
- * - * @throws IllegalArgumentException if the name does not exist (in other words, if {@code - * !contains(name))}). - * @see #contains(CqlIdentifier) - * @see #firstIndexOf(CqlIdentifier) - */ - @NonNull - default ColumnDefinition get(@NonNull CqlIdentifier name) { - if (!contains(name)) { - throw new IllegalArgumentException("No definition named " + name); - } else { - return get(firstIndexOf(name)); - } - } - - /** - * Whether there is a definition using the given name. - * - *

Because raw strings are ambiguous with regard to case-sensitivity, the argument will be - * interpreted according to the rules described in {@link AccessibleByName}. - */ - boolean contains(@NonNull String name); - - /** Whether there is a definition using the given CQL identifier. */ - boolean contains(@NonNull CqlIdentifier id); - - /** - * Returns the indices of all columns that use the given name. - * - *

Because raw strings are ambiguous with regard to case-sensitivity, the argument will be - * interpreted according to the rules described in {@link AccessibleByName}. - * - * @return the indices, or an empty list if no column uses this name. - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(String)} in a singleton list, which is not entirely correct, as it - * will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull String name) { - Loggers.COLUMN_DEFINITIONS.warn( - "{} should override allIndicesOf(String), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(name)); - } - - /** - * Returns the index of the first column that uses the given name. - * - *

Because raw strings are ambiguous with regard to case-sensitivity, the argument will be - * interpreted according to the rules described in {@link AccessibleByName}. - * - * @return the index, or -1 if no column uses this name. - */ - int firstIndexOf(@NonNull String name); - - /** - * Returns the indices of all columns that use the given identifier. - * - * @return the indices, or an empty list if no column uses this identifier. - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(CqlIdentifier)} in a singleton list, which is not entirely correct, - * as it will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull CqlIdentifier id) { - Loggers.COLUMN_DEFINITIONS.warn( - "{} should override allIndicesOf(CqlIdentifier), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(id)); - } - - /** - * Returns the index of the first column that uses the given identifier. - * - * @return the index, or -1 if no column uses this identifier. - */ - int firstIndexOf(@NonNull CqlIdentifier id); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java deleted file mode 100644 index f699438df59..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.protocol.internal.ProtocolConstants; - -/** A default batch type supported by the driver out of the box. 
*/ -public enum DefaultBatchType implements BatchType { - /** - * A logged batch: Cassandra will first write the batch to its distributed batch log to ensure the - * atomicity of the batch (atomicity meaning that if any statement in the batch succeeds, all will - * eventually succeed). - */ - LOGGED(ProtocolConstants.BatchType.LOGGED), - - /** - * A batch that doesn't use Cassandra's distributed batch log. Such batch are not guaranteed to be - * atomic. - */ - UNLOGGED(ProtocolConstants.BatchType.UNLOGGED), - - /** - * A counter batch. Note that such batch is the only type that can contain counter operations and - * it can only contain these. - */ - COUNTER(ProtocolConstants.BatchType.COUNTER), - ; - // Note that, for the sake of convenience, we also expose shortcuts to these constants on the - // BatchType interface. If you add a new enum constant, remember to update the interface as - // well. - - private final byte code; - - DefaultBatchType(byte code) { - this.code = code; - } - - @Override - public byte getProtocolCode() { - return code; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java deleted file mode 100644 index 40cfca827d1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.cql.DefaultPagingState; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionStage; - -/** - * Information about the execution of a query. - * - *

This can be obtained either from a result set for a successful query, or from a driver - * exception for a failed query. - * - * @see ResultSet#getExecutionInfo() - * @see DriverException#getExecutionInfo() - */ -public interface ExecutionInfo { - - /** @return The {@link Request} that was executed. */ - @NonNull - default Request getRequest() { - return getStatement(); - } - - /** - * @return The {@link Request} that was executed, if it can be cast to {@link Statement}. - * @deprecated Use {@link #getRequest()} instead. - * @throws ClassCastException If the request that was executed cannot be cast to {@link - * Statement}. - */ - @NonNull - @Deprecated - Statement getStatement(); - - /** - * The node that acted as a coordinator for the query. - * - *

For successful queries, this is never {@code null}. It is the node that sent the response - * from which the result was decoded. - * - *

For failed queries, this can either be {@code null} if the error occurred before any node - * could be contacted (for example a {@link RequestThrottlingException}), or present if a node was - * successfully contacted, but replied with an error response (any subclass of {@link - * CoordinatorException}). - */ - @Nullable - Node getCoordinator(); - - /** - * The number of speculative executions that were started for this query. - * - *

This does not include the initial, normal execution of the query. Therefore, if speculative - * executions are disabled, this will always be 0. If they are enabled and one speculative - * execution was triggered in addition to the initial execution, this will be 1, etc. - * - * @see SpeculativeExecutionPolicy - */ - int getSpeculativeExecutionCount(); - - /** - * The index of the execution that completed this query. - * - *

0 represents the initial, normal execution of the query, 1 the first speculative execution, - * etc. If this execution info is attached to an error, this might not be applicable, and will - * return -1. - * - * @see SpeculativeExecutionPolicy - */ - int getSuccessfulExecutionIndex(); - - /** - * The errors encountered on previous coordinators, if any. - * - *

The list is in chronological order, based on the time that the driver processed the error - * responses. If speculative executions are enabled, they run concurrently so their errors will be - * interleaved. A node can appear multiple times (if the retry policy decided to retry on the same - * node). - */ - @NonNull - List> getErrors(); - - /** - * The paging state of the query, in its raw form. - * - *

This represents the next page to be fetched if this query has multiple page of results. It - * can be saved and reused later on the same statement. - * - *

Note that this is the equivalent of driver 3's {@code getPagingStateUnsafe()}. If you're - * looking for the method that returns a {@link PagingState}, use {@link #getSafePagingState()}. - * - * @return the paging state, or {@code null} if there is no next page. - */ - @Nullable - ByteBuffer getPagingState(); - - /** - * The paging state of the query, in a safe wrapper that checks if it's reused on the right - * statement. - * - *

This represents the next page to be fetched if this query has multiple page of results. It - * can be saved and reused later on the same statement. - * - * @return the paging state, or {@code null} if there is no next page. - */ - @Nullable - default PagingState getSafePagingState() { - // Default implementation for backward compatibility, but we override it in the concrete class, - // because it knows the attachment point. - ByteBuffer rawPagingState = getPagingState(); - if (rawPagingState == null) { - return null; - } else { - Request request = getRequest(); - if (!(request instanceof Statement)) { - throw new IllegalStateException("Only statements should have a paging state"); - } - Statement statement = (Statement) request; - return new DefaultPagingState(rawPagingState, statement, AttachmentPoint.NONE); - } - } - - /** - * The server-side warnings for this query, if any (otherwise the list will be empty). - * - *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower - * versions, this list will always be empty. - */ - @NonNull - List getWarnings(); - - /** - * The custom payload sent back by the server with the response, if any (otherwise the map will be - * empty). - * - *

This method returns a read-only view of the original map, but its values remain inherently - * mutable. If multiple clients will read these values, care should be taken not to corrupt the - * data (in particular, preserve the indices by calling {@link ByteBuffer#duplicate()}). - * - *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower - * versions, this map will always be empty. - */ - @NonNull - Map getIncomingPayload(); - - /** - * Whether the cluster reached schema agreement after the execution of this query. - * - *

After a successful schema-altering query (ex: creating a table), the driver will check if - * the cluster's nodes agree on the new schema version. If not, it will keep retrying a few times - * (the retry delay and timeout are set through the configuration). - * - *

If this method returns {@code false}, clients can call {@link - * Session#checkSchemaAgreement()} later to perform the check manually. - * - *

Schema agreement is only checked for schema-altering queries. For other query types, this - * method will always return {@code true}. - * - * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_INTERVAL - * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_TIMEOUT - */ - boolean isSchemaInAgreement(); - - /** - * The tracing identifier if tracing was {@link Statement#isTracing() enabled} for this query, - * otherwise {@code null}. - */ - @Nullable - UUID getTracingId(); - - /** - * Fetches the query trace asynchronously, if tracing was enabled for this query. - * - *

Note that each call to this method triggers a new fetch, even if the previous call was - * successful (this allows fetching the trace again if the list of {@link QueryTrace#getEvents() - * events} was incomplete). - * - *

This method will return a failed future if tracing was disabled for the query (that is, if - * {@link #getTracingId()} is null). - */ - @NonNull - CompletionStage getQueryTraceAsync(); - - /** - * Convenience method to call {@link #getQueryTraceAsync()} and block for the result. - * - *

This must not be called on a driver thread. - * - * @throws IllegalStateException if {@link #getTracingId()} is null. - */ - @NonNull - default QueryTrace getQueryTrace() { - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly(getQueryTraceAsync()); - } - - /** - * The size of the binary response in bytes. - * - *

This is the size of the protocol-level frame (including the frame header) before it was - * decoded by the driver, but after decompression (if compression is enabled). - * - *

If the information is not available (for example if this execution info comes from an {@link - * RetryDecision#IGNORE IGNORE} decision of the retry policy), this method returns -1. - * - * @see #getCompressedResponseSizeInBytes() - */ - int getResponseSizeInBytes(); - - /** - * The size of the compressed binary response in bytes. - * - *

This is the size of the protocol-level frame (including the frame header) as it came in the - * TCP response, before decompression and decoding by the driver. - * - *

If compression is disabled, or if the information is not available (for example if this - * execution info comes from an {@link RetryDecision#IGNORE IGNORE} decision of the retry policy), - * this method returns -1. - * - * @see #getResponseSizeInBytes() - */ - int getCompressedResponseSizeInBytes(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PagingState.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PagingState.java deleted file mode 100644 index b9042f99841..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PagingState.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.cql.DefaultPagingState; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A safe wrapper around the paging state of a query. - * - *

This class performs additional checks to fail fast if the paging state is not reused on the - * same query, and it provides utility methods for conversion to/from strings and byte arrays. - * - *

The serialized form returned by {@link #toBytes()} and {@link Object#toString()} is an opaque - * sequence of bytes. Note however that it is not cryptographically secure: the contents are - * not encrypted and the checks are performed with a simple MD5 checksum. If you need stronger - * guarantees, you should build your own wrapper around {@link ExecutionInfo#getPagingState()}. - */ -public interface PagingState { - - /** Parses an instance from a string previously generated with {@code toString()}. */ - @NonNull - static PagingState fromString(@NonNull String string) { - return DefaultPagingState.fromString(string); - } - - /** Parses an instance from a byte array previously generated with {@link #toBytes()}. */ - @NonNull - static PagingState fromBytes(byte[] bytes) { - return DefaultPagingState.fromBytes(bytes); - } - - /** Returns a representation of this object as a byte array. */ - byte[] toBytes(); - - /** - * Checks if this paging state can be safely reused for the given statement. Specifically, the - * query string and any bound values must match. - * - *

Note that, if {@code statement} is a {@link SimpleStatement} with bound values, those values - * must be encoded in order to perform the check. This method uses the default codec registry and - * default protocol version. This might fail if you use custom codecs; in that case, use {@link - * #matches(Statement, Session)} instead. - * - *

If {@code statement} is a {@link BoundStatement}, it is always safe to call this method. - */ - default boolean matches(@NonNull Statement statement) { - return matches(statement, null); - } - - /** - * Alternative to {@link #matches(Statement)} that specifies the session the statement will be - * executed with. You only need this for simple statements, and if you use custom codecs. - * Bound statements already know which session they are attached to. - */ - boolean matches(@NonNull Statement statement, @Nullable Session session); - - /** - * Returns the underlying "unsafe" paging state (the equivalent of {@link - * ExecutionInfo#getPagingState()}). - */ - @NonNull - ByteBuffer getRawPagingState(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java deleted file mode 100644 index eb04f26c046..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import java.util.concurrent.CompletionStage; - -/** - * A request to prepare a CQL query. - * - *

Driver clients should rarely have to deal directly with this type, it's used internally by - * {@link Session}'s prepare methods. However a {@link RetryPolicy} implementation might use it if - * it needs a custom behavior for prepare requests. - * - *

A client may also provide their own implementation of this interface to customize which - * attributes are propagated when preparing a simple statement; see {@link - * CqlSession#prepare(SimpleStatement)} for more explanations. - */ -public interface PrepareRequest extends Request { - - /** - * The type returned when a CQL statement is prepared synchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but CQL statements will generally be prepared with one - * of the driver's built-in helper methods (such as {@link CqlSession#prepare(SimpleStatement)}). - */ - GenericType SYNC = GenericType.of(PreparedStatement.class); - - /** - * The type returned when a CQL statement is prepared asynchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but CQL statements will generally be prepared with one - * of the driver's built-in helper methods (such as {@link - * CqlSession#prepareAsync(SimpleStatement)}. - */ - GenericType> ASYNC = - new GenericType>() {}; - - /** The CQL query to prepare. */ - @NonNull - String getQuery(); - - /** - * {@inheritDoc} - * - *

Note that this refers to the prepare query itself, not to the bound statements that will be - * created from the prepared statement (see {@link #areBoundStatementsIdempotent()}). - */ - @NonNull - @Override - default Boolean isIdempotent() { - // Retrying to prepare is always safe - return true; - } - - /** - * The name of the execution profile to use for the bound statements that will be created from the - * prepared statement. - * - *

Note that this will be ignored if {@link #getExecutionProfileForBoundStatements()} returns a - * non-null value. - */ - @Nullable - String getExecutionProfileNameForBoundStatements(); - - /** - * The execution profile to use for the bound statements that will be created from the prepared - * statement. - */ - @Nullable - DriverExecutionProfile getExecutionProfileForBoundStatements(); - - /** - * The routing keyspace to use for the bound statements that will be created from the prepared - * statement. - */ - CqlIdentifier getRoutingKeyspaceForBoundStatements(); - - /** - * The routing key to use for the bound statements that will be created from the prepared - * statement. - */ - ByteBuffer getRoutingKeyForBoundStatements(); - - /** - * The routing key to use for the bound statements that will be created from the prepared - * statement. - * - *

If it's not null, it takes precedence over {@link #getRoutingKeyForBoundStatements()}. - */ - Token getRoutingTokenForBoundStatements(); - - /** - * Returns the custom payload to send alongside the bound statements that will be created from the - * prepared statement. - */ - @NonNull - Map getCustomPayloadForBoundStatements(); - - /** - * Whether bound statements that will be created from the prepared statement are idempotent. - * - *

This follows the same semantics as {@link #isIdempotent()}. - */ - @Nullable - Boolean areBoundStatementsIdempotent(); - - /** - * The timeout to use for the bound statements that will be created from the prepared statement. - * If the value is null, the default value will be used from the configuration. - */ - @Nullable - Duration getTimeoutForBoundStatements(); - - /** - * The paging state to use for the bound statements that will be created from the prepared - * statement. - */ - ByteBuffer getPagingStateForBoundStatements(); - - /** - * The page size to use for the bound statements that will be created from the prepared statement. - * If the value is 0 or negative, the default value will be used from the configuration. - */ - int getPageSizeForBoundStatements(); - - /** - * The consistency level to use for the bound statements that will be created from the prepared - * statement or {@code null} to use the default value from the configuration. - */ - @Nullable - ConsistencyLevel getConsistencyLevelForBoundStatements(); - - /** - * The serial consistency level to use for the bound statements that will be created from the - * prepared statement or {@code null} to use the default value from the configuration. - */ - @Nullable - ConsistencyLevel getSerialConsistencyLevelForBoundStatements(); - - /** Whether bound statements that will be created from the prepared statement are tracing. */ - boolean areBoundStatementsTracing(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java deleted file mode 100644 index 7828f9f809c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * A query with bind variables that has been pre-parsed by the database. - * - *

Client applications create instances with {@link CqlSession#prepare(SimpleStatement)}. Then - * they use {@link #bind(Object...)} to obtain an executable {@link BoundStatement}. - * - *

The default prepared statement implementation returned by the driver is thread-safe. - * Client applications can -- and are expected to -- prepare each query once and store the result in - * a place where it can be accessed concurrently by application threads (for example a final field). - * Preparing the same query string twice is suboptimal and a bad practice, and will cause the driver - * to log a warning. - */ -public interface PreparedStatement { - - /** - * A unique identifier for this prepared statement. - * - *

Note: the returned buffer is read-only. - */ - @NonNull - ByteBuffer getId(); - - @NonNull - String getQuery(); - - /** A description of the bind variables of this prepared statement. */ - @NonNull - ColumnDefinitions getVariableDefinitions(); - - /** - * The indices of the variables in {@link #getVariableDefinitions()} that correspond to the target - * table's partition key. - * - *

This is only present if all the partition key columns are expressed as bind variables. - * Otherwise, the list will be empty. For example, given the following schema: - * - *

-   *   CREATE TABLE foo (pk1 int, pk2 int, cc int, v int, PRIMARY KEY ((pk1, pk2), cc));
-   * 
- * - * And the following definitions: - * - *
-   * PreparedStatement ps1 = session.prepare("UPDATE foo SET v = ? WHERE pk1 = ? AND pk2 = ? AND v = ?");
-   * PreparedStatement ps2 = session.prepare("UPDATE foo SET v = ? WHERE pk1 = 1 AND pk2 = ? AND v = ?");
-   * 
- * - * Then {@code ps1.getPartitionKeyIndices()} contains 1 and 2, and {@code - * ps2.getPartitionKeyIndices()} is empty (because one of the partition key components is - * hard-coded in the query string). - */ - @NonNull - List getPartitionKeyIndices(); - - /** - * A unique identifier for result metadata (essentially a hash of {@link - * #getResultSetDefinitions()}). - * - *

This information is mostly for internal use: with protocol {@link DefaultProtocolVersion#V5} - * or higher, the driver sends it with every execution of the prepared statement, to validate that - * its result metadata is still up-to-date. - * - *

Note: this method returns {@code null} for protocol {@link DefaultProtocolVersion#V4} or - * lower; otherwise, the returned buffer is read-only. - * - * @see CASSANDRA-10786 - */ - @Nullable - ByteBuffer getResultMetadataId(); - - /** - * A description of the result set that will be returned when this prepared statement is bound and - * executed. - * - *

This information is only present for {@code SELECT} queries, otherwise it is always empty. - * Note that this is slightly incorrect for conditional updates (e.g. {@code INSERT ... IF NOT - * EXISTS}), which do return columns; for those cases, use {@link - * ResultSet#getColumnDefinitions()} on the result, not this method. - */ - @NonNull - ColumnDefinitions getResultSetDefinitions(); - - /** - * Updates {@link #getResultMetadataId()} and {@link #getResultSetDefinitions()} atomically. - * - *

This is for internal use by the driver. Calling this manually with incorrect information can - * cause existing queries to fail. - */ - void setResultMetadata( - @NonNull ByteBuffer newResultMetadataId, @NonNull ColumnDefinitions newResultSetDefinitions); - - /** - * Builds an executable statement that associates a set of values with the bind variables. - * - *

Note that the built-in bound statement implementation is immutable. If you need to set - * multiple execution parameters on the bound statement (such as {@link - * BoundStatement#setExecutionProfileName(String)}, {@link - * BoundStatement#setPagingState(ByteBuffer)}, etc.), consider using {@link - * #boundStatementBuilder(Object...)} instead to avoid unnecessary allocations. - * - * @param values the values of the bound variables in the statement. You can provide less values - * than the actual number of variables (or even none at all), in which case the remaining - * variables will be left unset. However, this method will throw an {@link - * IllegalArgumentException} if there are more values than variables. Individual values can be - * {@code null}, but the vararg array itself can't. - */ - @NonNull - BoundStatement bind(@NonNull Object... values); - - /** - * Returns a builder to construct an executable statement. - * - *

Note that this builder is mutable and not thread-safe. - * - * @see #bind(Object...) - */ - @NonNull - BoundStatementBuilder boundStatementBuilder(@NonNull Object... values); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java deleted file mode 100644 index 37ebb85c0db..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -/** - * Tracing information for a query. - * - *

When {@link Statement#isTracing() tracing} is enabled for a query, Cassandra generates rows in - * the {@code sessions} and {@code events} table of the {@code system_traces} keyspace. This class - * is a client-side representation of that information. - */ -public interface QueryTrace { - - @NonNull - UUID getTracingId(); - - @NonNull - String getRequestType(); - - /** The server-side duration of the query in microseconds. */ - int getDurationMicros(); - - /** - * @deprecated returns the coordinator IP, but {@link #getCoordinatorAddress()} should be - * preferred, since C* 4.0 and above now returns the port was well. - */ - @NonNull - @Deprecated - InetAddress getCoordinator(); - - /** - * The IP and port of the node that coordinated the query. Prior to C* 4.0 the port is not set and - * will default to 0. - * - *

This method's default implementation returns {@link #getCoordinator()} with the port set to - * 0. The only reason it exists is to preserve binary compatibility. Internally, the driver - * overrides it to set the correct port. - * - * @since 4.6.0 - */ - @NonNull - default InetSocketAddress getCoordinatorAddress() { - return new InetSocketAddress(getCoordinator(), 0); - } - - /** The parameters attached to this trace. */ - @NonNull - Map getParameters(); - - /** The server-side timestamp of the start of this query. */ - long getStartedAt(); - - /** - * The events contained in this trace. - * - *

Query tracing is asynchronous in Cassandra. Hence, it is possible for the list returned to - * be missing some events for some of the replicas involved in the query if the query trace is - * requested just after the return of the query (the only guarantee being that the list will - * contain the events pertaining to the coordinator). - */ - @NonNull - List getEvents(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java deleted file mode 100644 index 54f786b2068..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.PagingIterable; - -/** - * The result of a synchronous CQL query. - * - *

See {@link PagingIterable} for a few generic explanations about the behavior of this object; - * in particular, implementations are not thread-safe. They can only be iterated by the - * thread that invoked {@code session.execute}. - * - * @see CqlSession#execute(Statement) - * @see CqlSession#execute(String) - */ -public interface ResultSet extends PagingIterable { - - // overridden to amend the javadocs: - /** - * {@inheritDoc} - * - *

This is equivalent to calling: - * - *

-   *   this.iterator().next().getBoolean("[applied]")
-   * 
- * - * Except that this method peeks at the next row without consuming it. - */ - @Override - boolean wasApplied(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java deleted file mode 100644 index 5eab449b057..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.data.GettableById; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A row from a CQL table. - * - *

The default implementation returned by the driver is immutable and serializable. If you write - * your own implementation, it should at least be thread-safe; serializability is not mandatory, but - * recommended for use with some 3rd-party tools like Apache Spark ™. - */ -public interface Row extends GettableByIndex, GettableByName, GettableById, Detachable { - - /** @return the column definitions contained in this result set. */ - @NonNull - ColumnDefinitions getColumnDefinitions(); - - /** - * Returns a string representation of the contents of this row. - * - *

This produces a comma-separated list enclosed in square brackets. Each column is represented - * by its name, followed by a column and the value as a CQL literal. For example: - * - *

-   * [id:1, name:'test']
-   * 
- * - * Notes: - * - *
    - *
  • This method does not sanitize its output in any way. In particular, no effort is made to - * limit output size: all columns are included, and large strings or blobs will be appended - * as-is. - *
  • Be mindful of how you expose the result. For example, in high-security environments, it - * might be undesirable to leak data in application logs. - *
- */ - @NonNull - default String getFormattedContents() { - StringBuilder result = new StringBuilder("["); - ColumnDefinitions definitions = getColumnDefinitions(); - for (int i = 0; i < definitions.size(); i++) { - if (i > 0) { - result.append(", "); - } - ColumnDefinition definition = definitions.get(i); - String name = definition.getName().asCql(true); - TypeCodec codec = codecRegistry().codecFor(definition.getType()); - Object value = codec.decode(getBytesUnsafe(i), protocolVersion()); - result.append(name).append(':').append(codec.format(value)); - } - return result.append("]").toString(); - } - - /** - * Returns an abstract representation of this object, that may not include the row's - * contents. - * - *

The driver's built-in {@link Row} implementation returns the default format of {@link - * Object#toString()}: the class name, followed by the at-sign and the hash code of the object. - * - *

Omitting the contents was a deliberate choice, because we feel it would make it too easy to - * accidentally leak data (e.g. in application logs). If you want the contents, use {@link - * #getFormattedContents()}. - */ - @Override - String toString(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java deleted file mode 100644 index ef04cd14a5b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java +++ /dev/null @@ -1,318 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.cql.DefaultSimpleStatement; -import com.datastax.oss.driver.internal.core.time.ServerSideTimestampGenerator; -import com.datastax.oss.driver.internal.core.util.Sizes; -import com.datastax.oss.protocol.internal.PrimitiveSizes; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.List; -import java.util.Map; - -/** - * A one-off CQL statement consisting of a query string with optional placeholders, and a set of - * values for these placeholders. - * - *

To create instances, client applications can use the {@code newInstance} factory methods on - * this interface for common cases, or {@link #builder(String)} for more control over the - * parameters. They can then be passed to {@link CqlSession#execute(Statement)}. - * - *

Simple statements should be reserved for queries that will only be executed a few times by an - * application. For more frequent queries, {@link PreparedStatement} provides many advantages: it is - * more efficient because the server parses the query only once and caches the result; it allows the - * server to return metadata about the bind variables, which allows the driver to validate the - * values earlier, and apply certain optimizations like token-aware routing. - * - *

The default implementation returned by the driver is immutable and thread-safe. - * All mutating methods return a new instance. See also the static factory methods and builders in - * this interface. - * - *

If an application reuses the same statement more than once, it is recommended to cache it (for - * example in a final field). - */ -public interface SimpleStatement extends BatchableStatement { - - /** - * Shortcut to create an instance of the default implementation with only a CQL query (see {@link - * SimpleStatementBuilder} for the defaults for the other fields). - * - *

Note that the returned object is immutable. - */ - static SimpleStatement newInstance(@NonNull String cqlQuery) { - return new DefaultSimpleStatement( - cqlQuery, - NullAllowingImmutableList.of(), - NullAllowingImmutableMap.of(), - null, - null, - null, - null, - null, - null, - NullAllowingImmutableMap.of(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Shortcut to create an instance of the default implementation with only a CQL query and - * positional values (see {@link SimpleStatementBuilder} for the defaults for the other fields). - * - *

Note that the returned object is immutable. - * - * @param positionalValues the values for placeholders in the query string. Individual values can - * be {@code null}, but the vararg array itself can't. - */ - static SimpleStatement newInstance( - @NonNull String cqlQuery, @NonNull Object... positionalValues) { - return new DefaultSimpleStatement( - cqlQuery, - NullAllowingImmutableList.of(positionalValues), - NullAllowingImmutableMap.of(), - null, - null, - null, - null, - null, - null, - NullAllowingImmutableMap.of(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Shortcut to create an instance of the default implementation with only a CQL query and named - * values (see {@link SimpleStatementBuilder} for the defaults for other fields). - * - *

Note that the returned object is immutable. - */ - static SimpleStatement newInstance( - @NonNull String cqlQuery, @NonNull Map namedValues) { - return new DefaultSimpleStatement( - cqlQuery, - NullAllowingImmutableList.of(), - DefaultSimpleStatement.wrapKeys(namedValues), - null, - null, - null, - null, - null, - null, - NullAllowingImmutableMap.of(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Returns a builder to create an instance of the default implementation. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static SimpleStatementBuilder builder(@NonNull String query) { - return new SimpleStatementBuilder(query); - } - - /** - * Returns a builder to create an instance of the default implementation, copying the fields of - * the given statement. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static SimpleStatementBuilder builder(@NonNull SimpleStatement template) { - return new SimpleStatementBuilder(template); - } - - @NonNull - String getQuery(); - - /** - * Sets the CQL query to execute. - * - *

It may contain anonymous placeholders identified by a question mark, as in: - * - *

-   *   SELECT username FROM user WHERE id = ?
-   * 
- * - * Or named placeholders prefixed by a column, as in: - * - *
-   *   SELECT username FROM user WHERE id = :i
-   * 
- * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #setPositionalValues(List) - * @see #setNamedValuesWithIds(Map) - */ - @NonNull - @CheckReturnValue - SimpleStatement setQuery(@NonNull String newQuery); - - /** - * Sets the CQL keyspace to associate with the query. - * - *

This feature is only available with {@link DefaultProtocolVersion#V5 native protocol v5} or - * higher. Specifying a per-request keyspace with lower protocol versions will cause a runtime - * error. - * - * @see Request#getKeyspace() - */ - @NonNull - @CheckReturnValue - SimpleStatement setKeyspace(@Nullable CqlIdentifier newKeyspace); - - /** - * Shortcut for {@link #setKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(newKeyspaceName))}. - */ - @NonNull - @CheckReturnValue - default SimpleStatement setKeyspace(@NonNull String newKeyspaceName) { - return setKeyspace(CqlIdentifier.fromCql(newKeyspaceName)); - } - - @NonNull - List getPositionalValues(); - - /** - * Sets the positional values to bind to anonymous placeholders. - * - *

You can use either positional or named values, but not both. Therefore if you call this - * method but {@link #getNamedValues()} returns a non-empty map, an {@link - * IllegalArgumentException} will be thrown. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #setQuery(String) - */ - @NonNull - @CheckReturnValue - SimpleStatement setPositionalValues(@NonNull List newPositionalValues); - - @NonNull - Map getNamedValues(); - - /** - * Sets the named values to bind to named placeholders. - * - *

Names must be stripped of the leading column. - * - *

You can use either positional or named values, but not both. Therefore if you call this - * method but {@link #getPositionalValues()} returns a non-empty list, an {@link - * IllegalArgumentException} will be thrown. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #setQuery(String) - */ - @NonNull - @CheckReturnValue - SimpleStatement setNamedValuesWithIds(@NonNull Map newNamedValues); - - /** - * Shortcut for {@link #setNamedValuesWithIds(Map)} with raw strings as value names. The keys are - * converted on the fly with {@link CqlIdentifier#fromCql(String)}. - */ - @NonNull - @CheckReturnValue - default SimpleStatement setNamedValues(@NonNull Map newNamedValues) { - return setNamedValuesWithIds(DefaultSimpleStatement.wrapKeys(newNamedValues)); - } - - @Override - default int computeSizeInBytes(@NonNull DriverContext context) { - int size = Sizes.minimumStatementSize(this, context); - - // SimpleStatement's additional elements to take into account are: - // - query string - // - parameters (named or not) - // - per-query keyspace - // - page size - // - paging state - // - timestamp - - // query - size += PrimitiveSizes.sizeOfLongString(getQuery()); - - // parameters - size += - Sizes.sizeOfSimpleStatementValues( - this, context.getProtocolVersion(), context.getCodecRegistry()); - - // per-query keyspace - if (getKeyspace() != null) { - size += PrimitiveSizes.sizeOfString(getKeyspace().asInternal()); - } - - // page size - size += PrimitiveSizes.INT; - - // paging state - if (getPagingState() != null) { - size += PrimitiveSizes.sizeOfBytes(getPagingState()); - } - - // timestamp - if (!(context.getTimestampGenerator() instanceof ServerSideTimestampGenerator) - || getQueryTimestamp() != Statement.NO_DEFAULT_TIMESTAMP) { - size += PrimitiveSizes.LONG; - } - - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java deleted file mode 100644 index 1ac910ff6a7..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.cql.DefaultSimpleStatement; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * A builder to create a simple statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class SimpleStatementBuilder - extends StatementBuilder { - - @NonNull private String query; - @Nullable private CqlIdentifier keyspace; - @Nullable private NullAllowingImmutableList.Builder positionalValuesBuilder; - @Nullable private NullAllowingImmutableMap.Builder namedValuesBuilder; - - public SimpleStatementBuilder(@NonNull String query) { - this.query = query; - } - - public SimpleStatementBuilder(@NonNull SimpleStatement template) { - super(template); - if (!template.getPositionalValues().isEmpty() && !template.getNamedValues().isEmpty()) { - throw new IllegalArgumentException( - "Illegal statement to copy, can't have both named and positional values"); - } - - this.query = template.getQuery(); - if (!template.getPositionalValues().isEmpty()) { - this.positionalValuesBuilder = - NullAllowingImmutableList.builder(template.getPositionalValues().size()) - .addAll(template.getPositionalValues()); - } - if (!template.getNamedValues().isEmpty()) { - this.namedValuesBuilder = - NullAllowingImmutableMap.builder(template.getNamedValues().size()) - .putAll(template.getNamedValues()); - } - } - - /** @see SimpleStatement#getQuery() */ - @NonNull - public SimpleStatementBuilder setQuery(@NonNull String query) { - this.query = query; - return this; - } - - /** @see SimpleStatement#getKeyspace() */ - @NonNull - public SimpleStatementBuilder setKeyspace(@Nullable CqlIdentifier keyspace) { - this.keyspace = keyspace; - return this; - } - - /** - * Shortcut for {@link #setKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(keyspaceName))}. - */ - @NonNull - public SimpleStatementBuilder setKeyspace(@Nullable String keyspaceName) { - return setKeyspace(keyspaceName == null ? 
null : CqlIdentifier.fromCql(keyspaceName)); - } - - /** @see SimpleStatement#setPositionalValues(List) */ - @NonNull - public SimpleStatementBuilder addPositionalValue(@Nullable Object value) { - if (namedValuesBuilder != null) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (positionalValuesBuilder == null) { - positionalValuesBuilder = NullAllowingImmutableList.builder(); - } - positionalValuesBuilder.add(value); - return this; - } - - /** @see SimpleStatement#setPositionalValues(List) */ - @NonNull - public SimpleStatementBuilder addPositionalValues(@NonNull Iterable values) { - if (namedValuesBuilder != null) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (positionalValuesBuilder == null) { - positionalValuesBuilder = NullAllowingImmutableList.builder(); - } - positionalValuesBuilder.addAll(values); - return this; - } - - /** @see SimpleStatement#setPositionalValues(List) */ - @NonNull - public SimpleStatementBuilder addPositionalValues(@NonNull Object... values) { - return addPositionalValues(Arrays.asList(values)); - } - - /** @see SimpleStatement#setPositionalValues(List) */ - @NonNull - public SimpleStatementBuilder clearPositionalValues() { - positionalValuesBuilder = NullAllowingImmutableList.builder(); - return this; - } - - /** @see SimpleStatement#setNamedValuesWithIds(Map) */ - @NonNull - public SimpleStatementBuilder addNamedValue(@NonNull CqlIdentifier name, @Nullable Object value) { - if (positionalValuesBuilder != null) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (namedValuesBuilder == null) { - namedValuesBuilder = NullAllowingImmutableMap.builder(); - } - namedValuesBuilder.put(name, value); - return this; - } - - /** - * Shortcut for {@link #addNamedValue(CqlIdentifier, Object) - * addNamedValue(CqlIdentifier.fromCql(name), value)}. 
- */ - @NonNull - public SimpleStatementBuilder addNamedValue(@NonNull String name, @Nullable Object value) { - return addNamedValue(CqlIdentifier.fromCql(name), value); - } - - /** @see SimpleStatement#setNamedValuesWithIds(Map) */ - @NonNull - public SimpleStatementBuilder clearNamedValues() { - namedValuesBuilder = NullAllowingImmutableMap.builder(); - return this; - } - - @NonNull - @Override - public SimpleStatement build() { - return new DefaultSimpleStatement( - query, - (positionalValuesBuilder == null) - ? NullAllowingImmutableList.of() - : positionalValuesBuilder.build(), - (namedValuesBuilder == null) ? NullAllowingImmutableMap.of() : namedValuesBuilder.build(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - buildCustomPayload(), - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java deleted file mode 100644 index d70c56686c5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java +++ /dev/null @@ -1,548 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.RoutingKey; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import java.util.concurrent.CompletionStage; - -/** - * A request to execute a CQL query. - * - * @param the "self type" used for covariant returns in subtypes. - */ -public interface Statement> extends Request { - // Implementation note: "CqlRequest" would be a better name, but we keep "Statement" to match - // previous driver versions. - - /** - * The type returned when a CQL statement is executed synchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but CQL statements will generally be run with one of - * the driver's built-in helper methods (such as {@link CqlSession#execute(Statement)}). - */ - GenericType SYNC = GenericType.of(ResultSet.class); - - /** - * The type returned when a CQL statement is executed asynchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but CQL statements will generally be run with one of - * the driver's built-in helper methods (such as {@link CqlSession#executeAsync(Statement)}). - */ - GenericType> ASYNC = - new GenericType>() {}; - - /** - * A special value for {@link #getQueryTimestamp()} that means "no value". - * - *

It is equal to {@link Long#MIN_VALUE}. - */ - long NO_DEFAULT_TIMESTAMP = QueryOptions.NO_DEFAULT_TIMESTAMP; - - /** - * A special value for {@link #getNowInSeconds()} that means "no value". - * - *

It is equal to {@link Integer#MIN_VALUE}. - */ - int NO_NOW_IN_SECONDS = QueryOptions.NO_NOW_IN_SECONDS; - - /** - * Sets the name of the execution profile that will be used for this statement. - * - *

For all the driver's built-in implementations, calling this method with a non-null argument - * automatically resets {@link #getExecutionProfile()} to null. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setExecutionProfileName(@Nullable String newConfigProfileName); - - /** - * Sets the execution profile to use for this statement. - * - *

For all the driver's built-in implementations, calling this method with a non-null argument - * automatically resets {@link #getExecutionProfileName()} to null. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setExecutionProfile(@Nullable DriverExecutionProfile newProfile); - - /** - * Sets the keyspace to use for token-aware routing. - * - *

See {@link Request#getRoutingKey()} for a description of the token-aware routing algorithm. - * - * @param newRoutingKeyspace The keyspace to use, or {@code null} to disable token-aware routing. - */ - @NonNull - @CheckReturnValue - SelfT setRoutingKeyspace(@Nullable CqlIdentifier newRoutingKeyspace); - - /** - * Sets the {@link Node} that should handle this query. - * - *

In the general case, use of this method is heavily discouraged and should only be - * used in the following cases: - * - *

    - *
  1. Querying node-local tables, such as tables in the {@code system} and {@code system_views} - * keyspaces. - *
  2. Applying a series of schema changes, where it may be advantageous to execute schema - * changes in sequence on the same node. - *
- * - *

Configuring a specific node causes the configured {@link LoadBalancingPolicy} to be - * completely bypassed. However, if the load balancing policy dictates that the node is at - * distance {@link NodeDistance#IGNORED} or there is no active connectivity to the node, the - * request will fail with a {@link NoNodeAvailableException}. - * - * @param node The node that should be used to handle executions of this statement or null to - * delegate to the configured load balancing policy. - */ - @NonNull - @CheckReturnValue - SelfT setNode(@Nullable Node node); - - /** - * Shortcut for {@link #setRoutingKeyspace(CqlIdentifier) - * setRoutingKeyspace(CqlIdentifier.fromCql(newRoutingKeyspaceName))}. - * - * @param newRoutingKeyspaceName The keyspace to use, or {@code null} to disable token-aware - * routing. - */ - @NonNull - @CheckReturnValue - default SelfT setRoutingKeyspace(@Nullable String newRoutingKeyspaceName) { - return setRoutingKeyspace( - newRoutingKeyspaceName == null ? null : CqlIdentifier.fromCql(newRoutingKeyspaceName)); - } - - /** - * Sets the key to use for token-aware routing. - * - *

See {@link Request#getRoutingKey()} for a description of the token-aware routing algorithm. - * - * @param newRoutingKey The routing key to use, or {@code null} to disable token-aware routing. - */ - @NonNull - @CheckReturnValue - SelfT setRoutingKey(@Nullable ByteBuffer newRoutingKey); - - /** - * Sets the key to use for token-aware routing, when the partition key has multiple components. - * - *

This method assembles the components into a single byte buffer and passes it to {@link - * #setRoutingKey(ByteBuffer)}. Neither the individual components, nor the vararg array itself, - * can be {@code null}. - */ - @NonNull - @CheckReturnValue - default SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { - return setRoutingKey(RoutingKey.compose(newRoutingKeyComponents)); - } - - /** - * Sets the token to use for token-aware routing. - * - *

See {@link Request#getRoutingKey()} for a description of the token-aware routing algorithm. - * - * @param newRoutingToken The routing token to use, or {@code null} to disable token-aware - * routing. - */ - @NonNull - @CheckReturnValue - SelfT setRoutingToken(@Nullable Token newRoutingToken); - - /** - * Sets the custom payload to use for execution. - * - *

All the driver's built-in statement implementations are immutable, and return a new instance - * from this method. However custom implementations may choose to be mutable and return the same - * instance. - * - *

Note that it's your responsibility to provide a thread-safe map. This can be achieved with a - * concurrent or immutable implementation, or by making it effectively immutable (meaning that - * it's never modified after being set on the statement). - */ - @NonNull - @CheckReturnValue - SelfT setCustomPayload(@NonNull Map newCustomPayload); - - /** - * Sets the idempotence to use for execution. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newIdempotence a boolean instance to set a statement-specific value, or {@code null} to - * use the default idempotence defined in the configuration. - */ - @NonNull - @CheckReturnValue - SelfT setIdempotent(@Nullable Boolean newIdempotence); - - /** - * Sets tracing for execution. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setTracing(boolean newTracing); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setTracing(boolean) setTracing(true)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT enableTracing() { - return setTracing(true); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setTracing(boolean) setTracing(false)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT disableTracing() { - return setTracing(false); - } - - /** - * Returns the query timestamp, in microseconds, to send with the statement. See {@link - * #setQueryTimestamp(long)} for details. - * - *

If this is equal to {@link #NO_DEFAULT_TIMESTAMP}, the {@link TimestampGenerator} configured - * for this driver instance will be used to generate a timestamp. - * - * @see #NO_DEFAULT_TIMESTAMP - * @see TimestampGenerator - */ - long getQueryTimestamp(); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getQueryTimestamp()}. - */ - @Deprecated - default long getDefaultTimestamp() { - return getQueryTimestamp(); - } - - /** - * Sets the query timestamp, in microseconds, to send with the statement. - * - *

This is an alternative to appending a {@code USING TIMESTAMP} clause in the statement's - * query string, and has the advantage of sending the timestamp separately from the query string - * itself, which doesn't have to be modified when executing the same statement with different - * timestamps. Note that, if both a {@code USING TIMESTAMP} clause and a query timestamp are set - * for a given statement, the timestamp from the {@code USING TIMESTAMP} clause wins. - * - *

This method can be used on any instance of {@link SimpleStatement}, {@link BoundStatement} - * or {@link BatchStatement}. For a {@link BatchStatement}, the timestamp will apply to all its - * child statements; it is not possible to define per-child timestamps using this method, and - * consequently, if this method is called on a batch child statement, the provided timestamp will - * be silently ignored. If different timestamps are required for individual child statements, this - * can only be achieved with a custom {@code USING TIMESTAMP} clause in each child query. - * - *

If this is equal to {@link #NO_DEFAULT_TIMESTAMP}, the {@link TimestampGenerator} configured - * for this driver instance will be used to generate a timestamp. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #NO_DEFAULT_TIMESTAMP - * @see TimestampGenerator - */ - @NonNull - @CheckReturnValue - SelfT setQueryTimestamp(long newTimestamp); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setQueryTimestamp(long)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setDefaultTimestamp(long newTimestamp) { - return setQueryTimestamp(newTimestamp); - } - - /** - * Sets how long to wait for this request to complete. This is a global limit on the duration of a - * session.execute() call, including any retries the driver might do. - * - * @param newTimeout the timeout to use, or {@code null} to use the default value defined in the - * configuration. - * @see DefaultDriverOption#REQUEST_TIMEOUT - */ - @NonNull - @CheckReturnValue - SelfT setTimeout(@Nullable Duration newTimeout); - - /** - * Returns the paging state to send with the statement, or {@code null} if this statement has no - * paging state. - * - *

Paging states are used in scenarios where a paged result is interrupted then resumed later. - * The paging state can only be reused with the exact same statement (same query string, same - * parameters). It is an opaque value that is only meant to be collected, stored and re-used. If - * you try to modify its contents or reuse it with a different statement, the results are - * unpredictable. - */ - @Nullable - ByteBuffer getPagingState(); - - /** - * Sets the paging state to send with the statement, or {@code null} if this statement has no - * paging state. - * - *

Paging states are used in scenarios where a paged result is interrupted then resumed later. - * The paging state can only be reused with the exact same statement (same query string, same - * parameters). It is an opaque value that is only meant to be collected, stored and re-used. If - * you try to modify its contents or reuse it with a different statement, the results are - * unpredictable. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance; - * if you do so, you must override {@link #copy(ByteBuffer)}. - */ - @NonNull - @CheckReturnValue - SelfT setPagingState(@Nullable ByteBuffer newPagingState); - - /** - * Sets the paging state to send with the statement, or {@code null} if this statement has no - * paging state. - * - *

This variant uses the "safe" paging state wrapper, it will throw immediately if the - * statement doesn't match the one that the state was initially extracted from (same query string, - * same parameters). The advantage is that it fails fast, instead of waiting for an error response - * from the server. - * - *

Note that, if this statement is a {@link SimpleStatement} with bound values, those values - * must be encoded in order to perform the check. This method uses the default codec registry and - * default protocol version. This might fail if you use custom codecs; in that case, use {@link - * #setPagingState(PagingState, Session)} instead. - * - * @throws IllegalArgumentException if the given state does not match this statement. - * @see #setPagingState(ByteBuffer) - * @see ExecutionInfo#getSafePagingState() - */ - @NonNull - @CheckReturnValue - default SelfT setPagingState(@Nullable PagingState newPagingState) { - return setPagingState(newPagingState, null); - } - - /** - * Alternative to {@link #setPagingState(PagingState)} that specifies the session the statement - * will be executed with. You only need this for simple statements, and if you use custom - * codecs. Bound statements already know which session they are attached to. - */ - @NonNull - @CheckReturnValue - default SelfT setPagingState(@Nullable PagingState newPagingState, @Nullable Session session) { - if (newPagingState == null) { - return setPagingState((ByteBuffer) null); - } else if (newPagingState.matches(this, session)) { - return setPagingState(newPagingState.getRawPagingState()); - } else { - throw new IllegalArgumentException( - "Paging state mismatch, " - + "this means that either the paging state contents were altered, " - + "or you're trying to apply it to a different statement"); - } - } - - /** - * Returns the page size to use for the statement. - * - * @return the set page size, otherwise 0 or a negative value to use the default value defined in - * the configuration. - * @see DefaultDriverOption#REQUEST_PAGE_SIZE - */ - int getPageSize(); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getPageSize()}. 
- */ - @Deprecated - default int getFetchSize() { - return getPageSize(); - } - - /** - * Configures how many rows will be retrieved simultaneously in a single network roundtrip (the - * goal being to avoid loading too many results in memory at the same time). - * - * @param newPageSize the page size to use, set to 0 or a negative value to use the default value - * defined in the configuration. - * @see DefaultDriverOption#REQUEST_PAGE_SIZE - */ - @NonNull - @CheckReturnValue - SelfT setPageSize(int newPageSize); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setPageSize(int)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setFetchSize(int newPageSize) { - return setPageSize(newPageSize); - } - - /** - * Returns the {@link ConsistencyLevel} to use for the statement. - * - * @return the set consistency, or {@code null} to use the default value defined in the - * configuration. - * @see DefaultDriverOption#REQUEST_CONSISTENCY - */ - @Nullable - ConsistencyLevel getConsistencyLevel(); - - /** - * Sets the {@link ConsistencyLevel} to use for this statement. - * - * @param newConsistencyLevel the consistency level to use, or null to use the default value - * defined in the configuration. - * @see DefaultDriverOption#REQUEST_CONSISTENCY - */ - @NonNull - @CheckReturnValue - SelfT setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel); - - /** - * Returns the serial {@link ConsistencyLevel} to use for the statement. - * - * @return the set serial consistency, or {@code null} to use the default value defined in the - * configuration. - * @see DefaultDriverOption#REQUEST_SERIAL_CONSISTENCY - */ - @Nullable - ConsistencyLevel getSerialConsistencyLevel(); - - /** - * Sets the serial {@link ConsistencyLevel} to use for this statement. 
- * - * @param newSerialConsistencyLevel the serial consistency level to use, or null to use the - * default value defined in the configuration. - * @see DefaultDriverOption#REQUEST_SERIAL_CONSISTENCY - */ - @NonNull - @CheckReturnValue - SelfT setSerialConsistencyLevel(@Nullable ConsistencyLevel newSerialConsistencyLevel); - - /** Whether tracing information should be recorded for this statement. */ - boolean isTracing(); - - /** - * A custom "now in seconds" to use when applying the request (for testing purposes). - * - *

This method's default implementation returns {@link #NO_NOW_IN_SECONDS}. The only reason it - * exists is to preserve binary compatibility. Internally, the driver overrides it to return the - * value that was set programmatically (if any). - * - * @see #NO_NOW_IN_SECONDS - */ - default int getNowInSeconds() { - return NO_NOW_IN_SECONDS; - } - - /** - * Sets the "now in seconds" to use when applying the request (for testing purposes). - * - *

This method's default implementation returns the statement unchanged. The only reason it - * exists is to preserve binary compatibility. Internally, the driver overrides it to record the - * new value. - * - * @see #NO_NOW_IN_SECONDS - */ - @NonNull - @CheckReturnValue - @SuppressWarnings("unchecked") - default SelfT setNowInSeconds(int nowInSeconds) { - return (SelfT) this; - } - - /** - * Calculates the approximate size in bytes that the statement will have when encoded. - * - *

The size might be over-estimated by a few bytes due to global options that may be defined on - * a {@link Session} but not explicitly set on the statement itself. - * - *

The result of this method is not cached, calling it will cause some encoding to be done in - * order to determine some of the statement's attributes sizes. Therefore, use this method - * sparingly in order to avoid unnecessary computation. - * - * @return the approximate number of bytes this statement will take when encoded. - */ - int computeSizeInBytes(@NonNull DriverContext context); - - /** - * Creates a new instance with a different paging state. - * - *

Since all the built-in statement implementations in the driver are immutable, this method's - * default implementation delegates to {@link #setPagingState(ByteBuffer)}. However, if you write - * your own mutable implementation, make sure it returns a different instance. - */ - @NonNull - @CheckReturnValue - default SelfT copy(@Nullable ByteBuffer newPagingState) { - return setPagingState(newPagingState); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java deleted file mode 100644 index 531070b854c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.internal.core.util.RoutingKey; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * Handle options common to all statement builders. - * - * @see SimpleStatement#builder(String) - * @see BatchStatement#builder(BatchType) - * @see PreparedStatement#boundStatementBuilder(Object...) - */ -@NotThreadSafe -public abstract class StatementBuilder< - SelfT extends StatementBuilder, StatementT extends Statement> { - - @SuppressWarnings("unchecked") - private final SelfT self = (SelfT) this; - - @Nullable protected String executionProfileName; - @Nullable protected DriverExecutionProfile executionProfile; - @Nullable protected CqlIdentifier routingKeyspace; - @Nullable protected ByteBuffer routingKey; - @Nullable protected Token routingToken; - @Nullable private NullAllowingImmutableMap.Builder customPayloadBuilder; - @Nullable protected Boolean idempotent; - protected boolean tracing; - protected long timestamp = Statement.NO_DEFAULT_TIMESTAMP; - @Nullable protected ByteBuffer pagingState; - protected int pageSize = Integer.MIN_VALUE; - @Nullable protected ConsistencyLevel consistencyLevel; - @Nullable protected ConsistencyLevel serialConsistencyLevel; - @Nullable protected Duration timeout; - @Nullable protected Node node; - protected int nowInSeconds = Statement.NO_NOW_IN_SECONDS; - - protected StatementBuilder() { - // 
nothing to do - } - - protected StatementBuilder(StatementT template) { - this.executionProfileName = template.getExecutionProfileName(); - this.executionProfile = template.getExecutionProfile(); - this.routingKeyspace = template.getRoutingKeyspace(); - this.routingKey = template.getRoutingKey(); - this.routingToken = template.getRoutingToken(); - if (!template.getCustomPayload().isEmpty()) { - this.customPayloadBuilder = - NullAllowingImmutableMap.builder() - .putAll(template.getCustomPayload()); - } - this.idempotent = template.isIdempotent(); - this.tracing = template.isTracing(); - this.timestamp = template.getQueryTimestamp(); - this.pagingState = template.getPagingState(); - this.pageSize = template.getPageSize(); - this.consistencyLevel = template.getConsistencyLevel(); - this.serialConsistencyLevel = template.getSerialConsistencyLevel(); - this.timeout = template.getTimeout(); - this.node = template.getNode(); - this.nowInSeconds = template.getNowInSeconds(); - } - - /** @see Statement#setExecutionProfileName(String) */ - @NonNull - public SelfT setExecutionProfileName(@Nullable String executionProfileName) { - this.executionProfileName = executionProfileName; - if (executionProfileName != null) { - this.executionProfile = null; - } - return self; - } - - /** @see Statement#setExecutionProfile(DriverExecutionProfile) */ - @NonNull - public SelfT setExecutionProfile(@Nullable DriverExecutionProfile executionProfile) { - this.executionProfile = executionProfile; - if (executionProfile != null) { - this.executionProfileName = null; - } - return self; - } - - /** @see Statement#setRoutingKeyspace(CqlIdentifier) */ - @NonNull - public SelfT setRoutingKeyspace(@Nullable CqlIdentifier routingKeyspace) { - this.routingKeyspace = routingKeyspace; - return self; - } - - /** - * Shortcut for {@link #setRoutingKeyspace(CqlIdentifier) - * setRoutingKeyspace(CqlIdentifier.fromCql(routingKeyspaceName))}. 
- */ - @NonNull - public SelfT setRoutingKeyspace(@Nullable String routingKeyspaceName) { - return setRoutingKeyspace( - routingKeyspaceName == null ? null : CqlIdentifier.fromCql(routingKeyspaceName)); - } - - /** @see Statement#setRoutingKey(ByteBuffer) */ - @NonNull - public SelfT setRoutingKey(@Nullable ByteBuffer routingKey) { - this.routingKey = routingKey; - return self; - } - - /** @see Statement#setRoutingKey(ByteBuffer...) */ - @NonNull - public SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { - return setRoutingKey(RoutingKey.compose(newRoutingKeyComponents)); - } - - /** @see Statement#setRoutingToken(Token) */ - @NonNull - public SelfT setRoutingToken(@Nullable Token routingToken) { - this.routingToken = routingToken; - return self; - } - - /** @see Statement#setCustomPayload(Map) */ - @NonNull - public SelfT addCustomPayload(@NonNull String key, @Nullable ByteBuffer value) { - if (customPayloadBuilder == null) { - customPayloadBuilder = NullAllowingImmutableMap.builder(); - } - customPayloadBuilder.put(key, value); - return self; - } - - /** @see Statement#setCustomPayload(Map) */ - @NonNull - public SelfT clearCustomPayload() { - customPayloadBuilder = null; - return self; - } - - /** @see Statement#setIdempotent(Boolean) */ - @NonNull - public SelfT setIdempotence(@Nullable Boolean idempotent) { - this.idempotent = idempotent; - return self; - } - - /** - * This method is a shortcut to {@link #setTracing(boolean)} with an argument of true. It is - * preserved to maintain API compatibility. - * - * @see Statement#setTracing(boolean) - */ - @NonNull - public SelfT setTracing() { - return setTracing(true); - } - - /** @see Statement#setTracing(boolean) */ - @NonNull - public SelfT setTracing(boolean tracing) { - this.tracing = tracing; - return self; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setTracing(boolean) setTracing(true)}. 
- */ - @Deprecated - @NonNull - public SelfT enableTracing() { - return setTracing(true); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setTracing(boolean) setTracing(false)}. - */ - @Deprecated - @NonNull - public SelfT disableTracing() { - return setTracing(false); - } - - /** @see Statement#setQueryTimestamp(long) */ - @NonNull - public SelfT setQueryTimestamp(long timestamp) { - this.timestamp = timestamp; - return self; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setQueryTimestamp(long)}. - */ - @Deprecated - @NonNull - public SelfT setDefaultTimestamp(long timestamp) { - return setQueryTimestamp(timestamp); - } - - /** @see Statement#setPagingState(ByteBuffer) */ - @NonNull - public SelfT setPagingState(@Nullable ByteBuffer pagingState) { - this.pagingState = pagingState; - return self; - } - - /** @see Statement#setPageSize(int) */ - @NonNull - public SelfT setPageSize(int pageSize) { - this.pageSize = pageSize; - return self; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setPageSize(int)}. 
- */ - @Deprecated - @NonNull - public SelfT setFetchSize(int pageSize) { - return this.setPageSize(pageSize); - } - - /** @see Statement#setConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setConsistencyLevel(@Nullable ConsistencyLevel consistencyLevel) { - this.consistencyLevel = consistencyLevel; - return self; - } - - /** @see Statement#setSerialConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setSerialConsistencyLevel(@Nullable ConsistencyLevel serialConsistencyLevel) { - this.serialConsistencyLevel = serialConsistencyLevel; - return self; - } - - /** @see Statement#setTimeout(Duration) */ - @NonNull - public SelfT setTimeout(@Nullable Duration timeout) { - this.timeout = timeout; - return self; - } - - /** @see Statement#setNode(Node) */ - public SelfT setNode(@Nullable Node node) { - this.node = node; - return self; - } - - /** @see Statement#setNowInSeconds(int) */ - public SelfT setNowInSeconds(int nowInSeconds) { - this.nowInSeconds = nowInSeconds; - return self; - } - - @NonNull - protected Map buildCustomPayload() { - return (customPayloadBuilder == null) - ? NullAllowingImmutableMap.of() - : customPayloadBuilder.build(); - } - - @NonNull - public abstract StatementT build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SyncCqlSession.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SyncCqlSession.java deleted file mode 100644 index a0f752db407..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SyncCqlSession.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.servererrors.QueryExecutionException; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.SyntaxError; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.cql.DefaultPrepareRequest; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; - -/** - * A session that offers user-friendly methods to execute CQL requests synchronously. - * - * @since 4.4.0 - */ -public interface SyncCqlSession extends Session { - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - * - * @param statement the CQL query to execute (that can be any {@link Statement}). - * @return the result of the query. That result will never be null but can be empty (and will be - * for any non SELECT query). - * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to - * execute this query. - * @throws QueryExecutionException if the query triggered an execution exception, i.e. an - * exception thrown by Cassandra when it cannot execute the query with the requested - * consistency level successfully. 
- * @throws QueryValidationException if the query is invalid (syntax error, unauthorized or any - * other validation problem). - */ - @NonNull - default ResultSet execute(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, Statement.SYNC), "The CQL processor should never return a null result"); - } - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - * - *

This is an alias for {@link #execute(Statement) - * execute(SimpleStatement.newInstance(query))}. - * - * @param query the CQL query to execute. - * @return the result of the query. That result will never be null but can be empty (and will be - * for any non SELECT query). - * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to - * execute this query. - * @throws QueryExecutionException if the query triggered an execution exception, i.e. an - * exception thrown by Cassandra when it cannot execute the query with the requested - * consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any - * other validation problem). - * @see SimpleStatement#newInstance(String) - */ - @NonNull - default ResultSet execute(@NonNull String query) { - return execute(SimpleStatement.newInstance(query)); - } - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - * - *

This is an alias for {@link #execute(Statement) execute(SimpleStatement.newInstance(query, - * values))}. - * - * @param query the CQL query to execute. - * @param values the values for placeholders in the query string. Individual values can be {@code - * null}, but the vararg array itself can't. - * @return the result of the query. That result will never be null but can be empty (and will be - * for any non SELECT query). - * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to - * execute this query. - * @throws QueryExecutionException if the query triggered an execution exception, i.e. an - * exception thrown by Cassandra when it cannot execute the query with the requested - * consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any - * other validation problem). - * @see SimpleStatement#newInstance(String, Object...) - */ - @NonNull - default ResultSet execute(@NonNull String query, @NonNull Object... values) { - return execute(SimpleStatement.newInstance(query, values)); - } - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - * - *

This is an alias for {@link #execute(Statement) execute(SimpleStatement.newInstance(query, - * values))}. - * - * @param query the CQL query to execute. - * @param values the values for named placeholders in the query string. Individual values can be - * {@code null}, but the map itself can't. - * @return the result of the query. That result will never be null but can be empty (and will be - * for any non SELECT query). - * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to - * execute this query. - * @throws QueryExecutionException if the query triggered an execution exception, i.e. an - * exception thrown by Cassandra when it cannot execute the query with the requested - * consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any - * other validation problem). - * @see SimpleStatement#newInstance(String, Map) - */ - @NonNull - default ResultSet execute(@NonNull String query, @NonNull Map values) { - return execute(SimpleStatement.newInstance(query, values)); - } - - /** - * Prepares a CQL statement synchronously (the calling thread blocks until the statement is - * prepared). - * - *

Note that the bound statements created from the resulting prepared statement will inherit - * some of the attributes of the provided simple statement. That is, given: - * - *

{@code
-   * SimpleStatement simpleStatement = SimpleStatement.newInstance("...");
-   * PreparedStatement preparedStatement = session.prepare(simpleStatement);
-   * BoundStatement boundStatement = preparedStatement.bind();
-   * }
- * - * Then: - * - *
    - *
  • the following methods return the same value as their counterpart on {@code - * simpleStatement}: - *
      - *
    • {@link Request#getExecutionProfileName() boundStatement.getExecutionProfileName()} - *
    • {@link Request#getExecutionProfile() boundStatement.getExecutionProfile()} - *
    • {@link Statement#getPagingState() boundStatement.getPagingState()} - *
    • {@link Request#getRoutingKey() boundStatement.getRoutingKey()} - *
    • {@link Request#getRoutingToken() boundStatement.getRoutingToken()} - *
    • {@link Request#getCustomPayload() boundStatement.getCustomPayload()} - *
    • {@link Request#isIdempotent() boundStatement.isIdempotent()} - *
    • {@link Request#getTimeout() boundStatement.getTimeout()} - *
    • {@link Statement#getPagingState() boundStatement.getPagingState()} - *
    • {@link Statement#getPageSize() boundStatement.getPageSize()} - *
    • {@link Statement#getConsistencyLevel() boundStatement.getConsistencyLevel()} - *
    • {@link Statement#getSerialConsistencyLevel() - * boundStatement.getSerialConsistencyLevel()} - *
    • {@link Statement#isTracing() boundStatement.isTracing()} - *
    - *
  • {@link Request#getRoutingKeyspace() boundStatement.getRoutingKeyspace()} is set from - * either {@link Request#getKeyspace() simpleStatement.getKeyspace()} (if it's not {@code - * null}), or {@code simpleStatement.getRoutingKeyspace()}; - *
  • on the other hand, the following attributes are not propagated: - *
      - *
    • {@link Statement#getQueryTimestamp() boundStatement.getQueryTimestamp()} will be - * set to {@link Statement#NO_DEFAULT_TIMESTAMP}, meaning that the value will be - * assigned by the session's timestamp generator. - *
    • {@link Statement#getNode() boundStatement.getNode()} will always be {@code null}. - *
    • {@link Statement#getNowInSeconds()} boundStatement.getNowInSeconds()} will always - * be equal to {@link Statement#NO_NOW_IN_SECONDS}. - *
    - *
- * - * If you want to customize this behavior, you can write your own implementation of {@link - * PrepareRequest} and pass it to {@link #prepare(PrepareRequest)}. - * - *

The result of this method is cached: if you call it twice with the same {@link - * SimpleStatement}, you will get the same {@link PreparedStatement} instance. We still recommend - * keeping a reference to it (for example by caching it as a field in a DAO); if that's not - * possible (e.g. if query strings are generated dynamically), it's OK to call this method every - * time: there will just be a small performance overhead to check the internal cache. Note that - * caching is based on: - * - *

    - *
  • the query string exactly as you provided it: the driver does not perform any kind of - * trimming or sanitizing. - *
  • all other execution parameters: for example, preparing two statements with identical - * query strings but different {@linkplain SimpleStatement#getConsistencyLevel() consistency - * levels} will yield distinct prepared statements. - *
- * - * @param statement the CQL query to execute (that can be any {@link SimpleStatement}). - * @return the prepared statement corresponding to {@code statement}. - * @throws SyntaxError if the syntax of the query to prepare is not correct. - */ - @NonNull - default PreparedStatement prepare(@NonNull SimpleStatement statement) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(statement), PrepareRequest.SYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement synchronously (the calling thread blocks until the statement is - * prepared). - * - *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more - * explanations). - * - * @param query the CQL string query to execute. - * @return the prepared statement corresponding to {@code query}. - * @throws SyntaxError if the syntax of the query to prepare is not correct. - */ - @NonNull - default PreparedStatement prepare(@NonNull String query) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(query), PrepareRequest.SYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement synchronously (the calling thread blocks until the statement is - * prepared). - * - *

This variant is exposed in case you use an ad hoc {@link PrepareRequest} implementation to - * customize how attributes are propagated when you prepare a {@link SimpleStatement} (see {@link - * #prepare(SimpleStatement)} for more explanations). Otherwise, you should rarely have to deal - * with {@link PrepareRequest} directly. - * - *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more - * explanations). - * - * @param request the {@code PrepareRequest} to execute. - * @return the prepared statement corresponding to {@code request}. - * @throws SyntaxError if the syntax of the query to prepare is not correct. - */ - @NonNull - default PreparedStatement prepare(@NonNull PrepareRequest request) { - return Objects.requireNonNull( - execute(request, PrepareRequest.SYNC), - "The CQL prepare processor should never return a null result"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java deleted file mode 100644 index 3043d94057f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.net.InetSocketAddress; - -/** An event in a {@link QueryTrace}. */ -public interface TraceEvent { - - /** Which activity this event corresponds to. 
*/ - @Nullable - String getActivity(); - - /** The server-side timestamp of the event. */ - long getTimestamp(); - - /** - * @deprecated returns the source IP, but {@link #getSourceAddress()} should be preferred, since - * C* 4.0 and above now returns the port was well. - */ - @Nullable - @Deprecated - InetAddress getSource(); - - /** - * The IP and Port of the host having generated this event. Prior to C* 4.0 the port will be set - * to zero. - * - *

This method's default implementation returns {@link #getSource()} with the port set to 0. - * The only reason it exists is to preserve binary compatibility. Internally, the driver overrides - * it to set the correct port. - * - * @since 4.6.0 - */ - @Nullable - default InetSocketAddress getSourceAddress() { - return new InetSocketAddress(getSource(), 0); - } - /** - * The number of microseconds elapsed on the source when this event occurred since the moment when - * the source started handling the query. - */ - int getSourceElapsedMicros(); - - /** The name of the thread on which this event occurred. */ - @Nullable - String getThreadName(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java deleted file mode 100644 index 2ca2222424c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.List; - -/** - * A data structure where the values are accessible via a CQL identifier. - * - *

In the driver, these data structures are always accessible by index as well. - */ -public interface AccessibleById extends AccessibleByIndex { - - /** - * Returns all the indices where a given identifier appears. - * - * @throws IllegalArgumentException if the id is invalid. - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(CqlIdentifier)} in a singleton list, which is not entirely correct, - * as it will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull CqlIdentifier id) { - Loggers.ACCESSIBLE_BY_ID.warn( - "{} should override allIndicesOf(CqlIdentifier), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(id)); - } - - /** - * Returns the first index where a given identifier appears (depending on the implementation, - * identifiers may appear multiple times). - * - * @throws IllegalArgumentException if the id is invalid. - */ - int firstIndexOf(@NonNull CqlIdentifier id); - - /** - * Returns the CQL type of the value for the first occurrence of {@code id}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - DataType getType(@NonNull CqlIdentifier id); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java deleted file mode 100644 index 3007ed1fb68..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.type.DataType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A data structure where the values are accessible via an integer index. */ -public interface AccessibleByIndex extends Data { - - /** Returns the number of values. */ - int size(); - - /** - * Returns the CQL type of the {@code i}th value. - * - * @throws IndexOutOfBoundsException if the index is invalid. 
- */ - @NonNull - DataType getType(int i); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java deleted file mode 100644 index 74574a82f38..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.List; - -/** - * A data structure where the values are accessible via a name string. - * - *

This is an optimized version of {@link AccessibleById}, in case the overhead of having to - * create a {@link CqlIdentifier} for each value is too much. - * - *

By default, case is ignored when matching names. If multiple names only differ by their case, - * then the first one is chosen. You can force an exact match by double-quoting the name. - * - *

For example, if the data structure contains three values named {@code Foo}, {@code foo} and - * {@code fOO}, then: - * - *

    - *
  • {@code getString("foo")} retrieves the first value (ignore case, first occurrence). - *
  • {@code getString("\"foo\"")} retrieves the second value (exact case). - *
  • {@code getString("\"fOO\"")} retrieves the third value (exact case). - *
  • {@code getString("\"FOO\"")} fails (exact case, no match). - *
- * - *

In the driver, these data structures are always accessible by index as well. - */ -public interface AccessibleByName extends AccessibleByIndex { - - /** - * Returns all the indices where a given identifier appears. - * - * @throws IllegalArgumentException if the name is invalid. - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(String)} in a singleton list, which is not entirely correct, as it - * will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull String name) { - Loggers.ACCESSIBLE_BY_NAME.warn( - "{} should override allIndicesOf(String), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(name)); - } - - /** - * Returns the first index where a given identifier appears (depending on the implementation, - * identifiers may appear multiple times). - * - * @throws IllegalArgumentException if the name is invalid. - */ - int firstIndexOf(@NonNull String name); - - /** - * Returns the CQL type of the value for the first occurrence of {@code name}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * GettableByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - DataType getType(@NonNull String name); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/ByteUtils.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/ByteUtils.java deleted file mode 100644 index d3dc68733e4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/ByteUtils.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; - -/** - * A set of static utility methods to work with byte buffers (associated with CQL type {@code - * blob}). - */ -public class ByteUtils { - - // Implementation note: this is just a gateway to the internal `Bytes` class in native-protocol. - // The difference is that this one is part of the public API. - - /** - * Converts a blob to its CQL hex string representation. - * - *

A CQL blob string representation consists of the hexadecimal representation of the blob - * bytes prefixed by "0x". - * - * @param bytes the blob/bytes to convert to a string. - * @return the CQL string representation of {@code bytes}. If {@code bytes} is {@code null}, this - * method returns {@code null}. - */ - public static String toHexString(ByteBuffer bytes) { - return Bytes.toHexString(bytes); - } - - /** - * Converts a blob to its CQL hex string representation. - * - *

A CQL blob string representation consists of the hexadecimal representation of the blob - * bytes prefixed by "0x". - * - * @param byteArray the blob/bytes array to convert to a string. - * @return the CQL string representation of {@code bytes}. If {@code bytes} is {@code null}, this - * method returns {@code null}. - */ - public static String toHexString(byte[] byteArray) { - return Bytes.toHexString(byteArray); - } - - /** - * Parses a hex string representing a CQL blob. - * - *

The input should be a valid representation of a CQL blob, i.e. it must start by "0x" - * followed by the hexadecimal representation of the blob bytes. - * - * @param str the CQL blob string representation to parse. - * @return the bytes corresponding to {@code str}. If {@code str} is {@code null}, this method - * returns {@code null}. - * @throws IllegalArgumentException if {@code str} is not a valid CQL blob string. - */ - public static ByteBuffer fromHexString(String str) { - return Bytes.fromHexString(str); - } - - /** - * Extracts the content of the provided {@code ByteBuffer} as a byte array. - * - *

This method works with any type of {@code ByteBuffer} (direct and non-direct ones), but when - * the buffer is backed by an array, it will try to avoid copy when possible. As a consequence, - * changes to the returned byte array may or may not reflect into the initial buffer. - * - * @param bytes the buffer whose contents to extract. - * @return a byte array with the contents of {@code bytes}. That array may be the array backing - * {@code bytes} if this can avoid a copy. - */ - public static byte[] getArray(ByteBuffer bytes) { - return Bytes.getArray(bytes); - } - - private ByteUtils() {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java deleted file mode 100644 index bfa9df20bbb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java +++ /dev/null @@ -1,666 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Objects; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.time.Duration; -import java.time.Period; -import java.time.temporal.ChronoUnit; -import java.time.temporal.Temporal; -import java.time.temporal.TemporalAmount; -import java.time.temporal.TemporalUnit; -import java.time.temporal.UnsupportedTemporalTypeException; -import java.util.List; -import java.util.Locale; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import net.jcip.annotations.Immutable; - -/** - * A duration, as defined in CQL. - * - *

It stores months, days, and seconds separately due to the fact that the number of days in a - * month varies, and a day can have 23 or 25 hours if a daylight saving is involved. As such, this - * type differs from {@link java.time.Duration} (which only represents an amount between two points - * in time, regardless of the calendar). - */ -@Immutable -public final class CqlDuration implements TemporalAmount, Serializable { - - private static final long serialVersionUID = 1L; - - @VisibleForTesting static final long NANOS_PER_MICRO = 1000L; - @VisibleForTesting static final long NANOS_PER_MILLI = 1000 * NANOS_PER_MICRO; - @VisibleForTesting static final long NANOS_PER_SECOND = 1000 * NANOS_PER_MILLI; - @VisibleForTesting static final long NANOS_PER_MINUTE = 60 * NANOS_PER_SECOND; - @VisibleForTesting static final long NANOS_PER_HOUR = 60 * NANOS_PER_MINUTE; - @VisibleForTesting static final int DAYS_PER_WEEK = 7; - @VisibleForTesting static final int MONTHS_PER_YEAR = 12; - - /** The Regexp used to parse the duration provided as String. */ - private static final Pattern STANDARD_PATTERN = - Pattern.compile( - "\\G(\\d+)(y|Y|mo|MO|mO|Mo|w|W|d|D|h|H|s|S|ms|MS|mS|Ms|us|US|uS|Us|µs|µS|ns|NS|nS|Ns|m|M)"); - - /** - * The Regexp used to parse the duration when provided in the ISO 8601 format with designators. - */ - private static final Pattern ISO8601_PATTERN = - Pattern.compile("P((\\d+)Y)?((\\d+)M)?((\\d+)D)?(T((\\d+)H)?((\\d+)M)?((\\d+)S)?)?"); - - /** - * The Regexp used to parse the duration when provided in the ISO 8601 format with designators. - */ - private static final Pattern ISO8601_WEEK_PATTERN = Pattern.compile("P(\\d+)W"); - - /** The Regexp used to parse the duration when provided in the ISO 8601 alternative format. 
*/ - private static final Pattern ISO8601_ALTERNATIVE_PATTERN = - Pattern.compile("P(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2})"); - - private static final ImmutableList TEMPORAL_UNITS = - ImmutableList.of(ChronoUnit.MONTHS, ChronoUnit.DAYS, ChronoUnit.NANOS); - - /** @serial */ - private final int months; - /** @serial */ - private final int days; - /** @serial */ - private final long nanoseconds; - - private CqlDuration(int months, int days, long nanoseconds) { - // Makes sure that all the values are negative if one of them is - if ((months < 0 || days < 0 || nanoseconds < 0) - && (months > 0 || days > 0 || nanoseconds > 0)) { - throw new IllegalArgumentException( - String.format( - "All values must be either negative or positive, got %d months, %d days, %d nanoseconds", - months, days, nanoseconds)); - } - this.months = months; - this.days = days; - this.nanoseconds = nanoseconds; - } - - /** - * Creates a duration with the given number of months, days and nanoseconds. - * - *

A duration can be negative. In this case, all the non zero values must be negative. - * - * @param months the number of months - * @param days the number of days - * @param nanoseconds the number of nanoseconds - * @throws IllegalArgumentException if the values are not all negative or all positive - */ - public static CqlDuration newInstance(int months, int days, long nanoseconds) { - return new CqlDuration(months, days, nanoseconds); - } - - /** - * Converts a String into a duration. - * - *

The accepted formats are: - * - *

    - *
  • multiple digits followed by a time unit like: 12h30m where the time unit can be: - *
      - *
    • {@code y}: years - *
    • {@code mo}: months - *
    • {@code w}: weeks - *
    • {@code d}: days - *
    • {@code h}: hours - *
    • {@code m}: minutes - *
    • {@code s}: seconds - *
    • {@code ms}: milliseconds - *
    • {@code us} or {@code µs}: microseconds - *
    • {@code ns}: nanoseconds - *
    - *
  • ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W - *
  • ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss] - *
- * - * @param input the String to convert - */ - public static CqlDuration from(@NonNull String input) { - boolean isNegative = input.startsWith("-"); - String source = isNegative ? input.substring(1) : input; - - if (source.startsWith("P")) { - if (source.endsWith("W")) { - return parseIso8601WeekFormat(isNegative, source); - } - if (source.contains("-")) { - return parseIso8601AlternativeFormat(isNegative, source); - } - return parseIso8601Format(isNegative, source); - } - return parseStandardFormat(isNegative, source); - } - - private static CqlDuration parseIso8601Format(boolean isNegative, @NonNull String source) { - Matcher matcher = ISO8601_PATTERN.matcher(source); - if (!matcher.matches()) - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - - Builder builder = new Builder(isNegative); - if (matcher.group(1) != null) { - builder.addYears(groupAsLong(matcher, 2)); - } - if (matcher.group(3) != null) { - builder.addMonths(groupAsLong(matcher, 4)); - } - if (matcher.group(5) != null) { - builder.addDays(groupAsLong(matcher, 6)); - } - // Checks if the String contains time information - if (matcher.group(7) != null) { - if (matcher.group(8) != null) { - builder.addHours(groupAsLong(matcher, 9)); - } - if (matcher.group(10) != null) { - builder.addMinutes(groupAsLong(matcher, 11)); - } - if (matcher.group(12) != null) { - builder.addSeconds(groupAsLong(matcher, 13)); - } - } - return builder.build(); - } - - private static CqlDuration parseIso8601AlternativeFormat( - boolean isNegative, @NonNull String source) { - Matcher matcher = ISO8601_ALTERNATIVE_PATTERN.matcher(source); - if (!matcher.matches()) { - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - } - return new Builder(isNegative) - .addYears(groupAsLong(matcher, 1)) - .addMonths(groupAsLong(matcher, 2)) - .addDays(groupAsLong(matcher, 3)) - .addHours(groupAsLong(matcher, 4)) - 
.addMinutes(groupAsLong(matcher, 5)) - .addSeconds(groupAsLong(matcher, 6)) - .build(); - } - - private static CqlDuration parseIso8601WeekFormat(boolean isNegative, @NonNull String source) { - Matcher matcher = ISO8601_WEEK_PATTERN.matcher(source); - if (!matcher.matches()) { - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - } - return new Builder(isNegative).addWeeks(groupAsLong(matcher, 1)).build(); - } - - private static CqlDuration parseStandardFormat(boolean isNegative, @NonNull String source) { - Matcher matcher = STANDARD_PATTERN.matcher(source); - if (!matcher.find()) { - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - } - Builder builder = new Builder(isNegative); - boolean done; - - do { - long number = groupAsLong(matcher, 1); - String symbol = matcher.group(2); - add(builder, number, symbol); - done = matcher.end() == source.length(); - } while (matcher.find()); - - if (!done) { - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - } - return builder.build(); - } - - private static long groupAsLong(@NonNull Matcher matcher, int group) { - return Long.parseLong(matcher.group(group)); - } - - private static Builder add(@NonNull Builder builder, long number, @NonNull String symbol) { - String s = symbol.toLowerCase(Locale.ROOT); - if (s.equals("y")) { - return builder.addYears(number); - } else if (s.equals("mo")) { - return builder.addMonths(number); - } else if (s.equals("w")) { - return builder.addWeeks(number); - } else if (s.equals("d")) { - return builder.addDays(number); - } else if (s.equals("h")) { - return builder.addHours(number); - } else if (s.equals("m")) { - return builder.addMinutes(number); - } else if (s.equals("s")) { - return builder.addSeconds(number); - } else if (s.equals("ms")) { - return builder.addMillis(number); - } else if (s.equals("us") || s.equals("µs")) { - 
return builder.addMicros(number); - } else if (s.equals("ns")) { - return builder.addNanos(number); - } - throw new IllegalArgumentException(String.format("Unknown duration symbol '%s'", symbol)); - } - - /** - * Appends the result of the division to the specified builder if the dividend is not zero. - * - * @param builder the builder to append to - * @param dividend the dividend - * @param divisor the divisor - * @param unit the time unit to append after the result of the division - * @return the remainder of the division - */ - private static long append( - @NonNull StringBuilder builder, long dividend, long divisor, @NonNull String unit) { - if (dividend == 0 || dividend < divisor) { - return dividend; - } - builder.append(dividend / divisor).append(unit); - return dividend % divisor; - } - - /** - * Returns the number of months in this duration. - * - * @return the number of months in this duration. - */ - public int getMonths() { - return months; - } - - /** - * Returns the number of days in this duration. - * - * @return the number of days in this duration. - */ - public int getDays() { - return days; - } - - /** - * Returns the number of nanoseconds in this duration. - * - * @return the number of months in this duration. - */ - public long getNanoseconds() { - return nanoseconds; - } - - /** - * {@inheritDoc} - * - *

This implementation converts the months and days components to a {@link Period}, and the - * nanosecond component to a {@link Duration}, and adds those two amounts to the temporal object. - * Therefore the chronology of the temporal must be either the ISO chronology or null. - * - * @see Period#addTo(Temporal) - * @see Duration#addTo(Temporal) - */ - @Override - public Temporal addTo(Temporal temporal) { - return temporal.plus(Period.of(0, months, days)).plus(Duration.ofNanos(nanoseconds)); - } - - /** - * {@inheritDoc} - * - *

This implementation converts the months and days components to a {@link Period}, and the - * nanosecond component to a {@link Duration}, and subtracts those two amounts to the temporal - * object. Therefore the chronology of the temporal must be either the ISO chronology or null. - * - * @see Period#subtractFrom(Temporal) - * @see Duration#subtractFrom(Temporal) - */ - @Override - public Temporal subtractFrom(Temporal temporal) { - return temporal.minus(Period.of(0, months, days)).minus(Duration.ofNanos(nanoseconds)); - } - - @Override - public long get(TemporalUnit unit) { - if (unit == ChronoUnit.MONTHS) { - return months; - } else if (unit == ChronoUnit.DAYS) { - return days; - } else if (unit == ChronoUnit.NANOS) { - return nanoseconds; - } else { - throw new UnsupportedTemporalTypeException("Unsupported unit: " + unit); - } - } - - @Override - public List getUnits() { - return TEMPORAL_UNITS; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CqlDuration) { - CqlDuration that = (CqlDuration) other; - return this.days == that.days - && this.months == that.months - && this.nanoseconds == that.nanoseconds; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hashCode(days, months, nanoseconds); - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - - if (months < 0 || days < 0 || nanoseconds < 0) { - builder.append('-'); - } - long remainder = append(builder, Math.abs(months), MONTHS_PER_YEAR, "y"); - append(builder, remainder, 1, "mo"); - - append(builder, Math.abs(days), 1, "d"); - - if (nanoseconds != 0) { - remainder = append(builder, Math.abs(nanoseconds), NANOS_PER_HOUR, "h"); - remainder = append(builder, remainder, NANOS_PER_MINUTE, "m"); - remainder = append(builder, remainder, NANOS_PER_SECOND, "s"); - remainder = append(builder, remainder, NANOS_PER_MILLI, "ms"); - remainder = append(builder, 
remainder, NANOS_PER_MICRO, "us"); - append(builder, remainder, 1, "ns"); - } - return builder.toString(); - } - - private static class Builder { - private final boolean isNegative; - private int months; - private int days; - private long nanoseconds; - - /** We need to make sure that the values for each units are provided in order. */ - private int currentUnitIndex; - - public Builder(boolean isNegative) { - this.isNegative = isNegative; - } - - /** - * Adds the specified amount of years. - * - * @param numberOfYears the number of years to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addYears(long numberOfYears) { - validateOrder(1); - validateMonths(numberOfYears, MONTHS_PER_YEAR); - // Cast to avoid http://errorprone.info/bugpattern/NarrowingCompoundAssignment - // We could also change the method to accept an int, but keeping long allows us to keep the - // calling code generic. - months += (int) numberOfYears * MONTHS_PER_YEAR; - return this; - } - - /** - * Adds the specified amount of months. - * - * @param numberOfMonths the number of months to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addMonths(long numberOfMonths) { - validateOrder(2); - validateMonths(numberOfMonths, 1); - months += (int) numberOfMonths; - return this; - } - - /** - * Adds the specified amount of weeks. - * - * @param numberOfWeeks the number of weeks to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addWeeks(long numberOfWeeks) { - validateOrder(3); - validateDays(numberOfWeeks, DAYS_PER_WEEK); - days += (int) numberOfWeeks * DAYS_PER_WEEK; - return this; - } - - /** - * Adds the specified amount of days. - * - * @param numberOfDays the number of days to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addDays(long numberOfDays) { - validateOrder(4); - validateDays(numberOfDays, 1); - days += (int) numberOfDays; - return this; - } - - /** - * Adds the specified amount of hours. 
- * - * @param numberOfHours the number of hours to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addHours(long numberOfHours) { - validateOrder(5); - validateNanos(numberOfHours, NANOS_PER_HOUR); - nanoseconds += numberOfHours * NANOS_PER_HOUR; - return this; - } - - /** - * Adds the specified amount of minutes. - * - * @param numberOfMinutes the number of minutes to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addMinutes(long numberOfMinutes) { - validateOrder(6); - validateNanos(numberOfMinutes, NANOS_PER_MINUTE); - nanoseconds += numberOfMinutes * NANOS_PER_MINUTE; - return this; - } - - /** - * Adds the specified amount of seconds. - * - * @param numberOfSeconds the number of seconds to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addSeconds(long numberOfSeconds) { - validateOrder(7); - validateNanos(numberOfSeconds, NANOS_PER_SECOND); - nanoseconds += numberOfSeconds * NANOS_PER_SECOND; - return this; - } - - /** - * Adds the specified amount of milliseconds. - * - * @param numberOfMillis the number of milliseconds to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addMillis(long numberOfMillis) { - validateOrder(8); - validateNanos(numberOfMillis, NANOS_PER_MILLI); - nanoseconds += numberOfMillis * NANOS_PER_MILLI; - return this; - } - - /** - * Adds the specified amount of microseconds. - * - * @param numberOfMicros the number of microseconds to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addMicros(long numberOfMicros) { - validateOrder(9); - validateNanos(numberOfMicros, NANOS_PER_MICRO); - nanoseconds += numberOfMicros * NANOS_PER_MICRO; - return this; - } - - /** - * Adds the specified amount of nanoseconds. - * - * @param numberOfNanos the number of nanoseconds to add. 
- * @return this {@code Builder} - */ - @NonNull - public Builder addNanos(long numberOfNanos) { - validateOrder(10); - validateNanos(numberOfNanos, 1); - nanoseconds += numberOfNanos; - return this; - } - - /** - * Validates that the total number of months can be stored. - * - * @param units the number of units that need to be added - * @param monthsPerUnit the number of days per unit - */ - private void validateMonths(long units, int monthsPerUnit) { - validate(units, (Integer.MAX_VALUE - months) / monthsPerUnit, "months"); - } - - /** - * Validates that the total number of days can be stored. - * - * @param units the number of units that need to be added - * @param daysPerUnit the number of days per unit - */ - private void validateDays(long units, int daysPerUnit) { - validate(units, (Integer.MAX_VALUE - days) / daysPerUnit, "days"); - } - - /** - * Validates that the total number of nanoseconds can be stored. - * - * @param units the number of units that need to be added - * @param nanosPerUnit the number of nanoseconds per unit - */ - private void validateNanos(long units, long nanosPerUnit) { - validate(units, (Long.MAX_VALUE - nanoseconds) / nanosPerUnit, "nanoseconds"); - } - - /** - * Validates that the specified amount is less than the limit. - * - * @param units the number of units to check - * @param limit the limit on the number of units - * @param unitName the unit name - */ - private void validate(long units, long limit, @NonNull String unitName) { - Preconditions.checkArgument( - units <= limit, - "Invalid duration. The total number of %s must be less or equal to %s", - unitName, - Integer.MAX_VALUE); - } - - /** - * Validates that the duration values are added in the proper order. - * - * @param unitIndex the unit index (e.g. years=1, months=2, ...) - */ - private void validateOrder(int unitIndex) { - if (unitIndex == currentUnitIndex) { - throw new IllegalArgumentException( - String.format( - "Invalid duration. 
The %s are specified multiple times", getUnitName(unitIndex))); - } - if (unitIndex <= currentUnitIndex) { - throw new IllegalArgumentException( - String.format( - "Invalid duration. The %s should be after %s", - getUnitName(currentUnitIndex), getUnitName(unitIndex))); - } - currentUnitIndex = unitIndex; - } - - /** - * Returns the name of the unit corresponding to the specified index. - * - * @param unitIndex the unit index - * @return the name of the unit corresponding to the specified index. - */ - @NonNull - private String getUnitName(int unitIndex) { - switch (unitIndex) { - case 1: - return "years"; - case 2: - return "months"; - case 3: - return "weeks"; - case 4: - return "days"; - case 5: - return "hours"; - case 6: - return "minutes"; - case 7: - return "seconds"; - case 8: - return "milliseconds"; - case 9: - return "microseconds"; - case 10: - return "nanoseconds"; - default: - throw new AssertionError("unknown unit index: " + unitIndex); - } - } - - @NonNull - public CqlDuration build() { - return isNegative - ? new CqlDuration(-months, -days, -nanoseconds) - : new CqlDuration(months, days, nanoseconds); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlVector.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlVector.java deleted file mode 100644 index 8089d551750..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlVector.java +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Predicates; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.io.ObjectStreamException; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Objects; -import java.util.stream.Stream; - -/** - * Representation of a vector as defined in CQL. - * - *

A CQL vector is a fixed-length array of non-null numeric values. These properties don't map - * cleanly to an existing class in the standard JDK Collections hierarchy so we provide this value - * object instead. Like other value object collections returned by the driver instances of this - * class are not immutable; think of these value objects as a representation of a vector stored in - * the database as an initial step in some additional computation. - * - *

While we don't implement any Collection APIs we do implement Iterable. We also attempt to play - * nice with the Streams API in order to better facilitate integration with data pipelines. Finally, - * where possible we've tried to make the API of this class similar to the equivalent methods on - * {@link List}. - */ -public class CqlVector implements Iterable, Serializable { - - /** - * Create a new CqlVector containing the specified values. - * - * @param vals the collection of values to wrap. - * @return a CqlVector wrapping those values - */ - public static CqlVector newInstance(V... vals) { - - // Note that Array.asList() guarantees the return of an array which implements RandomAccess - return new CqlVector(Arrays.asList(vals)); - } - - /** - * Create a new CqlVector that "wraps" an existing ArrayList. Modifications to the passed - * ArrayList will also be reflected in the returned CqlVector. - * - * @param list the collection of values to wrap. - * @return a CqlVector wrapping those values - */ - public static CqlVector newInstance(List list) { - Preconditions.checkArgument(list != null, "Input list should not be null"); - return new CqlVector(list); - } - - /** - * Create a new CqlVector instance from the specified string representation. 
- * - * @param str a String representation of a CqlVector - * @param subtypeCodec - * @return a new CqlVector built from the String representation - */ - public static CqlVector from(@NonNull String str, @NonNull TypeCodec subtypeCodec) { - Preconditions.checkArgument(str != null, "Cannot create CqlVector from null string"); - Preconditions.checkArgument(!str.isEmpty(), "Cannot create CqlVector from empty string"); - if (str.equalsIgnoreCase("NULL")) return null; - - int idx = ParseUtils.skipSpaces(str, 0); - if (str.charAt(idx++) != '[') - throw new IllegalArgumentException( - String.format( - "Cannot parse vector value from \"%s\", at character %d expecting '[' but got '%c'", - str, idx, str.charAt(idx))); - - idx = ParseUtils.skipSpaces(str, idx); - - if (str.charAt(idx) == ']') { - return new CqlVector<>(new ArrayList<>()); - } - - List list = new ArrayList<>(); - while (idx < str.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(str, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse vector value from \"%s\", invalid CQL value at character %d", - str, idx), - e); - } - - list.add(subtypeCodec.parse(str.substring(idx, n))); - idx = n; - - idx = ParseUtils.skipSpaces(str, idx); - if (str.charAt(idx) == ']') return new CqlVector<>(list); - if (str.charAt(idx++) != ',') - throw new IllegalArgumentException( - String.format( - "Cannot parse vector value from \"%s\", at character %d expecting ',' but got '%c'", - str, idx, str.charAt(idx))); - - idx = ParseUtils.skipSpaces(str, idx); - } - throw new IllegalArgumentException( - String.format("Malformed vector value \"%s\", missing closing ']'", str)); - } - - private final List list; - - private CqlVector(@NonNull List list) { - - Preconditions.checkArgument( - Iterables.all(list, Predicates.notNull()), "CqlVectors cannot contain null values"); - this.list = list; - } - - /** - * Retrieve the value at the specified index. 
Modelled after {@link List#get(int)} - * - * @param idx the index to retrieve - * @return the value at the specified index - */ - public T get(int idx) { - return list.get(idx); - } - - /** - * Update the value at the specified index. Modelled after {@link List#set(int, Object)} - * - * @param idx the index to set - * @param val the new value for the specified index - * @return the old value for the specified index - */ - public T set(int idx, T val) { - return list.set(idx, val); - } - - /** - * Return the size of this vector. Modelled after {@link List#size()} - * - * @return the vector size - */ - public int size() { - return this.list.size(); - } - - /** - * Return a CqlVector consisting of the contents of a portion of this vector. Modelled after - * {@link List#subList(int, int)} - * - * @param from the index to start from (inclusive) - * @param to the index to end on (exclusive) - * @return a new CqlVector wrapping the sublist - */ - public CqlVector subVector(int from, int to) { - return new CqlVector(this.list.subList(from, to)); - } - - /** - * Return a boolean indicating whether the vector is empty. Modelled after {@link List#isEmpty()} - * - * @return true if the list is empty, false otherwise - */ - public boolean isEmpty() { - return this.list.isEmpty(); - } - - /** - * Create an {@link Iterator} for this vector - * - * @return the generated iterator - */ - @Override - public Iterator iterator() { - return this.list.iterator(); - } - - /** - * Create a {@link Stream} of the values in this vector - * - * @return the Stream instance - */ - public Stream stream() { - return this.list.stream(); - } - - @Override - public boolean equals(Object o) { - if (o == this) { - return true; - } else if (o instanceof CqlVector) { - CqlVector that = (CqlVector) o; - return this.list.equals(that.list); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(list); - } - - /** - * The string representation of the vector. 
Elements, like strings, may not be properly quoted. - * - * @return the string representation - */ - @Override - public String toString() { - return Iterables.toString(this.list); - } - - /** - * Serialization proxy for CqlVector. Allows serialization regardless of implementation of list - * field. - * - * @param inner type of CqlVector, assume Number is always Serializable. - */ - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private transient List list; - - SerializationProxy(CqlVector vector) { - this.list = vector.list; - } - - // Reconstruct CqlVector's list of elements. - private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { - stream.defaultReadObject(); - - int size = stream.readInt(); - list = new ArrayList<>(); - for (int i = 0; i < size; i++) { - list.add((T) stream.readObject()); - } - } - - // Return deserialized proxy object as CqlVector. - private Object readResolve() throws ObjectStreamException { - return new CqlVector(list); - } - - // Write size of CqlVector followed by items in order. - private void writeObject(ObjectOutputStream stream) throws IOException { - stream.defaultWriteObject(); - - stream.writeInt(list.size()); - for (T item : list) { - stream.writeObject(item); - } - } - } - - /** @serialData The number of elements in the vector, followed by each element in-order. 
*/ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java deleted file mode 100644 index 495b96e97c7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A data structure containing CQL values. */ -public interface Data { - - /** - * Returns the registry of all the codecs currently available to convert values for this instance. - * - *

If you obtained this object from the driver, this will be set automatically. If you created - * it manually, or just deserialized it, it is set to {@link CodecRegistry#DEFAULT}. You can - * reattach this object to an existing driver instance to use its codec registry. - * - * @see Detachable - */ - @NonNull - CodecRegistry codecRegistry(); - - /** - * Returns the protocol version that is currently used to convert values for this instance. - * - *

If you obtained this object from the driver, this will be set automatically. If you created - * it manually, or just deserialized it, it is set to {@link DefaultProtocolVersion#DEFAULT}. You - * can reattach this object to an existing driver instance to use its protocol version. - * - * @see Detachable - */ - @NonNull - ProtocolVersion protocolVersion(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java deleted file mode 100644 index 8393bc9f758..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java +++ /dev/null @@ -1,676 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to retrieve its values via a CQL identifier. */ -public interface GettableById extends GettableByIndex, AccessibleById { - - /** - * Returns the raw binary representation of the value for the first occurrence of {@code id}. - * - *

This is primarily for internal use; you'll likely want to use one of the typed getters - * instead, to get a higher-level Java representation. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @return the raw value, or {@code null} if the CQL value is {@code NULL}. For performance - * reasons, this is the actual instance used internally. If you read data from the buffer, - * make sure to {@link ByteBuffer#duplicate() duplicate} it beforehand, or only use relative - * methods. If you change the buffer's index or its contents in any way, any other getter - * invocation for this value will have unpredictable results. - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default ByteBuffer getBytesUnsafe(@NonNull CqlIdentifier id) { - return getBytesUnsafe(firstIndexOf(id)); - } - - /** - * Indicates whether the value for the first occurrence of {@code id} is a CQL {@code NULL}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default boolean isNull(@NonNull CqlIdentifier id) { - return isNull(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id}, using the given codec for the - * conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default ValueT get(@NonNull CqlIdentifier id, @NonNull TypeCodec codec) { - return get(firstIndexOf(id), codec); - } - - /** - * Returns the value for the first occurrence of {@code id}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #get(int, Class)} instead, which may perform slightly better. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(@NonNull CqlIdentifier id, @NonNull GenericType targetType) { - return get(firstIndexOf(id), targetType); - } - - /** - * Returns the value for the first occurrence of {@code id}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #get(int, GenericType)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(@NonNull CqlIdentifier id, @NonNull Class targetClass) { - return get(firstIndexOf(id), targetClass); - } - - /** - * Returns the value for the first occurrence of {@code id}, converting it to the most appropriate - * Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

Use this method to dynamically inspect elements when types aren't known in advance, for - * instance if you're writing a generic row logger. If you know the target Java type, it is - * generally preferable to use typed variants, such as the ones for built-in types ({@link - * #getBoolean(int)}, {@link #getInt(int)}, etc.), or {@link #get(int, Class)} and {@link - * #get(int, GenericType)} for custom types. - * - *

The definition of "most appropriate" is unspecified, and left to the appreciation of the - * {@link #codecRegistry()} implementation. By default, the driver uses the mapping described in - * the other {@code getXxx()} methods (for example {@link #getString(int) String for text, varchar - * and ascii}, etc). - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default Object getObject(@NonNull CqlIdentifier id) { - return getObject(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code false}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Boolean.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default boolean getBoolean(@NonNull CqlIdentifier id) { - return getBoolean(firstIndexOf(id)); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getBoolean(CqlIdentifier)}. - */ - @Deprecated - default boolean getBool(@NonNull CqlIdentifier id) { - return getBoolean(id); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Byte.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default byte getByte(@NonNull CqlIdentifier id) { - return getByte(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Double.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default double getDouble(@NonNull CqlIdentifier id) { - return getDouble(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Float.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default float getFloat(@NonNull CqlIdentifier id) { - return getFloat(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Integer.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default int getInt(@NonNull CqlIdentifier id) { - return getInt(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Long.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default long getLong(@NonNull CqlIdentifier id) { - return getLong(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Short.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default short getShort(@NonNull CqlIdentifier id) { - return getShort(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default Instant getInstant(@NonNull CqlIdentifier id) { - return getInstant(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java local date. - * - *

By default, this works with CQL type {@code date}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default LocalDate getLocalDate(@NonNull CqlIdentifier id) { - return getLocalDate(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java local time. - * - *

By default, this works with CQL type {@code time}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default LocalTime getLocalTime(@NonNull CqlIdentifier id) { - return getLocalTime(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default ByteBuffer getByteBuffer(@NonNull CqlIdentifier id) { - return getByteBuffer(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default String getString(@NonNull CqlIdentifier id) { - return getString(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default BigInteger getBigInteger(@NonNull CqlIdentifier id) { - return getBigInteger(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default BigDecimal getBigDecimal(@NonNull CqlIdentifier id) { - return getBigDecimal(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default UUID getUuid(@NonNull CqlIdentifier id) { - return getUuid(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default InetAddress getInetAddress(@NonNull CqlIdentifier id) { - return getInetAddress(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a duration. - * - *

By default, this works with CQL type {@code duration}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default CqlDuration getCqlDuration(@NonNull CqlIdentifier id) { - return getCqlDuration(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a vector. - * - *

By default, this works with CQL type {@code vector}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default CqlVector getVector( - @NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return getVector(firstIndexOf(id), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code id} as a token. - * - *

Note that, for simplicity, this method relies on the CQL type of the column to pick the - * correct token implementation. Therefore it must only be called on columns of the type that - * matches the partitioner in use for this cluster: {@code bigint} for {@code Murmur3Partitioner}, - * {@code blob} for {@code ByteOrderedPartitioner}, and {@code varint} for {@code - * RandomPartitioner}. Calling it for the wrong type will produce corrupt tokens that are unusable - * with this driver instance. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the column type can not be converted to a known token type - * or if the name is invalid. - */ - @Nullable - default Token getToken(@NonNull CqlIdentifier id) { - return getToken(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default List getList( - @NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return getList(firstIndexOf(id), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default Set getSet( - @NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return getSet(firstIndexOf(id), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default Map getMap( - @NonNull CqlIdentifier id, @NonNull Class keyClass, @NonNull Class valueClass) { - return getMap(firstIndexOf(id), keyClass, valueClass); - } - - /** - * Returns the value for the first occurrence of {@code id} as a user defined type value. - * - *

By default, this works with CQL user-defined types. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default UdtValue getUdtValue(@NonNull CqlIdentifier id) { - return getUdtValue(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a tuple value. - * - *

By default, this works with CQL tuples. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default TupleValue getTupleValue(@NonNull CqlIdentifier id) { - return getTupleValue(firstIndexOf(id)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java deleted file mode 100644 index bb75bd9a2b4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java +++ /dev/null @@ -1,565 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveBooleanCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveByteCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveDoubleCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveFloatCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveShortCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to retrieve its values via an integer index. */ -public interface GettableByIndex extends AccessibleByIndex { - - /** - * Returns the raw binary representation of the {@code i}th value. - * - *

This is primarily for internal use; you'll likely want to use one of the typed getters - * instead, to get a higher-level Java representation. - * - * @return the raw value, or {@code null} if the CQL value is {@code NULL}. For performance - * reasons, this is the actual instance used internally. If you read data from the buffer, - * make sure to {@link ByteBuffer#duplicate() duplicate} it beforehand, or only use relative - * methods. If you change the buffer's index or its contents in any way, any other getter - * invocation for this value will have unpredictable results. - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - ByteBuffer getBytesUnsafe(int i); - - /** - * Indicates whether the {@code i}th value is a CQL {@code NULL}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default boolean isNull(int i) { - return getBytesUnsafe(i) == null; - } - - /** - * Returns the {@code i}th value, using the given codec for the conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default ValueT get(int i, TypeCodec codec) { - return codec.decode(getBytesUnsafe(i), protocolVersion()); - } - - /** - * Returns the {@code i}th value, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #get(int, Class)} instead, which may perform slightly better. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(int i, GenericType targetType) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, targetType); - return get(i, codec); - } - - /** - * Returns the {@code i}th value, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #get(int, GenericType)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(int i, Class targetClass) { - // This is duplicated from the GenericType variant, because we want to give the codec registry - // a chance to process the unwrapped class directly, if it can do so in a more efficient way. - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, targetClass); - return get(i, codec); - } - - /** - * Returns the {@code i}th value, converting it to the most appropriate Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

Use this method to dynamically inspect elements when types aren't known in advance, for - * instance if you're writing a generic row logger. If you know the target Java type, it is - * generally preferable to use typed variants, such as the ones for built-in types ({@link - * #getBoolean(int)}, {@link #getInt(int)}, etc.), or {@link #get(int, Class)} and {@link - * #get(int, GenericType)} for custom types. - * - *

The definition of "most appropriate" is unspecified, and left to the appreciation of the - * {@link #codecRegistry()} implementation. By default, the driver uses the mapping described in - * the other {@code getXxx()} methods (for example {@link #getString(int) String for text, varchar - * and ascii}, etc). - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default Object getObject(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType); - return codec.decode(getBytesUnsafe(i), protocolVersion()); - } - - /** - * Returns the {@code i}th value as a Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code false}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Boolean.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default boolean getBoolean(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Boolean.class); - if (codec instanceof PrimitiveBooleanCodec) { - return ((PrimitiveBooleanCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Boolean value = get(i, codec); - return value == null ? false : value; - } - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getBoolean(int)}. - */ - @Deprecated - default boolean getBool(int i) { - return getBoolean(i); - } - - /** - * Returns the {@code i}th value as a Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Byte.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default byte getByte(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Byte.class); - if (codec instanceof PrimitiveByteCodec) { - return ((PrimitiveByteCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Byte value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Double.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default double getDouble(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Double.class); - if (codec instanceof PrimitiveDoubleCodec) { - return ((PrimitiveDoubleCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Double value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Float.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default float getFloat(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Float.class); - if (codec instanceof PrimitiveFloatCodec) { - return ((PrimitiveFloatCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Float value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Integer.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default int getInt(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Integer.class); - if (codec instanceof PrimitiveIntCodec) { - return ((PrimitiveIntCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Integer value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Long.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default long getLong(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Long.class); - if (codec instanceof PrimitiveLongCodec) { - return ((PrimitiveLongCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Long value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Short.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default short getShort(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Short.class); - if (codec instanceof PrimitiveShortCodec) { - return ((PrimitiveShortCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Short value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default Instant getInstant(int i) { - return get(i, Instant.class); - } - - /** - * Returns the {@code i}th value as a Java local date. - * - *

By default, this works with CQL type {@code date}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default LocalDate getLocalDate(int i) { - return get(i, LocalDate.class); - } - - /** - * Returns the {@code i}th value as a Java local time. - * - *

By default, this works with CQL type {@code time}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default LocalTime getLocalTime(int i) { - return get(i, LocalTime.class); - } - - /** - * Returns the {@code i}th value as a Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default ByteBuffer getByteBuffer(int i) { - return get(i, ByteBuffer.class); - } - - /** - * Returns the {@code i}th value as a Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default String getString(int i) { - return get(i, String.class); - } - - /** - * Returns the {@code i}th value as a Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default BigInteger getBigInteger(int i) { - return get(i, BigInteger.class); - } - - /** - * Returns the {@code i}th value as a Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default BigDecimal getBigDecimal(int i) { - return get(i, BigDecimal.class); - } - - /** - * Returns the {@code i}th value as a Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default UUID getUuid(int i) { - return get(i, UUID.class); - } - - /** - * Returns the {@code i}th value as a Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default InetAddress getInetAddress(int i) { - return get(i, InetAddress.class); - } - - /** - * Returns the {@code i}th value as a duration. - * - *

By default, this works with CQL type {@code duration}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default CqlDuration getCqlDuration(int i) { - return get(i, CqlDuration.class); - } - - /** - * Returns the {@code i}th value as a vector. - * - *

By default, this works with CQL type {@code vector}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default CqlVector getVector(int i, @NonNull Class elementsClass) { - return get(i, GenericType.vectorOf(elementsClass)); - } - - /** - * Returns the {@code i}th value as a token. - * - *

Note that, for simplicity, this method relies on the CQL type of the column to pick the - * correct token implementation. Therefore it must only be called on columns of the type that - * matches the partitioner in use for this cluster: {@code bigint} for {@code Murmur3Partitioner}, - * {@code blob} for {@code ByteOrderedPartitioner}, and {@code varint} for {@code - * RandomPartitioner}. Calling it for the wrong type will produce corrupt tokens that are unusable - * with this driver instance. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws IllegalArgumentException if the column type can not be converted to a known token type. - */ - @Nullable - default Token getToken(int i) { - DataType type = getType(i); - // Simply enumerate all known implementations. This goes against the concept of TokenFactory, - // but injecting the factory here is too much of a hassle. - // The only issue is if someone uses a custom partitioner, but this is highly unlikely, and even - // then they can get the value manually as a workaround. - if (type.equals(DataTypes.BIGINT)) { - return isNull(i) ? null : new Murmur3Token(getLong(i)); - } else if (type.equals(DataTypes.BLOB)) { - return isNull(i) ? null : new ByteOrderedToken(getByteBuffer(i)); - } else if (type.equals(DataTypes.VARINT)) { - return isNull(i) ? null : new RandomToken(getBigInteger(i)); - } else { - throw new IllegalArgumentException("Can't convert CQL type " + type + " into a token"); - } - } - - /** - * Returns the {@code i}th value as a Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #get(int, GenericType)}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default List getList(int i, @NonNull Class elementsClass) { - return get(i, GenericType.listOf(elementsClass)); - } - - /** - * Returns the {@code i}th value as a Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #get(int, GenericType)}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default Set getSet(int i, @NonNull Class elementsClass) { - return get(i, GenericType.setOf(elementsClass)); - } - - /** - * Returns the {@code i}th value as a Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #get(int, GenericType)}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default Map getMap( - int i, @NonNull Class keyClass, @NonNull Class valueClass) { - return get(i, GenericType.mapOf(keyClass, valueClass)); - } - - /** - * Returns the {@code i}th value as a user defined type value. - * - *

By default, this works with CQL user-defined types. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default UdtValue getUdtValue(int i) { - return get(i, UdtValue.class); - } - - /** - * Returns the {@code i}th value as a tuple value. - * - *

By default, this works with CQL tuples. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default TupleValue getTupleValue(int i) { - return get(i, TupleValue.class); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java deleted file mode 100644 index b0a4660033b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java +++ /dev/null @@ -1,672 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to retrieve its values via a name. */ -public interface GettableByName extends GettableByIndex, AccessibleByName { - - /** - * Returns the raw binary representation of the value for the first occurrence of {@code name}. - * - *

This is primarily for internal use; you'll likely want to use one of the typed getters - * instead, to get a higher-level Java representation. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @return the raw value, or {@code null} if the CQL value is {@code NULL}. For performance - * reasons, this is the actual instance used internally. If you read data from the buffer, - * make sure to {@link ByteBuffer#duplicate() duplicate} it beforehand, or only use relative - * methods. If you change the buffer's index or its contents in any way, any other getter - * invocation for this value will have unpredictable results. - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default ByteBuffer getBytesUnsafe(@NonNull String name) { - return getBytesUnsafe(firstIndexOf(name)); - } - - /** - * Indicates whether the value for the first occurrence of {@code name} is a CQL {@code NULL}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default boolean isNull(@NonNull String name) { - return isNull(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name}, using the given codec for the - * conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default ValueT get(@NonNull String name, @NonNull TypeCodec codec) { - return get(firstIndexOf(name), codec); - } - - /** - * Returns the value for the first occurrence of {@code name}, converting it to the given Java - * type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #get(int, Class)} instead, which may perform slightly better. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(@NonNull String name, @NonNull GenericType targetType) { - return get(firstIndexOf(name), targetType); - } - - /** - * Returns the value for the first occurrence of {@code name}, converting it to the given Java - * type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #get(int, GenericType)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(@NonNull String name, @NonNull Class targetClass) { - return get(firstIndexOf(name), targetClass); - } - - /** - * Returns the value for the first occurrence of {@code name}, converting it to the most - * appropriate Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

Use this method to dynamically inspect elements when types aren't known in advance, for - * instance if you're writing a generic row logger. If you know the target Java type, it is - * generally preferable to use typed variants, such as the ones for built-in types ({@link - * #getBoolean(int)}, {@link #getInt(int)}, etc.), or {@link #get(int, Class)} and {@link - * #get(int, GenericType)} for custom types. - * - *

The definition of "most appropriate" is unspecified, and left to the appreciation of the - * {@link #codecRegistry()} implementation. By default, the driver uses the mapping described in - * the other {@code getXxx()} methods (for example {@link #getString(int) String for text, varchar - * and ascii}, etc). - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default Object getObject(@NonNull String name) { - return getObject(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code false}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Boolean.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default boolean getBoolean(@NonNull String name) { - return getBoolean(firstIndexOf(name)); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getBoolean(String)}. - */ - @Deprecated - default boolean getBool(@NonNull String name) { - return getBoolean(name); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Byte.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default byte getByte(@NonNull String name) { - return getByte(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Double.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default double getDouble(@NonNull String name) { - return getDouble(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Float.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default float getFloat(@NonNull String name) { - return getFloat(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Integer.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default int getInt(@NonNull String name) { - return getInt(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Long.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default long getLong(@NonNull String name) { - return getLong(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Short.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default short getShort(@NonNull String name) { - return getShort(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default Instant getInstant(@NonNull String name) { - return getInstant(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java local date. - * - *

By default, this works with CQL type {@code date}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default LocalDate getLocalDate(@NonNull String name) { - return getLocalDate(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java local time. - * - *

By default, this works with CQL type {@code time}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default LocalTime getLocalTime(@NonNull String name) { - return getLocalTime(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default ByteBuffer getByteBuffer(@NonNull String name) { - return getByteBuffer(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default String getString(@NonNull String name) { - return getString(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default BigInteger getBigInteger(@NonNull String name) { - return getBigInteger(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default BigDecimal getBigDecimal(@NonNull String name) { - return getBigDecimal(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default UUID getUuid(@NonNull String name) { - return getUuid(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default InetAddress getInetAddress(@NonNull String name) { - return getInetAddress(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a duration. - * - *

By default, this works with CQL type {@code duration}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default CqlDuration getCqlDuration(@NonNull String name) { - return getCqlDuration(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a vector. - * - *

By default, this works with CQL type {@code vector}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default CqlVector getVector( - @NonNull String name, @NonNull Class elementsClass) { - return getVector(firstIndexOf(name), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code name} as a token. - * - *

Note that, for simplicity, this method relies on the CQL type of the column to pick the - * correct token implementation. Therefore it must only be called on columns of the type that - * matches the partitioner in use for this cluster: {@code bigint} for {@code Murmur3Partitioner}, - * {@code blob} for {@code ByteOrderedPartitioner}, and {@code varint} for {@code - * RandomPartitioner}. Calling it for the wrong type will produce corrupt tokens that are unusable - * with this driver instance. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the column type can not be converted to a known token type - * or if the name is invalid. - */ - @Nullable - default Token getToken(@NonNull String name) { - return getToken(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default List getList( - @NonNull String name, @NonNull Class elementsClass) { - return getList(firstIndexOf(name), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default Set getSet( - @NonNull String name, @NonNull Class elementsClass) { - return getSet(firstIndexOf(name), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default Map getMap( - @NonNull String name, @NonNull Class keyClass, @NonNull Class valueClass) { - return getMap(firstIndexOf(name), keyClass, valueClass); - } - - /** - * Returns the value for the first occurrence of {@code name} as a user defined type value. - * - *

By default, this works with CQL user-defined types. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default UdtValue getUdtValue(@NonNull String name) { - return getUdtValue(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a tuple value. - * - *

By default, this works with CQL tuples. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default TupleValue getTupleValue(@NonNull String name) { - return getTupleValue(firstIndexOf(name)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java deleted file mode 100644 index 0f5e3cd9daa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java +++ /dev/null @@ -1,734 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to set its values via a CQL identifier. */ -public interface SettableById> - extends SettableByIndex, AccessibleById { - - /** - * Sets the raw binary representation of the value for all occurrences of {@code id}. - * - *

This is primarily for internal use; you'll likely want to use one of the typed setters - * instead, to pass a higher-level Java representation. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @param v the raw value, or {@code null} to set the CQL value {@code NULL}. For performance - * reasons, this is the actual instance used internally. If pass in a buffer that you're going - * to modify elsewhere in your application, make sure to {@link ByteBuffer#duplicate() - * duplicate} it beforehand. If you change the buffer's index or its contents in any way, - * further usage of this data will have unpredictable results. - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBytesUnsafe(@NonNull CqlIdentifier id, @Nullable ByteBuffer v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setBytesUnsafe(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - @NonNull - @Override - default DataType getType(@NonNull CqlIdentifier id) { - return getType(firstIndexOf(id)); - } - - /** - * Sets the value for all occurrences of {@code id} to CQL {@code NULL}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToNull(@NonNull CqlIdentifier id) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setToNull(i); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id}, using the given codec for the conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull CqlIdentifier id, @Nullable ValueT v, @NonNull TypeCodec codec) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).set(i, v, codec); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #set(int, Object, Class)} instead, which may perform slightly better. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull CqlIdentifier id, @Nullable ValueT v, @NonNull GenericType targetType) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).set(i, v, targetType); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Returns the value for all occurrences of {@code id}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #set(int, Object, GenericType)} instead. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull CqlIdentifier id, @Nullable ValueT v, @NonNull Class targetClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).set(i, v, targetClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBoolean(@NonNull CqlIdentifier id, boolean v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setBoolean(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setBoolean(CqlIdentifier, boolean)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setBool(@NonNull CqlIdentifier id, boolean v) { - return setBoolean(id, v); - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByte(@NonNull CqlIdentifier id, byte v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setByte(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Double.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setDouble(@NonNull CqlIdentifier id, double v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setDouble(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Float.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setFloat(@NonNull CqlIdentifier id, float v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setFloat(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Integer.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInt(@NonNull CqlIdentifier id, int v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setInt(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Long.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLong(@NonNull CqlIdentifier id, long v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setLong(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Short.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setShort(@NonNull CqlIdentifier id, short v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setShort(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInstant(@NonNull CqlIdentifier id, @Nullable Instant v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setInstant(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java local date. - * - *

By default, this works with CQL type {@code date}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalDate(@NonNull CqlIdentifier id, @Nullable LocalDate v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setLocalDate(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java local time. - * - *

By default, this works with CQL type {@code time}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalTime(@NonNull CqlIdentifier id, @Nullable LocalTime v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setLocalTime(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByteBuffer(@NonNull CqlIdentifier id, @Nullable ByteBuffer v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setByteBuffer(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setString(@NonNull CqlIdentifier id, @Nullable String v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setString(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigInteger(@NonNull CqlIdentifier id, @Nullable BigInteger v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setBigInteger(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigDecimal(@NonNull CqlIdentifier id, @Nullable BigDecimal v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setBigDecimal(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUuid(@NonNull CqlIdentifier id, @Nullable UUID v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setUuid(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInetAddress(@NonNull CqlIdentifier id, @Nullable InetAddress v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setInetAddress(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided duration. - * - *

By default, this works with CQL type {@code duration}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setCqlDuration(@NonNull CqlIdentifier id, @Nullable CqlDuration v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setCqlDuration(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided {@code vector}. - * - *

By default, this works with CQL type {@code vector}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setVector( - @NonNull CqlIdentifier id, - @Nullable CqlVector v, - @NonNull Class elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setVector(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided token. - * - *

This works with the CQL type matching the partitioner in use for this cluster: {@code - * bigint} for {@code Murmur3Partitioner}, {@code blob} for {@code ByteOrderedPartitioner}, and - * {@code varint} for {@code RandomPartitioner}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToken(@NonNull CqlIdentifier id, @NonNull Token v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setToken(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #set(int, Object, GenericType)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setList( - @NonNull CqlIdentifier id, - @Nullable List v, - @NonNull Class elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setList(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #set(int, Object, GenericType)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setSet( - @NonNull CqlIdentifier id, - @Nullable Set v, - @NonNull Class elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setSet(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #set(int, Object, GenericType)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setMap( - @NonNull CqlIdentifier id, - @Nullable Map v, - @NonNull Class keyClass, - @NonNull Class valueClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setMap(i, v, keyClass, valueClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided user defined type value. - * - *

By default, this works with CQL user-defined types. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUdtValue(@NonNull CqlIdentifier id, @Nullable UdtValue v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setUdtValue(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided tuple value. - * - *

By default, this works with CQL tuples. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setTupleValue(@NonNull CqlIdentifier id, @Nullable TupleValue v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setTupleValue(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java deleted file mode 100644 index 4ecdf647590..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java +++ /dev/null @@ -1,539 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveBooleanCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveByteCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveDoubleCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveFloatCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveShortCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to set its values via an integer index. */ -public interface SettableByIndex> extends AccessibleByIndex { - - /** - * Sets the raw binary representation of the {@code i}th value. - * - *

This is primarily for internal use; you'll likely want to use one of the typed setters - * instead, to pass a higher-level Java representation. - * - * @param v the raw value, or {@code null} to set the CQL value {@code NULL}. For performance - * reasons, this is the actual instance used internally. If pass in a buffer that you're going - * to modify elsewhere in your application, make sure to {@link ByteBuffer#duplicate() - * duplicate} it beforehand. If you change the buffer's index or its contents in any way, - * further usage of this data will have unpredictable results. - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - SelfT setBytesUnsafe(int i, @Nullable ByteBuffer v); - - /** - * Sets the {@code i}th value to CQL {@code NULL}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToNull(int i) { - return setBytesUnsafe(i, null); - } - - /** - * Sets the {@code i}th value, using the given codec for the conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT set(int i, @Nullable ValueT v, @NonNull TypeCodec codec) { - return setBytesUnsafe(i, codec.encode(v, protocolVersion())); - } - - /** - * Sets the {@code i}th value, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #set(int, Object, Class)} instead, which may perform slightly better. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set(int i, @Nullable ValueT v, @NonNull GenericType targetType) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, targetType); - return set(i, v, codec); - } - - /** - * Returns the {@code i}th value, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #set(int, Object, GenericType)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set(int i, @Nullable ValueT v, @NonNull Class targetClass) { - // This is duplicated from the GenericType variant, because we want to give the codec registry - // a chance to process the unwrapped class directly, if it can do so in a more efficient way. - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, targetClass); - return set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBoolean(int i, boolean v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Boolean.class); - return (codec instanceof PrimitiveBooleanCodec) - ? setBytesUnsafe(i, ((PrimitiveBooleanCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setBoolean(int, boolean)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setBool(int i, boolean v) { - return setBoolean(i, v); - } - - /** - * Sets the {@code i}th value to the provided Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByte(int i, byte v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Byte.class); - return (codec instanceof PrimitiveByteCodec) - ? setBytesUnsafe(i, ((PrimitiveByteCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Double.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setDouble(int i, double v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Double.class); - return (codec instanceof PrimitiveDoubleCodec) - ? setBytesUnsafe(i, ((PrimitiveDoubleCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Float.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setFloat(int i, float v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Float.class); - return (codec instanceof PrimitiveFloatCodec) - ? setBytesUnsafe(i, ((PrimitiveFloatCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Integer.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInt(int i, int v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Integer.class); - return (codec instanceof PrimitiveIntCodec) - ? setBytesUnsafe(i, ((PrimitiveIntCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Long.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLong(int i, long v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Long.class); - return (codec instanceof PrimitiveLongCodec) - ? setBytesUnsafe(i, ((PrimitiveLongCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Short.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setShort(int i, short v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Short.class); - return (codec instanceof PrimitiveShortCodec) - ? setBytesUnsafe(i, ((PrimitiveShortCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInstant(int i, @Nullable Instant v) { - return set(i, v, Instant.class); - } - - /** - * Sets the {@code i}th value to the provided Java local date. - * - *

By default, this works with CQL type {@code date}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalDate(int i, @Nullable LocalDate v) { - return set(i, v, LocalDate.class); - } - - /** - * Sets the {@code i}th value to the provided Java local time. - * - *

By default, this works with CQL type {@code time}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalTime(int i, @Nullable LocalTime v) { - return set(i, v, LocalTime.class); - } - - /** - * Sets the {@code i}th value to the provided Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByteBuffer(int i, @Nullable ByteBuffer v) { - return set(i, v, ByteBuffer.class); - } - - /** - * Sets the {@code i}th value to the provided Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setString(int i, @Nullable String v) { - return set(i, v, String.class); - } - - /** - * Sets the {@code i}th value to the provided Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigInteger(int i, @Nullable BigInteger v) { - return set(i, v, BigInteger.class); - } - - /** - * Sets the {@code i}th value to the provided Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigDecimal(int i, @Nullable BigDecimal v) { - return set(i, v, BigDecimal.class); - } - - /** - * Sets the {@code i}th value to the provided Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUuid(int i, @Nullable UUID v) { - return set(i, v, UUID.class); - } - - /** - * Sets the {@code i}th value to the provided Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInetAddress(int i, @Nullable InetAddress v) { - return set(i, v, InetAddress.class); - } - - /** - * Sets the {@code i}th value to the provided duration. - * - *

By default, this works with CQL type {@code duration}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setCqlDuration(int i, @Nullable CqlDuration v) { - return set(i, v, CqlDuration.class); - } - - /** - * Sets the {@code i}th value to the provided vector. - * - *

By default, this works with CQL type {@code vector}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setVector( - int i, @Nullable CqlVector v, @NonNull Class elementsClass) { - return set(i, v, GenericType.vectorOf(elementsClass)); - } - - /** - * Sets the {@code i}th value to the provided token. - * - *

This works with the CQL type matching the partitioner in use for this cluster: {@code - * bigint} for {@code Murmur3Partitioner}, {@code blob} for {@code ByteOrderedPartitioner}, and - * {@code varint} for {@code RandomPartitioner}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToken(int i, @NonNull Token v) { - // Simply enumerate all known implementations. This goes against the concept of TokenFactory, - // but injecting the factory here is too much of a hassle. - // The only issue is if someone uses a custom partitioner, but this is highly unlikely, and even - // then they can set the value manually as a workaround. - if (v instanceof Murmur3Token) { - return setLong(i, ((Murmur3Token) v).getValue()); - } else if (v instanceof ByteOrderedToken) { - return setByteBuffer(i, ((ByteOrderedToken) v).getValue()); - } else if (v instanceof RandomToken) { - return setBigInteger(i, ((RandomToken) v).getValue()); - } else { - throw new IllegalArgumentException("Unsupported token type " + v.getClass()); - } - } - - /** - * Sets the {@code i}th value to the provided Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #set(int, Object, GenericType)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setList( - int i, @Nullable List v, @NonNull Class elementsClass) { - return set(i, v, GenericType.listOf(elementsClass)); - } - - /** - * Sets the {@code i}th value to the provided Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #set(int, Object, GenericType)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setSet( - int i, @Nullable Set v, @NonNull Class elementsClass) { - return set(i, v, GenericType.setOf(elementsClass)); - } - - /** - * Sets the {@code i}th value to the provided Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #set(int, Object, GenericType)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setMap( - int i, - @Nullable Map v, - @NonNull Class keyClass, - @NonNull Class valueClass) { - return set(i, v, GenericType.mapOf(keyClass, valueClass)); - } - - /** - * Sets the {@code i}th value to the provided user defined type value. - * - *

By default, this works with CQL user-defined types. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUdtValue(int i, @Nullable UdtValue v) { - return set(i, v, UdtValue.class); - } - - /** - * Sets the {@code i}th value to the provided tuple value. - * - *

By default, this works with CQL tuples. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setTupleValue(int i, @Nullable TupleValue v) { - return set(i, v, TupleValue.class); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java deleted file mode 100644 index afe9ba59f64..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java +++ /dev/null @@ -1,729 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to set its values via a name. */ -public interface SettableByName> - extends SettableByIndex, AccessibleByName { - - /** - * Sets the raw binary representation of the value for all occurrences of {@code name}. - * - *

This is primarily for internal use; you'll likely want to use one of the typed setters - * instead, to pass a higher-level Java representation. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @param v the raw value, or {@code null} to set the CQL value {@code NULL}. For performance - * reasons, this is the actual instance used internally. If pass in a buffer that you're going - * to modify elsewhere in your application, make sure to {@link ByteBuffer#duplicate() - * duplicate} it beforehand. If you change the buffer's index or its contents in any way, - * further usage of this data will have unpredictable results. - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBytesUnsafe(@NonNull String name, @Nullable ByteBuffer v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setBytesUnsafe(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - @NonNull - @Override - default DataType getType(@NonNull String name) { - return getType(firstIndexOf(name)); - } - - /** - * Sets the value for all occurrences of {@code name} to CQL {@code NULL}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToNull(@NonNull String name) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setToNull(i); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name}, using the given codec for the conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull String name, @Nullable ValueT v, @NonNull TypeCodec codec) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).set(i, v, codec); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #set(int, Object, Class)} instead, which may perform slightly better. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull String name, @Nullable ValueT v, @NonNull GenericType targetType) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).set(i, v, targetType); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Returns the value for all occurrences of {@code name}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #set(int, Object, GenericType)} instead. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull String name, @Nullable ValueT v, @NonNull Class targetClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).set(i, v, targetClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBoolean(@NonNull String name, boolean v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setBoolean(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias - * for{@link #setBoolean(String, boolean)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setBool(@NonNull String name, boolean v) { - return setBoolean(name, v); - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByte(@NonNull String name, byte v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setByte(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Double.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setDouble(@NonNull String name, double v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setDouble(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Float.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setFloat(@NonNull String name, float v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setFloat(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Integer.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInt(@NonNull String name, int v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setInt(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Long.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLong(@NonNull String name, long v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setLong(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Short.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setShort(@NonNull String name, short v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setShort(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInstant(@NonNull String name, @Nullable Instant v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setInstant(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java local date. - * - *

By default, this works with CQL type {@code date}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalDate(@NonNull String name, @Nullable LocalDate v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setLocalDate(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java local time. - * - *

By default, this works with CQL type {@code time}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalTime(@NonNull String name, @Nullable LocalTime v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setLocalTime(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByteBuffer(@NonNull String name, @Nullable ByteBuffer v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setByteBuffer(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setString(@NonNull String name, @Nullable String v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setString(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigInteger(@NonNull String name, @Nullable BigInteger v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setBigInteger(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigDecimal(@NonNull String name, @Nullable BigDecimal v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setBigDecimal(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUuid(@NonNull String name, @Nullable UUID v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setUuid(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInetAddress(@NonNull String name, @Nullable InetAddress v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setInetAddress(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided duration. - * - *

By default, this works with CQL type {@code duration}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setCqlDuration(@NonNull String name, @Nullable CqlDuration v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setCqlDuration(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided vector. - * - *

By default, this works with CQL type {@code vector}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setVector( - @NonNull String name, - @Nullable CqlVector v, - @NonNull Class elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setVector(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided token. - * - *

This works with the CQL type matching the partitioner in use for this cluster: {@code - * bigint} for {@code Murmur3Partitioner}, {@code blob} for {@code ByteOrderedPartitioner}, and - * {@code varint} for {@code RandomPartitioner}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToken(@NonNull String name, @NonNull Token v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setToken(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #set(int, Object, GenericType)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setList( - @NonNull String name, @Nullable List v, @NonNull Class elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setList(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #set(int, Object, GenericType)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setSet( - @NonNull String name, @Nullable Set v, @NonNull Class elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setSet(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #set(int, Object, GenericType)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setMap( - @NonNull String name, - @Nullable Map v, - @NonNull Class keyClass, - @NonNull Class valueClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setMap(i, v, keyClass, valueClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided user defined type value. - * - *

By default, this works with CQL user-defined types. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUdtValue(@NonNull String name, @Nullable UdtValue v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setUdtValue(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided tuple value. - * - *

By default, this works with CQL tuples. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setTupleValue(@NonNull String name, @Nullable TupleValue v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setTupleValue(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java deleted file mode 100644 index 0fde2d87e71..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.TupleType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Driver-side representation of a CQL {@code tuple} value. - * - *

It is an ordered set of anonymous, typed fields. - * - *

A tuple value is attached if and only if its type is attached (see {@link Detachable}). - * - *

The default implementation returned by the driver is mutable and serializable. If you write - * your own implementation, serializability is not mandatory, but recommended for use with some - * 3rd-party tools like Apache Spark ™. - */ -public interface TupleValue extends GettableByIndex, SettableByIndex { - - @NonNull - TupleType getType(); - - /** - * Returns a string representation of the contents of this tuple. - * - *

This produces a CQL literal, for example: - * - *

-   * (1,'test')
-   * 
- * - * Notes: - * - *
    - *
  • This method does not sanitize its output in any way. In particular, no effort is made to - * limit output size: all fields are included, and large strings or blobs will be appended - * as-is. - *
  • Be mindful of how you expose the result. For example, in high-security environments, it - * might be undesirable to leak data in application logs. - *
- */ - @NonNull - default String getFormattedContents() { - return codecRegistry().codecFor(getType(), TupleValue.class).format(this); - } - - /** - * Returns an abstract representation of this object, that may not include the tuple's - * contents. - * - *

The driver's built-in {@link TupleValue} implementation returns the default format of {@link - * Object#toString()}: the class name, followed by the at-sign and the hash code of the object. - * - *

Omitting the contents was a deliberate choice, because we feel it would make it too easy to - * accidentally leak data (e.g. in application logs). If you want the contents, use {@link - * #getFormattedContents()}. - */ - @Override - String toString(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java deleted file mode 100644 index 7e8bc80793b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Driver-side representation of an instance of a CQL user defined type. - * - *

It is an ordered set of named, typed fields. - * - *

A tuple value is attached if and only if its type is attached (see {@link Detachable}). - * - *

The default implementation returned by the driver is mutable and serializable. If you write - * your own implementation, serializability is not mandatory, but recommended for use with some - * 3rd-party tools like Apache Spark ™. - */ -public interface UdtValue - extends GettableById, GettableByName, SettableById, SettableByName { - - @NonNull - UserDefinedType getType(); - - /** - * Returns a string representation of the contents of this UDT. - * - *

This produces a CQL literal, for example: - * - *

-   * {street:'42 Main Street',zip:12345}
-   * 
- * - * Notes: - * - *
    - *
  • This method does not sanitize its output in any way. In particular, no effort is made to - * limit output size: all fields are included, and large strings or blobs will be appended - * as-is. - *
  • Be mindful of how you expose the result. For example, in high-security environments, it - * might be undesirable to leak data in application logs. - *
- */ - @NonNull - default String getFormattedContents() { - return codecRegistry().codecFor(getType(), UdtValue.class).format(this); - } - - /** - * Returns an abstract representation of this object, that may not include the UDT's - * contents. - * - *

The driver's built-in {@link UdtValue} implementation returns the default format of {@link - * Object#toString()}: the class name, followed by the at-sign and the hash code of the object. - * - *

Omitting the contents was a deliberate choice, because we feel it would make it too easy to - * accidentally leak data (e.g. in application logs). If you want the contents, use {@link - * #getFormattedContents()}. - */ - @Override - String toString(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java b/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java deleted file mode 100644 index d1897f66e16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.detach; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** @see Detachable */ -public interface AttachmentPoint { - AttachmentPoint NONE = - new AttachmentPoint() { - @NonNull - @Override - public ProtocolVersion getProtocolVersion() { - return ProtocolVersion.DEFAULT; - } - - @NonNull - @Override - public CodecRegistry getCodecRegistry() { - return CodecRegistry.DEFAULT; - } - }; - - @NonNull - ProtocolVersion getProtocolVersion(); - - /** - * Note that the default registry implementation returned by the driver also implements {@link - * MutableCodecRegistry}, which allows you to register new codecs at runtime. You can safely cast - * the result of this method (as long as you didn't extend the driver context to plug a custom - * registry implementation). - */ - @NonNull - CodecRegistry getCodecRegistry(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java b/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java deleted file mode 100644 index 0c92bb727ea..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.detach; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.Data; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Defines the contract of an object that can be detached and reattached to a driver instance. - * - *

The driver's {@link Data data structure} types (such as rows, tuples and UDT values) store - * their data as byte buffers, and only decode it on demand, when the end user accesses a particular - * column or field. - * - *

Decoding requires a {@link ProtocolVersion} (because the encoded format might change across - * versions), and a {@link CodecRegistry} (because the user might ask us to decode to a custom - * type). - * - *

    - *
  • When a data container was obtained from a driver instance (for example, reading a row from - * a result set, or reading a value from a UDT column), it is attached: its protocol - * version and registry are those of the driver. - *
  • When it is created manually by the user (for example, creating an instance from a manually - * created {@link TupleType}), it is detached: it uses {@link - * ProtocolVersion#DEFAULT} and {@link CodecRegistry#DEFAULT}. - *
- * - * The only way an attached object can become detached is if it is serialized and deserialized - * (referring to Java serialization). - * - *

A detached object can be reattached to a driver instance. This is done automatically if you - * pass the object to one of the driver methods, for example if you use a manually created tuple as - * a query parameter. - */ -public interface Detachable { - boolean isDetached(); - - void attach(@NonNull AttachmentPoint attachmentPoint); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java deleted file mode 100644 index de0d9db4ebd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.loadbalancing; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; -import java.util.Optional; -import java.util.Queue; -import java.util.UUID; - -/** Decides which Cassandra nodes to contact for each query. */ -public interface LoadBalancingPolicy extends AutoCloseable { - - /** - * Returns an optional {@link RequestTracker} to be registered with the session. Registering a - * request tracker allows load-balancing policies to track node latencies in order to pick the - * fastest ones. - * - *

This method is invoked only once during session configuration, and before any other methods - * in this interface. Note that at this point, the driver hasn't connected to any node yet. - * - * @since 4.13.0 - */ - @NonNull - default Optional getRequestTracker() { - return Optional.empty(); - } - - /** - * Initializes this policy with the nodes discovered during driver initialization. - * - *

This method is guaranteed to be called exactly once per instance, and before any other - * method in this interface except {@link #getRequestTracker()}. At this point, the driver has - * successfully connected to one of the contact points, and performed a first refresh of topology - * information (by default, the contents of {@code system.peers}), to discover other nodes in the - * cluster. - * - *

This method must call {@link DistanceReporter#setDistance(Node, NodeDistance) - * distanceReporter.setDistance} for each provided node (otherwise that node will stay at distance - * {@link NodeDistance#IGNORED IGNORED}, and the driver won't open connections to it). Note that - * the node's {@link Node#getState() state} can be either {@link NodeState#UP UP} (for the - * successful contact point), {@link NodeState#DOWN DOWN} (for contact points that were tried - * unsuccessfully), or {@link NodeState#UNKNOWN UNKNOWN} (for contact points that weren't tried, - * or any other node discovered from the topology refresh). Node states may be updated - * concurrently while this method executes, but if so this policy will get notified after this - * method has returned, through other methods such as {@link #onUp(Node)} or {@link - * #onDown(Node)}. - * - * @param nodes all the nodes that are known to exist in the cluster (regardless of their state) - * at the time of invocation. - * @param distanceReporter an object that will be used by the policy to signal distance changes. - * Implementations will typically store this in a field, since new nodes may get {@link - * #onAdd(Node) added} later and will need to have their distance set (or the policy might - * change distances dynamically over time). - */ - void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter); - - /** Returns map containing details that impact C* node connectivity. */ - @NonNull - default Map getStartupConfiguration() { - return Collections.emptyMap(); - } - - /** - * Returns the coordinators to use for a new query. - * - *

Each new query will call this method, and try the returned nodes sequentially. - * - * @param request the request that is being routed. Note that this can be null for some internal - * uses. - * @param session the session that is executing the request. Note that this can be null for some - * internal uses. - * @return the list of coordinators to try. This must be a concurrent queue; {@link - * java.util.concurrent.ConcurrentLinkedQueue} is a good choice. - */ - @NonNull - Queue newQueryPlan(@Nullable Request request, @Nullable Session session); - - /** - * Called when a node is added to the cluster. - * - *

The new node will be at distance {@link NodeDistance#IGNORED IGNORED}, and have the state - * {@link NodeState#UNKNOWN UNKNOWN}. - * - *

If this method assigns an active distance to the node, the driver will try to create a - * connection pool to it (resulting in a state change to {@link #onUp(Node) UP} or {@link - * #onDown(Node) DOWN} depending on the outcome). - * - *

If it leaves it at distance {@link NodeDistance#IGNORED IGNORED}, the driver won't attempt - * any connection. The node state will remain unknown, but might be updated later if a topology - * event is received from the cluster. - * - * @see #init(Map, DistanceReporter) - */ - void onAdd(@NonNull Node node); - - /** Called when a node is determined to be up. */ - void onUp(@NonNull Node node); - - /** Called when a node is determined to be down. */ - void onDown(@NonNull Node node); - - /** Called when a node is removed from the cluster. */ - void onRemove(@NonNull Node node); - - /** Called when the cluster that this policy is associated with closes. */ - @Override - void close(); - - /** An object that the policy uses to signal decisions it makes about node distances. */ - interface DistanceReporter { - void setDistance(@NonNull Node node, @NonNull NodeDistance distance); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java deleted file mode 100644 index aaae7957d00..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.loadbalancing; -/** - * Determines how the driver will manage connections to a Cassandra node. - * - *

The distance is assigned by a {@link LoadBalancingPolicy}. - */ -public enum NodeDistance { - /** - * An "active" distance that, indicates that the driver should maintain connections to the node; - * it also marks it as "preferred", meaning that the number or capacity of the connections may be - * higher, and that the node may also have priority for some tasks (for example, being chosen as - * the control host). - */ - LOCAL, - /** - * An "active" distance that, indicates that the driver should maintain connections to the node; - * it also marks it as "less preferred", meaning that the number or capacity of the connections - * may be lower, and that other nodes may have a higher priority for some tasks (for example, - * being chosen as the control host). - */ - REMOTE, - /** - * An "inactive" distance, that indicates that the driver will not open any connection to the - * node. - */ - IGNORED, -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.java b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.java deleted file mode 100644 index 9a5a7f5a894..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.loadbalancing; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A pluggable {@link NodeDistance} evaluator. - * - *

Node distance evaluators are recognized by all the driver built-in load balancing policies. - * They can be specified {@linkplain - * com.datastax.oss.driver.api.core.session.SessionBuilder#withNodeDistanceEvaluator(String, - * NodeDistanceEvaluator) programmatically} or through the configuration (with the {@code - * load-balancing-policy.evaluator.class} option). - * - * @see com.datastax.oss.driver.api.core.session.SessionBuilder#withNodeDistanceEvaluator(String, - * NodeDistanceEvaluator) - */ -@FunctionalInterface -public interface NodeDistanceEvaluator { - - /** - * Evaluates the distance to apply to the given node. - * - *

This method will be invoked each time the {@link LoadBalancingPolicy} processes a topology - * or state change, and will be passed the node being inspected, and the local datacenter name (or - * null if none is defined). If it returns a non-null {@link NodeDistance}, the policy will - * suggest that distance for the node; if it returns null, the policy will assign a default - * distance instead, based on its internal algorithm for computing node distances. - * - * @param node The node to assign a new distance to. - * @param localDc The local datacenter name, if defined, or null otherwise. - * @return The {@link NodeDistance} to assign to the node, or null to let the policy decide. - */ - @Nullable - NodeDistance evaluateDistance(@NonNull Node node, @Nullable String localDc); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java deleted file mode 100644 index 530f2ad38ac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import java.net.SocketAddress; - -/** - * Encapsulates the information needed to open connections to a node. - * - *

By default, the driver assumes plain TCP connections, and this is just a wrapper around an - * {@link InetSocketAddress}. However, more complex deployment scenarios might use a custom - * implementation that contains additional information; for example, if the nodes are accessed - * through a proxy with SNI routing, an SNI server name is needed in addition to the proxy address. - */ -public interface EndPoint { - - /** - * Resolves this instance to a socket address. - * - *

This will be called each time the driver opens a new connection to the node. The returned - * address cannot be null. - */ - @NonNull - SocketAddress resolve(); - - /** - * Returns an alternate string representation for use in node-level metric names. - * - *

Because metrics names are path-like, dot-separated strings, raw IP addresses don't make very - * good identifiers. So this method will typically replace the dots by another character, for - * example {@code 127_0_0_1_9042}. - */ - @NonNull - String asMetricPrefix(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java deleted file mode 100644 index 21ad200abed..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -/** - * The metadata of the Cassandra cluster that this driver instance is connected to. - * - *

Updates to this object are guaranteed to be atomic: the node list, schema, and token metadata - * are immutable, and will always be consistent for a given metadata instance. The node instances - * are the only mutable objects in the hierarchy, and some of their fields will be modified - * dynamically (in particular the node state). - * - * @see Session#getMetadata() - */ -public interface Metadata { - /** - * The nodes known to the driver, indexed by their unique identifier ({@code host_id} in {@code - * system.local}/{@code system.peers}). This might include nodes that are currently viewed as - * down, or ignored by the load balancing policy. - */ - @NonNull - Map getNodes(); - - /** - * Finds the node with the given {@linkplain Node#getEndPoint() connection information}, if it - * exists. - * - *

Note that this method performs a linear search of {@link #getNodes()}. - */ - @NonNull - default Optional findNode(@NonNull EndPoint endPoint) { - for (Node node : getNodes().values()) { - if (node.getEndPoint().equals(endPoint)) { - return Optional.of(node); - } - } - return Optional.empty(); - } - - /** - * Finds the node with the given untranslated {@linkplain Node#getBroadcastRpcAddress() - * broadcast RPC address}, if it exists. - * - *

Note that this method performs a linear search of {@link #getNodes()}. - */ - @NonNull - default Optional findNode(@NonNull InetSocketAddress broadcastRpcAddress) { - for (Node node : getNodes().values()) { - Optional o = node.getBroadcastRpcAddress(); - if (o.isPresent() && o.get().equals(broadcastRpcAddress)) { - return Optional.of(node); - } - } - return Optional.empty(); - } - - /** - * The keyspaces defined in this cluster. - * - *

Note that schema metadata can be disabled or restricted to a subset of keyspaces, therefore - * this map might be empty or incomplete. - * - * @see DefaultDriverOption#METADATA_SCHEMA_ENABLED - * @see Session#setSchemaMetadataEnabled(Boolean) - * @see DefaultDriverOption#METADATA_SCHEMA_REFRESHED_KEYSPACES - */ - @NonNull - Map getKeyspaces(); - - @NonNull - default Optional getKeyspace(@NonNull CqlIdentifier keyspaceId) { - return Optional.ofNullable(getKeyspaces().get(keyspaceId)); - } - - /** - * Shortcut for {@link #getKeyspace(CqlIdentifier) - * getKeyspace(CqlIdentifier.fromCql(keyspaceName))}. - */ - @NonNull - default Optional getKeyspace(@NonNull String keyspaceName) { - return getKeyspace(CqlIdentifier.fromCql(keyspaceName)); - } - - /** - * The token map for this cluster. - * - *

Note that this property might be absent if token metadata was disabled, or if there was a - * runtime error while computing the map (this would generate a warning log). - * - * @see DefaultDriverOption#METADATA_TOKEN_MAP_ENABLED - */ - @NonNull - Optional getTokenMap(); - - /** - * The cluster name to which this session is connected. The Optional returned should contain the - * value from the server for system.local.cluster_name. - * - *

Note that this method has a default implementation for backwards compatibility. It is - * expected that any implementing classes override this method. - */ - @NonNull - default Optional getClusterName() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java deleted file mode 100644 index fbfc748dd52..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -/** - * Metadata about a Cassandra node in the cluster. - * - *

This object is mutable, all of its properties may be updated at runtime to reflect the latest - * state of the node. - * - *

Note that the default implementation returned by the driver uses reference equality. A - * {@link Session} will always return the same instance for a given {@link #getHostId() host id}. - * However, instances coming from different sessions will not be equal, even if they refer to the - * same host id. - */ -public interface Node { - - /** - * The information that the driver uses to connect to the node. - * - *

In default deployments, the endpoint address is usually derived from the node's {@linkplain - * #getBroadcastAddress() broadcast RPC address} for peers hosts. For the control host however, - * the driver doesn't rely on that value because it may be wrong (see CASSANDRA-11181); instead, - * it simply uses the control connection's own endpoint. - * - *

When behind a proxy, the endpoint reported here usually refers to the proxy itself, and is - * unrelated to the node's broadcast RPC address. - */ - @NonNull - EndPoint getEndPoint(); - - /** - * The node's broadcast RPC address. That is, the address that the node expects clients to connect - * to. - * - *

This is computed from values reported in {@code system.local.rpc_address} and {@code - * system.peers.rpc_address} (Cassandra 3), or {@code system.local.rpc_address}, {@code - * system.local.rpc_port}, {@code system.peers_v2.native_address} and {@code - * system.peers_v2.native_port} (Cassandra 4+). - * - *

However, the address reported here might not be what the driver uses directly; to know which - * address the driver is really using to connect to this node, check {@link #getEndPoint()}. - * - *

This may not be known at all times. In particular, some Cassandra versions (less than - * 2.0.16, 2.1.6 or 2.2.0-rc1) don't store it in the {@code system.local} table, so this will be - * unknown for the control node, until the control connection reconnects to another node. - * - * @see CASSANDRA-9436 (where the - * information was added to system.local) - */ - @NonNull - Optional getBroadcastRpcAddress(); - - /** - * The node's broadcast address. That is, the address that other nodes use to communicate with - * that node. - * - *

This is computed from values reported in {@code system.local.broadcast_address} and {@code - * system.peers.peer} (Cassandra 3), or {@code system.local.broadcast_address}, {@code - * system.local.broadcast_port}, {@code system.peers_v2.peer} and {@code - * system.peers_v2.peer_port} (Cassandra 4+). If the port is set to 0 it is unknown. - * - *

This may not be known at all times. In particular, some Cassandra versions (less than - * 2.0.16, 2.1.6 or 2.2.0-rc1) don't store it in the {@code system.local} table, so this will be - * unknown for the control node, until the control connection reconnects to another node. - * - * @see CASSANDRA-9436 (where the - * information was added to system.local) - */ - @NonNull - Optional getBroadcastAddress(); - - /** - * The node's listen address. That is, the address that the Cassandra process binds to. - * - *

This is computed from values reported in {@code system.local.listen_address} (Cassandra 3), - * or {@code system.local.listen_address} and {@code system.local.listen_port} (Cassandra 4+). If - * the port is set to 0 it is unknown. - * - *

This may not be known at all times. In particular, current Cassandra versions (up to 3.11) - * only store it in {@code system.local}, so this will be known only for the control node. - */ - @NonNull - Optional getListenAddress(); - - /** - * The datacenter that this node belongs to (according to the server-side snitch). - * - *

This should be non-null in a healthy deployment, but the driver will still function, and - * report {@code null} here, if the server metadata was corrupted. - */ - @Nullable - String getDatacenter(); - - /** - * The rack that this node belongs to (according to the server-side snitch). - * - *

This should be non-null in a healthy deployment, but the driver will still function, and - * report {@code null} here, if the server metadata was corrupted. - */ - @Nullable - String getRack(); - - /** - * The Cassandra version of the server. - * - *

This should be non-null in a healthy deployment, but the driver will still function, and - * report {@code null} here, if the server metadata was corrupted or the reported version could - * not be parsed. - */ - @Nullable - Version getCassandraVersion(); - - /** - * An additional map of free-form properties. - * - *

This is intended for future evolution or custom driver extensions. The contents of this map - * are unspecified and may change at any point in time, always check for the existence of a key - * before using it. - * - *

Note that the returned map is immutable: if the properties change, this is reflected by - * publishing a new map instance, therefore you must call this method again to see the changes. - */ - @NonNull - Map getExtras(); - - @NonNull - NodeState getState(); - - /** - * The last time that this node transitioned to the UP state, in milliseconds since the epoch, or - * -1 if it's not up at the moment. - */ - long getUpSinceMillis(); - - /** - * The total number of active connections currently open by this driver instance to the node. This - * can be either pooled connections, or the control connection. - */ - int getOpenConnections(); - - /** - * Whether the driver is currently trying to reconnect to this node. That is, whether the active - * connection count is below the value mandated by the configuration. This does not mean that the - * node is down, there could be some active connections but not enough. - */ - boolean isReconnecting(); - - /** - * The distance assigned to this node by the {@link LoadBalancingPolicy}, that controls certain - * aspects of connection management. - * - *

This is exposed here for information only. Distance events are handled internally by the - * driver. - */ - @NonNull - NodeDistance getDistance(); - - /** - * The host ID that is assigned to this node by Cassandra. This value can be used to uniquely - * identify a node even when the underling IP address changes. - * - *

This information is always present once the session has initialized. However, there is a - * narrow corner case where a driver client can observe a null value: if a {@link - * NodeStateListener} is registered, the very first {@code onUp} call will reference a node - * that has a null id (that node is the initial contact point, and the driver hasn't read host ids - * from {@code system.local} and {@code system.peers} yet). Beyond that point — including - * any other {@code onUp} call — the host id will always be present. - * - *

-   * CqlSession session = CqlSession.builder()
-   *     .withNodeStateListener(
-   *         new NodeStateListenerBase() {
-   *           @Override
-   *           public void onUp(@NonNull Node node) {
-   *             // node.getHostId() == null for the first invocation only
-   *           }
-   *         })
-   *     .build();
-   * 
- */ - @Nullable - UUID getHostId(); - - /** - * The current version that is associated with the node's schema. - * - *

This should be non-null in a healthy deployment, but the driver will still function, and - * report {@code null} here, if the server metadata was corrupted. - */ - @Nullable - UUID getSchemaVersion(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java deleted file mode 100644 index 2f2460886ef..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import java.net.InetSocketAddress; - -/** The state of a node, as viewed from the driver. */ -public enum NodeState { - /** - * The driver has never tried to connect to the node, nor received any topology events about it. - * - *

This happens when nodes are first added to the cluster, and will persist if your {@link - * LoadBalancingPolicy} decides to ignore them. Since the driver does not connect to them, the - * only way it can assess their states is from topology events. - */ - UNKNOWN, - /** - * A node is considered up in either of the following situations: 1) the driver has at least one - * active connection to the node, or 2) the driver is not actively trying to connect to the node - * (because it's ignored by the {@link LoadBalancingPolicy}), but it has received a topology event - * indicating that the node is up. - */ - UP, - /** - * A node is considered down in either of the following situations: 1) the driver has lost all - * connections to the node (and is currently trying to reconnect), or 2) the driver is not - * actively trying to connect to the node (because it's ignored by the {@link - * LoadBalancingPolicy}), but it has received a topology event indicating that the node is down. - */ - DOWN, - /** - * The node was forced down externally, the driver will never try to reconnect to it, whatever the - * {@link LoadBalancingPolicy} says. - * - *

This is used for edge error cases, for example when the driver detects that it's trying to - * connect to a node that does not belong to the Cassandra cluster (e.g. a wrong address was - * provided in the contact points). It can also be {@link - * TopologyEvent#forceDown(InetSocketAddress) triggered explicitly} by components (for example a - * custom load balancing policy) that want to limit the number of nodes that the driver connects - * to. - */ - FORCED_DOWN, -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java deleted file mode 100644 index bb52e9d1496..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A listener that gets notified when nodes states change. - * - *

Implementations of this interface can be registered either via the configuration (see {@code - * reference.conf} in the manual or core driver JAR), or programmatically via {@link - * SessionBuilder#addNodeStateListener(NodeStateListener)}. - * - *

Note that the methods defined by this interface will be executed by internal driver threads, - * and are therefore expected to have short execution times. If you need to perform long - * computations or blocking calls in response to schema change events, it is strongly recommended to - * schedule them asynchronously on a separate thread provided by your application code. - * - *

If you implement this interface but don't need to implement all the methods, extend {@link - * NodeStateListenerBase}. - * - *

If your implementation of this interface requires access to a fully-initialized session, - * consider wrapping it in a {@link SafeInitNodeStateListener}. - */ -public interface NodeStateListener extends AutoCloseable { - - /** - * Invoked when a node is first added to the cluster. - * - *

The node is not up yet at this point. {@link #onUp(Node)} will be notified later if the - * driver successfully connects to the node (provided that a session is opened and the node is not - * {@link NodeDistance#IGNORED ignored}), or receives a topology event for it. - * - *

This method is not invoked for the contact points provided at initialization. It is - * however for new nodes discovered during the full node list refresh after the first connection. - */ - void onAdd(@NonNull Node node); - - /** Invoked when a node's state switches to {@link NodeState#UP}. */ - void onUp(@NonNull Node node); - - /** - * Invoked when a node's state switches to {@link NodeState#DOWN} or {@link - * NodeState#FORCED_DOWN}. - */ - void onDown(@NonNull Node node); - - /** - * Invoked when a node leaves the cluster. - * - *

This can be triggered by a topology event, or during a full node list refresh if the node is - * absent from the new list. - */ - void onRemove(@NonNull Node node); - - /** - * Invoked when the session is ready to process user requests. - * - *

This corresponds to the moment when {@link SessionBuilder#build()} returns, or the future - * returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization fails, - * this method will not get called. - * - *

Listener methods are invoked from different threads; if you store the session in a field, - * make it at least volatile to guarantee proper publication. - * - *

Note that this method will not be the first one invoked on the listener; the driver emits - * node events before that, during the initialization of the session: - * - *

    - *
  • First the driver shuffles the contact points, and tries each one sequentially. For any - * contact point that can't be reached, {@link #onDown(Node)} is invoked; for the one that - * eventually succeeds, {@link #onUp(Node)} is invoked and that node becomes the control - * node (if none succeeds, the session initialization fails and the process stops here). - *
  • The control node's {@code system.peers} table is inspected to discover the remaining - * nodes in the cluster. For any node that wasn't already a contact point, {@link - * #onAdd(Node)} is invoked; for any contact point that doesn't have a corresponding entry - * in the table, {@link #onRemove(Node)} is invoked; - *
  • The load balancing policy computes the nodes' {@linkplain NodeDistance distances}, and, - * for each LOCAL or REMOTE node, the driver creates a connection pool. If at least one - * pooled connection can be established, {@link #onUp(Node)} is invoked; otherwise, {@link - * #onDown(Node)} is invoked (no additional event is emitted for the control node, it is - * considered up since we already have a connection to it). - *
  • Once all the pools are created, the session is fully initialized and this method is - * invoked. - *
- * - * If you're not interested in those init events, or want to delay them until after the session is - * ready, take a look at {@link SafeInitNodeStateListener}. - * - *

This method's default implementation is empty. - */ - default void onSessionReady(@NonNull Session session) {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java deleted file mode 100644 index 0b747a00084..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Convenience class for listener implementations that that don't need to override all methods (all - * methods in this class are empty). 
- */ -public class NodeStateListenerBase implements NodeStateListener { - - @Override - public void onAdd(@NonNull Node node) { - // nothing to do - } - - @Override - public void onUp(@NonNull Node node) { - // nothing to do - } - - @Override - public void onDown(@NonNull Node node) { - // nothing to do - } - - @Override - public void onRemove(@NonNull Node node) { - // nothing to do - } - - @Override - public void close() throws Exception { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.java deleted file mode 100644 index c33f7616b5a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.BiConsumer; -import net.jcip.annotations.GuardedBy; - -/** - * A node state listener wrapper that delays (or ignores) init events until after the session is - * ready. - * - *

By default, the driver calls node state events, such as {@link #onUp} and {@link #onAdd}, - * before the session is ready; see {@link NodeStateListener#onSessionReady(Session)} for a detailed - * explanation. This can make things complicated if your listener implementation needs the session - * to process those events. - * - *

This class wraps another implementation to shield it from those details: - * - *

- * NodeStateListener delegate = ... // your listener implementation
- *
- * SafeInitNodeStateListener wrapper =
- *     new SafeInitNodeStateListener(delegate, true);
- *
- * CqlSession session = CqlSession.builder()
- *     .withNodeStateListener(wrapper)
- *     .build();
- * 
- * - * With this setup, {@code delegate.onSessionReady} is guaranteed to be invoked first, before any - * other method. The second constructor argument indicates what to do with the method calls that - * were ignored before that: - * - *
    - *
  • if {@code true}, they are recorded, and replayed to {@code delegate} immediately after - * {@link #onSessionReady}. They are guaranteed to happen in the original order, and before - * any post-initialization events. - *
  • if {@code false}, they are discarded. - *
- * - *

Usage in non-blocking applications: beware that this class is not lock-free. It is implemented - * with locks for internal coordination. - * - * @since 4.6.0 - */ -public class SafeInitNodeStateListener implements NodeStateListener { - - private final NodeStateListener delegate; - private final boolean replayInitEvents; - - // Write lock: recording init events or setting sessionReady - // Read lock: reading init events or checking sessionReady - private final ReadWriteLock lock = new ReentrantReadWriteLock(); - - @GuardedBy("lock") - private boolean sessionReady; - - @GuardedBy("lock") - private final List initEvents = new ArrayList<>(); - - /** - * Creates a new instance. - * - * @param delegate the wrapped listener, to which method invocations will be forwarded. - * @param replayInitEvents whether to record events during initialization and replay them to the - * child listener once it's created, or just ignore them. - */ - public SafeInitNodeStateListener(@NonNull NodeStateListener delegate, boolean replayInitEvents) { - this.delegate = Objects.requireNonNull(delegate); - this.replayInitEvents = replayInitEvents; - } - - @Override - public void onSessionReady(@NonNull Session session) { - lock.writeLock().lock(); - try { - if (!sessionReady) { - sessionReady = true; - delegate.onSessionReady(session); - if (replayInitEvents) { - for (InitEvent event : initEvents) { - event.invoke(delegate); - } - } - } - } finally { - lock.writeLock().unlock(); - } - } - - @Override - public void onAdd(@NonNull Node node) { - onEvent(node, InitEvent.Type.ADD); - } - - @Override - public void onUp(@NonNull Node node) { - onEvent(node, InitEvent.Type.UP); - } - - @Override - public void onDown(@NonNull Node node) { - onEvent(node, InitEvent.Type.DOWN); - } - - @Override - public void onRemove(@NonNull Node node) { - onEvent(node, InitEvent.Type.REMOVE); - } - - private void onEvent(Node node, InitEvent.Type eventType) { - - // Cheap case: the session is ready, just delegate - 
lock.readLock().lock(); - try { - if (sessionReady) { - eventType.listenerMethod.accept(delegate, node); - return; - } - } finally { - lock.readLock().unlock(); - } - - // Otherwise, we must acquire the write lock to record the event - if (replayInitEvents) { - lock.writeLock().lock(); - try { - // Must re-check because we completely released the lock for a short duration - if (sessionReady) { - eventType.listenerMethod.accept(delegate, node); - } else { - initEvents.add(new InitEvent(node, eventType)); - } - } finally { - lock.writeLock().unlock(); - } - } - } - - @Override - public void close() throws Exception { - delegate.close(); - } - - private static class InitEvent { - enum Type { - ADD(NodeStateListener::onAdd), - UP(NodeStateListener::onUp), - DOWN(NodeStateListener::onDown), - REMOVE(NodeStateListener::onRemove), - ; - - @SuppressWarnings("ImmutableEnumChecker") - final BiConsumer listenerMethod; - - Type(BiConsumer listenerMethod) { - this.listenerMethod = listenerMethod; - } - } - - final Node node; - final Type type; - - InitEvent(@NonNull Node node, @NonNull Type type) { - this.node = Objects.requireNonNull(node); - this.type = Objects.requireNonNull(type); - } - - void invoke(@NonNull NodeStateListener target) { - type.listenerMethod.accept(Objects.requireNonNull(target), node); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java deleted file mode 100644 index 7746bf3382e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Set; - -/** - * Utility component to work with the tokens of a given driver instance. - * - *

Note that the methods that take a keyspace argument are based on schema metadata, which can be - * disabled or restricted to a subset of keyspaces; therefore these methods might return empty - * results for some or all of the keyspaces. - * - * @see DefaultDriverOption#METADATA_SCHEMA_ENABLED - * @see Session#setSchemaMetadataEnabled(Boolean) - * @see DefaultDriverOption#METADATA_SCHEMA_REFRESHED_KEYSPACES - */ -public interface TokenMap { - - /** Builds a token from its string representation. */ - @NonNull - Token parse(@NonNull String tokenString); - - /** Formats a token into a string representation appropriate for concatenation in a CQL query. */ - @NonNull - String format(@NonNull Token token); - - /** - * Builds a token from a partition key. - * - * @param partitionKey the partition key components, in their serialized form (which can be - * obtained with {@link TypeCodec#encode(Object, ProtocolVersion)}. Neither the individual - * components, nor the vararg array itself, can be {@code null}. - */ - @NonNull - Token newToken(@NonNull ByteBuffer... partitionKey); - - @NonNull - TokenRange newTokenRange(@NonNull Token start, @NonNull Token end); - - /** The token ranges that define data distribution on the ring. */ - @NonNull - Set getTokenRanges(); - - /** The token ranges for which a given node is the primary replica. */ - @NonNull - Set getTokenRanges(Node node); - - /** - * The tokens owned by the given node. - * - *

This is functionally equivalent to {@code getTokenRanges(node).map(r -> r.getEnd())}. Note - * that the set is rebuilt every time you call this method. - */ - @NonNull - default Set getTokens(@NonNull Node node) { - ImmutableSet.Builder result = ImmutableSet.builder(); - for (TokenRange range : getTokenRanges(node)) { - result.add(range.getEnd()); - } - return result.build(); - } - - /** The token ranges that are replicated on the given node, for the given keyspace. */ - @NonNull - Set getTokenRanges(@NonNull CqlIdentifier keyspace, @NonNull Node replica); - - /** - * Shortcut for {@link #getTokenRanges(CqlIdentifier, Node) - * getTokenRanges(CqlIdentifier.fromCql(keyspaceName), replica)}. - */ - @NonNull - default Set getTokenRanges(@NonNull String keyspaceName, @NonNull Node replica) { - return getTokenRanges(CqlIdentifier.fromCql(keyspaceName), replica); - } - - /** The replicas for a given partition key in the given keyspace. */ - @NonNull - Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull ByteBuffer partitionKey); - - /** - * Shortcut for {@link #getReplicas(CqlIdentifier, ByteBuffer) - * getReplicas(CqlIdentifier.fromCql(keyspaceName), partitionKey)}. - */ - @NonNull - default Set getReplicas(@NonNull String keyspaceName, @NonNull ByteBuffer partitionKey) { - return getReplicas(CqlIdentifier.fromCql(keyspaceName), partitionKey); - } - - /** The replicas for a given token in the given keyspace. */ - @NonNull - Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull Token token); - - /** - * Shortcut for {@link #getReplicas(CqlIdentifier, Token) - * getReplicas(CqlIdentifier.fromCql(keyspaceName), token)}. - */ - @NonNull - default Set getReplicas(@NonNull String keyspaceName, @NonNull Token token) { - return getReplicas(CqlIdentifier.fromCql(keyspaceName), token); - } - - /** - * The replicas for a given range in the given keyspace. - * - *

It is assumed that the input range does not overlap across multiple node ranges. If the - * range extends over multiple nodes, it only returns the nodes that are replicas for the last - * token of the range. In other words, this method is a shortcut for {@code getReplicas(keyspace, - * range.getEnd())}. - */ - @NonNull - default Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull TokenRange range) { - return getReplicas(keyspace, range.getEnd()); - } - - /** - * Shortcut for {@link #getReplicas(CqlIdentifier, TokenRange) - * getReplicas(CqlIdentifier.fromCql(keyspaceName), range)}. - */ - @NonNull - default Set getReplicas(@NonNull String keyspaceName, @NonNull TokenRange range) { - return getReplicas(CqlIdentifier.fromCql(keyspaceName), range); - } - - /** The name of the partitioner class in use, as reported by the Cassandra nodes. */ - @NonNull - String getPartitionerName(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java deleted file mode 100644 index 35eec88eb45..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Optional; - -/** A CQL aggregate in the schema metadata. */ -public interface AggregateMetadata extends Describable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - FunctionSignature getSignature(); - - /** - * The signature of the final function of this aggregate, or empty if there is none. - * - *

This is the function specified with {@code FINALFUNC} in the {@code CREATE AGGREGATE...} - * statement. It transforms the final value after the aggregation is complete. - */ - @NonNull - Optional getFinalFuncSignature(); - - /** - * The initial state value of this aggregate, or {@code null} if there is none. - * - *

This is the value specified with {@code INITCOND} in the {@code CREATE AGGREGATE...} - * statement. It's passed to the initial invocation of the state function (if that function does - * not accept null arguments). - * - *

The actual type of the returned object depends on the aggregate's {@link #getStateType() - * state type} and on the {@link TypeCodec codec} used to {@link TypeCodec#parse(String) parse} - * the {@code INITCOND} literal. - * - *

If, for some reason, the {@code INITCOND} literal cannot be parsed, a warning will be logged - * and the returned object will be the original {@code INITCOND} literal in its textual, - * non-parsed form. - * - * @return the initial state, or empty if there is none. - */ - @NonNull - Optional getInitCond(); - - /** - * The return type of this aggregate. - * - *

This is the final type of the value computed by this aggregate; in other words, the return - * type of the final function if it is defined, or the state type otherwise. - */ - @NonNull - DataType getReturnType(); - - /** - * The signature of the state function of this aggregate. - * - *

This is the function specified with {@code SFUNC} in the {@code CREATE AGGREGATE...} - * statement. It aggregates the current state with each row to produce a new state. - */ - @NonNull - FunctionSignature getStateFuncSignature(); - - /** - * The state type of this aggregate. - * - *

This is the type specified with {@code STYPE} in the {@code CREATE AGGREGATE...} statement. - * It defines the type of the value that is accumulated as the aggregate iterates through the - * rows. - */ - @NonNull - DataType getStateType(); - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - // An aggregate has no children - return describe(pretty); - } - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - builder - .append("CREATE AGGREGATE ") - .append(getKeyspace()) - .append(".") - .append(getSignature().getName()) - .append("("); - boolean first = true; - for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { - if (first) { - first = false; - } else { - builder.append(","); - } - DataType type = getSignature().getParameterTypes().get(i); - builder.append(type.asCql(false, pretty)); - } - builder - .increaseIndent() - .append(")") - .newLine() - .append("SFUNC ") - .append(getStateFuncSignature().getName()) - .newLine() - .append("STYPE ") - .append(getStateType().asCql(false, pretty)); - - if (getFinalFuncSignature().isPresent()) { - builder.newLine().append("FINALFUNC ").append(getFinalFuncSignature().get().getName()); - } - if (getInitCond().isPresent()) { - Optional formatInitCond = formatInitCond(); - assert formatInitCond.isPresent(); - builder.newLine().append("INITCOND ").append(formatInitCond.get()); - } - return builder.append(";").build(); - } - - /** - * Formats the {@linkplain #getInitCond() initial state value} for inclusion in a CQL statement. 
- */ - @NonNull - Optional formatInitCond(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java deleted file mode 100644 index 97613e2d2f1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -/** The order of a clustering column in a table or materialized view. */ -public enum ClusteringOrder { - ASC, - DESC -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java deleted file mode 100644 index fb91211e2fa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A column in the schema metadata. */ -public interface ColumnMetadata { - - @NonNull - CqlIdentifier getKeyspace(); - - /** - * The identifier of the {@link TableMetadata} or a {@link ViewMetadata} that this column belongs - * to. - */ - @NonNull - CqlIdentifier getParent(); - - @NonNull - CqlIdentifier getName(); - - @NonNull - DataType getType(); - - boolean isStatic(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java deleted file mode 100644 index bf1bf97b19e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A schema element that can be described in terms of CQL {@code CREATE} statements. */ -public interface Describable { - - /** - * Returns a single CQL statement that creates the element. - * - * @param pretty if {@code true}, make the output more human-readable (line breaks, indents, and - * {@link CqlIdentifier#asCql(boolean) pretty identifiers}). If {@code false}, return the - * statement on a single line with minimal formatting. - */ - @NonNull - String describe(boolean pretty); - - /** - * Returns a CQL script that creates the element and all of its children. For example: a schema - * with its tables, materialized views, types, etc. A table with its indices. - * - * @param pretty if {@code true}, make the output more human-readable (line breaks, indents, and - * {@link CqlIdentifier#asCql(boolean) pretty identifiers}). If {@code false}, return each - * statement on a single line with minimal formatting. 
- */ - @NonNull - String describeWithChildren(boolean pretty); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java deleted file mode 100644 index ed2d4d780de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** A CQL function in the schema metadata. */ -public interface FunctionMetadata extends Describable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - FunctionSignature getSignature(); - - /** - * The names of the parameters. 
This is in the same order as {@code - * getSignature().getParameterTypes()} - */ - @NonNull - List getParameterNames(); - - @NonNull - String getBody(); - - boolean isCalledOnNullInput(); - - @NonNull - String getLanguage(); - - @NonNull - DataType getReturnType(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - builder - .append("CREATE FUNCTION ") - .append(getKeyspace()) - .append(".") - .append(getSignature().getName()) - .append("("); - boolean first = true; - for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { - if (first) { - first = false; - } else { - builder.append(","); - } - DataType type = getSignature().getParameterTypes().get(i); - CqlIdentifier name = getParameterNames().get(i); - builder.append(name).append(" ").append(type.asCql(false, pretty)); - } - return builder - .append(")") - .increaseIndent() - .newLine() - .append(isCalledOnNullInput() ? "CALLED ON NULL INPUT" : "RETURNS NULL ON NULL INPUT") - .newLine() - .append("RETURNS ") - .append(getReturnType().asCql(false, true)) - .newLine() - .append("LANGUAGE ") - .append(getLanguage()) - .newLine() - .append("AS '") - .append(getBody()) - .append("';") - .build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - // A function has no children - return describe(pretty); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java deleted file mode 100644 index 8108b4b7afd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * The signature that uniquely identifies a CQL function or aggregate in a keyspace. - * - *

It's composed of a name and a list of parameter types. Overloads (such as {@code sum(int)} and - * {@code sum(int, int)} are not equal. - */ -@Immutable -public class FunctionSignature implements Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier name; - @NonNull private final List parameterTypes; - - public FunctionSignature( - @NonNull CqlIdentifier name, @NonNull Iterable parameterTypes) { - this.name = name; - this.parameterTypes = ImmutableList.copyOf(parameterTypes); - } - - /** - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - public FunctionSignature(@NonNull CqlIdentifier name, @NonNull DataType... parameterTypes) { - this( - name, - parameterTypes.length == 0 - ? ImmutableList.of() - : ImmutableList.builder().add(parameterTypes).build()); - } - - /** - * Shortcut for {@link #FunctionSignature(CqlIdentifier, Iterable) new - * FunctionSignature(CqlIdentifier.fromCql(name), parameterTypes)}. - */ - public FunctionSignature(@NonNull String name, @NonNull Iterable parameterTypes) { - this(CqlIdentifier.fromCql(name), parameterTypes); - } - - /** - * Shortcut for {@link #FunctionSignature(CqlIdentifier, DataType...)} new - * FunctionSignature(CqlIdentifier.fromCql(name), parameterTypes)}. - * - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - public FunctionSignature(@NonNull String name, @NonNull DataType... 
parameterTypes) { - this(CqlIdentifier.fromCql(name), parameterTypes); - } - - @NonNull - public CqlIdentifier getName() { - return name; - } - - @NonNull - public List getParameterTypes() { - return parameterTypes; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof FunctionSignature) { - FunctionSignature that = (FunctionSignature) other; - return this.name.equals(that.name) && this.parameterTypes.equals(that.parameterTypes); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(name, parameterTypes); - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(name.asInternal()).append('('); - boolean first = true; - for (DataType type : parameterTypes) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(type.asCql(true, true)); - } - return builder.append(')').toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java deleted file mode 100644 index 67ac4c06a2c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -/** A kind of index in the schema. */ -public enum IndexKind { - KEYS, - CUSTOM, - COMPOSITES -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java deleted file mode 100644 index 631a6584a27..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** A secondary index in the schema metadata. */ -public interface IndexMetadata extends Describable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - CqlIdentifier getTable(); - - @NonNull - CqlIdentifier getName(); - - @NonNull - IndexKind getKind(); - - @NonNull - String getTarget(); - - /** If this index is custom, the name of the server-side implementation. Otherwise, empty. */ - @NonNull - default Optional getClassName() { - return Optional.ofNullable(getOptions().get("class_name")); - } - - /** - * The options of the index. - * - *

This directly reflects the corresponding column of the system table ({@code - * system.schema_columns.index_options} in Cassandra <= 2.2, or {@code - * system_schema.indexes.options} in later versions). - * - *

Note that some of these options might also be exposed as standalone fields in this - * interface, namely {@link #getClassName()} and {{@link #getTarget()}}. - */ - @NonNull - Map getOptions(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (getClassName().isPresent()) { - builder - .append("CREATE CUSTOM INDEX ") - .append(getName()) - .append(" ON ") - .append(getKeyspace()) - .append(".") - .append(getTable()) - .append(String.format(" (%s)", getTarget())) - .newLine() - .append(String.format("USING '%s'", getClassName().get())); - - // Some options already appear in the CREATE statement, ignore them - Map describedOptions = - Maps.filterKeys(getOptions(), k -> !"target".equals(k) && !"class_name".equals(k)); - if (!describedOptions.isEmpty()) { - builder.newLine().append("WITH OPTIONS = {").newLine().increaseIndent(); - boolean first = true; - for (Map.Entry option : describedOptions.entrySet()) { - if (first) { - first = false; - } else { - builder.append(",").newLine(); - } - builder.append(String.format("'%s' : '%s'", option.getKey(), option.getValue())); - } - builder.decreaseIndent().append("}"); - } - } else { - builder - .append("CREATE INDEX ") - .append(getName()) - .append(" ON ") - .append(getKeyspace()) - .append(".") - .append(getTable()) - .append(String.format(" (%s);", getTarget())); - } - return builder.build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - // An index has no children - return describe(pretty); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java deleted file mode 100644 index e5080932b3c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache 
Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; - -/** A keyspace in the schema metadata. */ -public interface KeyspaceMetadata extends Describable { - - @NonNull - CqlIdentifier getName(); - - /** Whether durable writes are set on this keyspace. */ - boolean isDurableWrites(); - - /** Whether this keyspace is virtual */ - boolean isVirtual(); - - /** The replication options defined for this keyspace. 
*/ - @NonNull - Map getReplication(); - - @NonNull - Map getTables(); - - @NonNull - default Optional getTable(@NonNull CqlIdentifier tableId) { - return Optional.ofNullable(getTables().get(tableId)); - } - - /** Shortcut for {@link #getTable(CqlIdentifier) getTable(CqlIdentifier.fromCql(tableName))}. */ - @NonNull - default Optional getTable(@NonNull String tableName) { - return getTable(CqlIdentifier.fromCql(tableName)); - } - - @NonNull - Map getViews(); - - /** Gets the views based on a given table. */ - @NonNull - default Map getViewsOnTable(@NonNull CqlIdentifier tableId) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (ViewMetadata view : getViews().values()) { - if (view.getBaseTable().equals(tableId)) { - builder.put(view.getName(), view); - } - } - return builder.build(); - } - - @NonNull - default Optional getView(@NonNull CqlIdentifier viewId) { - return Optional.ofNullable(getViews().get(viewId)); - } - - /** Shortcut for {@link #getView(CqlIdentifier) getView(CqlIdentifier.fromCql(viewName))}. */ - @NonNull - default Optional getView(@NonNull String viewName) { - return getView(CqlIdentifier.fromCql(viewName)); - } - - @NonNull - Map getUserDefinedTypes(); - - @NonNull - default Optional getUserDefinedType(@NonNull CqlIdentifier typeId) { - return Optional.ofNullable(getUserDefinedTypes().get(typeId)); - } - - /** - * Shortcut for {@link #getUserDefinedType(CqlIdentifier) - * getUserDefinedType(CqlIdentifier.fromCql(typeName))}. 
- */ - @NonNull - default Optional getUserDefinedType(@NonNull String typeName) { - return getUserDefinedType(CqlIdentifier.fromCql(typeName)); - } - - @NonNull - Map getFunctions(); - - @NonNull - default Optional getFunction(@NonNull FunctionSignature functionSignature) { - return Optional.ofNullable(getFunctions().get(functionSignature)); - } - - @NonNull - default Optional getFunction( - @NonNull CqlIdentifier functionId, @NonNull Iterable parameterTypes) { - return Optional.ofNullable( - getFunctions().get(new FunctionSignature(functionId, parameterTypes))); - } - - /** - * Shortcut for {@link #getFunction(CqlIdentifier, Iterable) - * getFunction(CqlIdentifier.fromCql(functionName), parameterTypes)}. - */ - @NonNull - default Optional getFunction( - @NonNull String functionName, @NonNull Iterable parameterTypes) { - return getFunction(CqlIdentifier.fromCql(functionName), parameterTypes); - } - - /** - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - @NonNull - default Optional getFunction( - @NonNull CqlIdentifier functionId, @NonNull DataType... parameterTypes) { - return Optional.ofNullable( - getFunctions().get(new FunctionSignature(functionId, parameterTypes))); - } - - /** - * Shortcut for {@link #getFunction(CqlIdentifier, DataType...) - * getFunction(CqlIdentifier.fromCql(functionName), parameterTypes)}. - * - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - @NonNull - default Optional getFunction( - @NonNull String functionName, @NonNull DataType... 
parameterTypes) { - return getFunction(CqlIdentifier.fromCql(functionName), parameterTypes); - } - - @NonNull - Map getAggregates(); - - @NonNull - default Optional getAggregate(@NonNull FunctionSignature aggregateSignature) { - return Optional.ofNullable(getAggregates().get(aggregateSignature)); - } - - @NonNull - default Optional getAggregate( - @NonNull CqlIdentifier aggregateId, @NonNull Iterable parameterTypes) { - return Optional.ofNullable( - getAggregates().get(new FunctionSignature(aggregateId, parameterTypes))); - } - - /** - * Shortcut for {@link #getAggregate(CqlIdentifier, Iterable) - * getAggregate(CqlIdentifier.fromCql(aggregateName), parameterTypes)}. - */ - @NonNull - default Optional getAggregate( - @NonNull String aggregateName, @NonNull Iterable parameterTypes) { - return getAggregate(CqlIdentifier.fromCql(aggregateName), parameterTypes); - } - - /** - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - @NonNull - default Optional getAggregate( - @NonNull CqlIdentifier aggregateId, @NonNull DataType... parameterTypes) { - return Optional.ofNullable( - getAggregates().get(new FunctionSignature(aggregateId, parameterTypes))); - } - - /** - * Shortcut for {@link #getAggregate(CqlIdentifier, DataType...)} - * getAggregate(CqlIdentifier.fromCql(aggregateName), parameterTypes)}. - * - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - @NonNull - default Optional getAggregate( - @NonNull String aggregateName, @NonNull DataType... 
parameterTypes) { - return getAggregate(CqlIdentifier.fromCql(aggregateName), parameterTypes); - } - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (isVirtual()) { - builder.append("/* VIRTUAL "); - } else { - builder.append("CREATE "); - } - builder - .append("KEYSPACE ") - .append(getName()) - .append(" WITH replication = { 'class' : '") - .append(getReplication().get("class")) - .append("'"); - for (Map.Entry entry : getReplication().entrySet()) { - if (!entry.getKey().equals("class")) { - builder - .append(", '") - .append(entry.getKey()) - .append("': '") - .append(entry.getValue()) - .append("'"); - } - } - builder - .append(" } AND durable_writes = ") - .append(Boolean.toString(isDurableWrites())) - .append(";"); - if (isVirtual()) { - builder.append(" */"); - } - return builder.build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - String createKeyspace = describe(pretty); - ScriptBuilder builder = new ScriptBuilder(pretty).append(createKeyspace); - - for (Describable element : - Iterables.concat( - getUserDefinedTypes().values(), - getTables().values(), - getViews().values(), - getFunctions().values(), - getAggregates().values())) { - builder.forceNewLine(2).append(element.describeWithChildren(pretty)); - } - - return builder.build(); - } - - default boolean shallowEquals(Object other) { - if (other == this) { - return true; - } else if (other instanceof KeyspaceMetadata) { - KeyspaceMetadata that = (KeyspaceMetadata) other; - return Objects.equals(this.getName(), that.getName()) - && this.isDurableWrites() == that.isDurableWrites() - && Objects.equals(this.getReplication(), that.getReplication()); - } else { - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java deleted file 
mode 100644 index 8b70ba04955..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -/** A table or materialized view in the schema metadata. */ -public interface RelationMetadata extends Describable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - CqlIdentifier getName(); - - /** The unique id generated by the server for this element. */ - Optional getId(); - - /** - * Convenience method to get all the primary key columns (partition key + clustering columns) in a - * single call. - * - *

Note that this creates a new list instance on each call. - * - * @see #getPartitionKey() - * @see #getClusteringColumns() - */ - @NonNull - default List getPrimaryKey() { - return ImmutableList.builder() - .addAll(getPartitionKey()) - .addAll(getClusteringColumns().keySet()) - .build(); - } - - @NonNull - List getPartitionKey(); - - @NonNull - Map getClusteringColumns(); - - @NonNull - Map getColumns(); - - @NonNull - default Optional getColumn(@NonNull CqlIdentifier columnId) { - return Optional.ofNullable(getColumns().get(columnId)); - } - - /** - * Shortcut for {@link #getColumn(CqlIdentifier) getColumn(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - default Optional getColumn(@NonNull String columnName) { - return getColumn(CqlIdentifier.fromCql(columnName)); - } - - /** - * The options of this table or materialized view. - * - *

This corresponds to the {@code WITH} clauses in the {@code CREATE} statement that would - * recreate this element. The exact set of keys and the types of the values depend on the server - * version that this metadata was extracted from. For example, in Cassandra 2.2 and below, {@code - * WITH caching} takes a string argument, whereas starting with Cassandra 3.0 it is a map. - */ - @NonNull - Map getOptions(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java deleted file mode 100644 index ac7317574ed..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Tracks schema changes. - * - *

Implementations of this interface can be registered either via the configuration (see {@code - * reference.conf} in the manual or core driver JAR), or programmatically via {@link - * SessionBuilder#addSchemaChangeListener(SchemaChangeListener)}. - * - *

Note that the methods defined by this interface will be executed by internal driver threads, - * and are therefore expected to have short execution times. If you need to perform long - * computations or blocking calls in response to schema change events, it is strongly recommended to - * schedule them asynchronously on a separate thread provided by your application code. - * - *

If you implement this interface but don't need to implement all the methods, extend {@link - * SchemaChangeListenerBase}. - */ -public interface SchemaChangeListener extends AutoCloseable { - - void onKeyspaceCreated(@NonNull KeyspaceMetadata keyspace); - - void onKeyspaceDropped(@NonNull KeyspaceMetadata keyspace); - - void onKeyspaceUpdated(@NonNull KeyspaceMetadata current, @NonNull KeyspaceMetadata previous); - - void onTableCreated(@NonNull TableMetadata table); - - void onTableDropped(@NonNull TableMetadata table); - - void onTableUpdated(@NonNull TableMetadata current, @NonNull TableMetadata previous); - - void onUserDefinedTypeCreated(@NonNull UserDefinedType type); - - void onUserDefinedTypeDropped(@NonNull UserDefinedType type); - - void onUserDefinedTypeUpdated( - @NonNull UserDefinedType current, @NonNull UserDefinedType previous); - - void onFunctionCreated(@NonNull FunctionMetadata function); - - void onFunctionDropped(@NonNull FunctionMetadata function); - - void onFunctionUpdated(@NonNull FunctionMetadata current, @NonNull FunctionMetadata previous); - - void onAggregateCreated(@NonNull AggregateMetadata aggregate); - - void onAggregateDropped(@NonNull AggregateMetadata aggregate); - - void onAggregateUpdated(@NonNull AggregateMetadata current, @NonNull AggregateMetadata previous); - - void onViewCreated(@NonNull ViewMetadata view); - - void onViewDropped(@NonNull ViewMetadata view); - - void onViewUpdated(@NonNull ViewMetadata current, @NonNull ViewMetadata previous); - - /** - * Invoked when the session is ready to process user requests. - * - *

This corresponds to the moment when {@link SessionBuilder#build()} returns, or the future - * returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization fails, - * this method will not get called. - * - *

Listener methods are invoked from different threads; if you store the session in a field, - * make it at least volatile to guarantee proper publication. - * - *

This method is guaranteed to be the first one invoked on this object. - * - *

The default implementation is empty. - */ - default void onSessionReady(@NonNull Session session) {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java deleted file mode 100644 index 1cd449b39d8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Convenience class for listener implementations that that don't need to override all methods (all - * methods in this class are empty). 
- */ -public class SchemaChangeListenerBase implements SchemaChangeListener { - - @Override - public void onKeyspaceCreated(@NonNull KeyspaceMetadata keyspace) { - // nothing to do - } - - @Override - public void onKeyspaceDropped(@NonNull KeyspaceMetadata keyspace) { - // nothing to do - } - - @Override - public void onKeyspaceUpdated( - @NonNull KeyspaceMetadata current, @NonNull KeyspaceMetadata previous) { - // nothing to do - } - - @Override - public void onTableCreated(@NonNull TableMetadata table) { - // nothing to do - } - - @Override - public void onTableDropped(@NonNull TableMetadata table) { - // nothing to do - } - - @Override - public void onTableUpdated(@NonNull TableMetadata current, @NonNull TableMetadata previous) { - // nothing to do - } - - @Override - public void onUserDefinedTypeCreated(@NonNull UserDefinedType type) { - // nothing to do - } - - @Override - public void onUserDefinedTypeDropped(@NonNull UserDefinedType type) { - // nothing to do - } - - @Override - public void onUserDefinedTypeUpdated( - @NonNull UserDefinedType current, @NonNull UserDefinedType previous) { - // nothing to do - } - - @Override - public void onFunctionCreated(@NonNull FunctionMetadata function) { - // nothing to do - } - - @Override - public void onFunctionDropped(@NonNull FunctionMetadata function) { - // nothing to do - } - - @Override - public void onFunctionUpdated( - @NonNull FunctionMetadata current, @NonNull FunctionMetadata previous) { - // nothing to do - } - - @Override - public void onAggregateCreated(@NonNull AggregateMetadata aggregate) { - // nothing to do - } - - @Override - public void onAggregateDropped(@NonNull AggregateMetadata aggregate) { - // nothing to do - } - - @Override - public void onAggregateUpdated( - @NonNull AggregateMetadata current, @NonNull AggregateMetadata previous) { - // nothing to do - } - - @Override - public void onViewCreated(@NonNull ViewMetadata view) { - // nothing to do - } - - @Override - public void 
onViewDropped(@NonNull ViewMetadata view) { - // nothing to do - } - - @Override - public void onViewUpdated(@NonNull ViewMetadata current, @NonNull ViewMetadata previous) { - // nothing to do - } - - @Override - public void close() throws Exception { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java deleted file mode 100644 index bcda226b45d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** A table in the schema metadata. 
*/ -public interface TableMetadata extends RelationMetadata { - - boolean isCompactStorage(); - - /** Whether this table is virtual */ - boolean isVirtual(); - - @NonNull - Map getIndexes(); - - @NonNull - default Optional getIndex(@NonNull CqlIdentifier indexId) { - return Optional.ofNullable(getIndexes().get(indexId)); - } - - /** Shortcut for {@link #getIndex(CqlIdentifier) getIndex(CqlIdentifier.fromCql(indexName))}. */ - @NonNull - default Optional getIndex(@NonNull String indexName) { - return getIndex(CqlIdentifier.fromCql(indexName)); - } - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (isVirtual()) { - builder.append("/* VIRTUAL "); - } else { - builder.append("CREATE "); - } - - builder - .append("TABLE ") - .append(getKeyspace()) - .append(".") - .append(getName()) - .append(" (") - .newLine() - .increaseIndent(); - - for (ColumnMetadata column : getColumns().values()) { - builder.append(column.getName()).append(" ").append(column.getType().asCql(true, pretty)); - if (column.isStatic()) { - builder.append(" static"); - } - builder.append(",").newLine(); - } - - // PK - builder.append("PRIMARY KEY ("); - if (getPartitionKey().size() == 1) { // PRIMARY KEY (k - builder.append(getPartitionKey().get(0).getName()); - } else { // PRIMARY KEY ((k1, k2) - builder.append("("); - boolean first = true; - for (ColumnMetadata pkColumn : getPartitionKey()) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(pkColumn.getName()); - } - builder.append(")"); - } - // PRIMARY KEY (, cc1, cc2, cc3) - for (ColumnMetadata clusteringColumn : getClusteringColumns().keySet()) { - builder.append(", ").append(clusteringColumn.getName()); - } - builder.append(")"); - - builder.newLine().decreaseIndent().append(")"); - - builder.increaseIndent(); - if (isCompactStorage()) { - builder.andWith().append("COMPACT STORAGE"); - } - if 
(getClusteringColumns().containsValue(ClusteringOrder.DESC)) { - builder.andWith().append("CLUSTERING ORDER BY ("); - boolean first = true; - for (Map.Entry entry : getClusteringColumns().entrySet()) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(entry.getKey().getName()).append(" ").append(entry.getValue().name()); - } - builder.append(")"); - } - Map options = getOptions(); - RelationParser.appendOptions(options, builder); - builder.append(";"); - if (isVirtual()) { - builder.append(" */"); - } - return builder.build(); - } - - /** - * {@inheritDoc} - * - *

This describes the table and all of its indices. Contrary to previous driver versions, views - * are not included. - */ - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - String createTable = describe(pretty); - ScriptBuilder builder = new ScriptBuilder(pretty).append(createTable); - for (IndexMetadata indexMetadata : getIndexes().values()) { - builder.forceNewLine(2).append(indexMetadata.describeWithChildren(pretty)); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java deleted file mode 100644 index e6b06cffb97..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Optional; - -/** A materialized view in the schema metadata. */ -public interface ViewMetadata extends RelationMetadata { - - /** The table that this view is based on. */ - @NonNull - CqlIdentifier getBaseTable(); - - /** - * Whether this view does a {@code SELECT *} on its base table (this only affects the output of - * {@link #describe(boolean)}). - */ - boolean includesAllColumns(); - - @NonNull - Optional getWhereClause(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = - new ScriptBuilder(pretty) - .append("CREATE MATERIALIZED VIEW ") - .append(getKeyspace()) - .append(".") - .append(getName()) - .append(" AS") - .newLine(); - - builder.append("SELECT"); - if (includesAllColumns()) { - builder.append(" * "); - } else { - builder.newLine().increaseIndent(); - boolean first = true; - for (ColumnMetadata column : getColumns().values()) { - if (first) { - first = false; - } else { - builder.append(",").newLine(); - } - builder.append(column.getName()); - } - builder.newLine().decreaseIndent(); - } - - builder.append("FROM ").append(getKeyspace()).append(".").append(getBaseTable()); - - Optional whereClause = getWhereClause(); - if (whereClause.isPresent() && !whereClause.get().isEmpty()) { - builder.newLine().append("WHERE ").append(whereClause.get()); - } - - builder.newLine().append("PRIMARY KEY ("); - if (getPartitionKey().size() == 1) { // PRIMARY KEY (k - builder.append(getPartitionKey().get(0).getName()); - } else { // PRIMARY KEY ((k1, k2) - builder.append("("); - boolean first = true; - for (ColumnMetadata pkColumn : getPartitionKey()) { - if (first) { - first = 
false; - } else { - builder.append(", "); - } - builder.append(pkColumn.getName()); - } - builder.append(")"); - } - // PRIMARY KEY (, cc1, cc2, cc3) - for (ColumnMetadata clusteringColumn : getClusteringColumns().keySet()) { - builder.append(", ").append(clusteringColumn.getName()); - } - builder.append(")").increaseIndent(); - - RelationParser.appendOptions(getOptions(), builder); - return builder.append(";").build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - return describe(pretty); // A view has no children - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java deleted file mode 100644 index f39de8ec5b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.token; - -/** A token on the ring. 
*/ -public interface Token extends Comparable {} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java deleted file mode 100644 index e384300c571..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.token; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** - * A range of tokens on the Cassandra ring. - * - *

A range is start-exclusive and end-inclusive. It is empty when start and end are the same - * token, except if that is the minimum token, in which case the range covers the whole ring (this - * is consistent with the behavior of CQL range queries). - * - *

Note that CQL does not handle wrapping. To query all partitions in a range, see {@link - * #unwrap()}. - */ -public interface TokenRange extends Comparable { - - /** The start of the range (exclusive). */ - @NonNull - Token getStart(); - - /** The end of the range (inclusive). */ - @NonNull - Token getEnd(); - - /** - * Splits this range into a number of smaller ranges of equal "size" (referring to the number of - * tokens, not the actual amount of data). - * - *

Splitting an empty range is not permitted. But note that, in edge cases, splitting a range - * might produce one or more empty ranges. - * - * @throws IllegalArgumentException if the range is empty or if {@code numberOfSplits < 1}. - */ - @NonNull - List splitEvenly(int numberOfSplits); - - /** - * Whether this range is empty. - * - *

A range is empty when {@link #getStart()} and {@link #getEnd()} are the same token, except - * if that is the minimum token, in which case the range covers the whole ring (this is consistent - * with the behavior of CQL range queries). - */ - boolean isEmpty(); - - /** Whether this range wraps around the end of the ring. */ - boolean isWrappedAround(); - - /** Whether this range represents the full ring. */ - boolean isFullRing(); - - /** - * Splits this range into a list of two non-wrapping ranges. This will return the range itself if - * it is non-wrapping, or two ranges otherwise. - * - *

For example: - * - *

    - *
  • {@code ]1,10]} unwraps to itself; - *
  • {@code ]10,1]} unwraps to {@code ]10,min_token]} and {@code ]min_token,1]}. - *
- * - *

This is useful for CQL range queries, which do not handle wrapping: - * - *

{@code
-   * List rows = new ArrayList();
-   * for (TokenRange subRange : range.unwrap()) {
-   *     ResultSet rs = session.execute(
-   *         "SELECT * FROM mytable WHERE token(pk) > ? and token(pk) <= ?",
-   *         subRange.getStart(), subRange.getEnd());
-   *     rows.addAll(rs.all());
-   * }
-   * }
- */ - @NonNull - List unwrap(); - - /** - * Whether this range intersects another one. - * - *

For example: - * - *

    - *
  • {@code ]3,5]} intersects {@code ]1,4]}, {@code ]4,5]}... - *
  • {@code ]3,5]} does not intersect {@code ]1,2]}, {@code ]2,3]}, {@code ]5,7]}... - *
- */ - boolean intersects(@NonNull TokenRange that); - - /** - * Computes the intersection of this range with another one, producing one or more ranges. - * - *

If either of these ranges overlap the the ring, they are unwrapped and the unwrapped ranges - * are compared to one another. - * - *

This call will fail if the two ranges do not intersect, you must check by calling {@link - * #intersects(TokenRange)} first. - * - * @param that the other range. - * @return the range(s) resulting from the intersection. - * @throws IllegalArgumentException if the ranges do not intersect. - */ - @NonNull - List intersectWith(@NonNull TokenRange that); - - /** - * Checks whether this range contains a given token, i.e. {@code range.start < token <= - * range.end}. - */ - boolean contains(@NonNull Token token); - - /** - * Merges this range with another one. - * - *

The two ranges should either intersect or be adjacent; in other words, the merged range - * should not include tokens that are in neither of the original ranges. - * - *

For example: - * - *

    - *
  • merging {@code ]3,5]} with {@code ]4,7]} produces {@code ]3,7]}; - *
  • merging {@code ]3,5]} with {@code ]4,5]} produces {@code ]3,5]}; - *
  • merging {@code ]3,5]} with {@code ]5,8]} produces {@code ]3,8]}; - *
  • merging {@code ]3,5]} with {@code ]6,8]} fails. - *
- * - * @throws IllegalArgumentException if the ranges neither intersect nor are adjacent. - */ - @NonNull - TokenRange mergeWith(@NonNull TokenRange that); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java deleted file mode 100644 index 0e9934c7034..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** See {@code reference.conf} for a description of each metric. 
*/ -public enum DefaultNodeMetric implements NodeMetric { - OPEN_CONNECTIONS("pool.open-connections"), - AVAILABLE_STREAMS("pool.available-streams"), - IN_FLIGHT("pool.in-flight"), - ORPHANED_STREAMS("pool.orphaned-streams"), - BYTES_SENT("bytes-sent"), - BYTES_RECEIVED("bytes-received"), - CQL_MESSAGES("cql-messages"), - UNSENT_REQUESTS("errors.request.unsent"), - ABORTED_REQUESTS("errors.request.aborted"), - WRITE_TIMEOUTS("errors.request.write-timeouts"), - READ_TIMEOUTS("errors.request.read-timeouts"), - UNAVAILABLES("errors.request.unavailables"), - OTHER_ERRORS("errors.request.others"), - RETRIES("retries.total"), - RETRIES_ON_ABORTED("retries.aborted"), - RETRIES_ON_READ_TIMEOUT("retries.read-timeout"), - RETRIES_ON_WRITE_TIMEOUT("retries.write-timeout"), - RETRIES_ON_UNAVAILABLE("retries.unavailable"), - RETRIES_ON_OTHER_ERROR("retries.other"), - IGNORES("ignores.total"), - IGNORES_ON_ABORTED("ignores.aborted"), - IGNORES_ON_READ_TIMEOUT("ignores.read-timeout"), - IGNORES_ON_WRITE_TIMEOUT("ignores.write-timeout"), - IGNORES_ON_UNAVAILABLE("ignores.unavailable"), - IGNORES_ON_OTHER_ERROR("ignores.other"), - SPECULATIVE_EXECUTIONS("speculative-executions"), - CONNECTION_INIT_ERRORS("errors.connection.init"), - AUTHENTICATION_ERRORS("errors.connection.auth"), - ; - - private static final Map BY_PATH = sortByPath(); - - private final String path; - - DefaultNodeMetric(String path) { - this.path = path; - } - - @Override - @NonNull - public String getPath() { - return path; - } - - @NonNull - public static DefaultNodeMetric fromPath(@NonNull String path) { - DefaultNodeMetric metric = BY_PATH.get(path); - if (metric == null) { - throw new IllegalArgumentException("Unknown node metric path " + path); - } - return metric; - } - - private static Map sortByPath() { - ImmutableMap.Builder result = ImmutableMap.builder(); - for (DefaultNodeMetric value : values()) { - result.put(value.getPath(), value); - } - return result.build(); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java deleted file mode 100644 index 63027a23fe7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** See {@code reference.conf} for a description of each metric. 
*/ -public enum DefaultSessionMetric implements SessionMetric { - BYTES_SENT("bytes-sent"), - BYTES_RECEIVED("bytes-received"), - CONNECTED_NODES("connected-nodes"), - CQL_REQUESTS("cql-requests"), - CQL_CLIENT_TIMEOUTS("cql-client-timeouts"), - THROTTLING_DELAY("throttling.delay"), - THROTTLING_QUEUE_SIZE("throttling.queue-size"), - THROTTLING_ERRORS("throttling.errors"), - CQL_PREPARED_CACHE_SIZE("cql-prepared-cache-size"), - ; - - private static final Map BY_PATH = sortByPath(); - - private final String path; - - DefaultSessionMetric(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } - - @NonNull - public static DefaultSessionMetric fromPath(@NonNull String path) { - DefaultSessionMetric metric = BY_PATH.get(path); - if (metric == null) { - throw new IllegalArgumentException("Unknown session metric path " + path); - } - return metric; - } - - private static Map sortByPath() { - ImmutableMap.Builder result = ImmutableMap.builder(); - for (DefaultSessionMetric value : values()) { - result.put(value.getPath(), value); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java deleted file mode 100644 index 58d531b3464..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.codahale.metrics.Metric; -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Optional; - -/** - * A wrapper around a {@link MetricRegistry} to expose the driver's metrics. - * - *

This type exists mainly to avoid a hard dependency to Dropwizard Metrics (that is, the JAR can - * be completely removed from the classpath if metrics are disabled). It also provides convenience - * methods to access individual metrics programmatically. - */ -public interface Metrics { - - /** - * Returns the underlying Dropwizard registry. - * - *

Typically, this can be used to configure a reporter. - * - * @see Reporters - * (Dropwizard Metrics manual) - * @leaks-private-api - */ - @NonNull - MetricRegistry getRegistry(); - - /** - * Retrieves a session-level metric from the registry. - * - *

To determine the type of each metric, refer to the comments in the default {@code - * reference.conf} (included in the driver's codebase and JAR file). Note that the method does not - * check that this type is correct (there is no way to do this at runtime because some metrics are - * generic); if you use the wrong type, you will get a {@code ClassCastException} in your code: - * - *

{@code
-   * // Correct:
-   * Gauge connectedNodes = getSessionMetric(DefaultSessionMetric.CONNECTED_NODES);
-   *
-   * // Wrong, will throw CCE:
-   * Counter connectedNodes = getSessionMetric(DefaultSessionMetric.CONNECTED_NODES);
-   * }
- * - * @param profileName the name of the execution profile, or {@code null} if the metric is not - * associated to any profile. Note that this is only included for future extensibility: at - * this time, the driver does not break up metrics per profile. Therefore you can always use - * {@link #getSessionMetric(SessionMetric)} instead of this method. - * @return the metric, or empty if it is disabled. - */ - @SuppressWarnings("TypeParameterUnusedInFormals") - @NonNull - Optional getSessionMetric( - @NonNull SessionMetric metric, @Nullable String profileName); - - /** - * Shortcut for {@link #getSessionMetric(SessionMetric, String) getSessionMetric(metric, null)}. - */ - @SuppressWarnings("TypeParameterUnusedInFormals") - @NonNull - default Optional getSessionMetric(@NonNull SessionMetric metric) { - return getSessionMetric(metric, null); - } - - /** - * Retrieves a node-level metric for a given node from the registry. - * - *

To determine the type of each metric, refer to the comments in the default {@code - * reference.conf} (included in the driver's codebase and JAR file). Note that the method does not - * check that this type is correct (there is no way to do this at runtime because some metrics are - * generic); if you use the wrong type, you will get a {@code ClassCastException} in your code: - * - *

{@code
-   * // Correct:
-   * Gauge openConnections = getNodeMetric(node, DefaultNodeMetric.OPEN_CONNECTIONS);
-   *
-   * // Wrong, will throw CCE:
-   * Counter openConnections = getNodeMetric(node, DefaultNodeMetric.OPEN_CONNECTIONS);
-   * }
- * - * @param profileName the name of the execution profile, or {@code null} if the metric is not - * associated to any profile. Note that this is only included for future extensibility: at - * this time, the driver does not break up metrics per profile. Therefore you can always use - * {@link #getNodeMetric(Node, NodeMetric)} instead of this method. - * @return the metric, or empty if it is disabled. - */ - @SuppressWarnings("TypeParameterUnusedInFormals") - @NonNull - Optional getNodeMetric( - @NonNull Node node, @NonNull NodeMetric metric, @Nullable String profileName); - - /** - * Shortcut for {@link #getNodeMetric(Node, NodeMetric, String) getNodeMetric(node, metric, - * null)}. - */ - @SuppressWarnings("TypeParameterUnusedInFormals") - @NonNull - default Optional getNodeMetric( - @NonNull Node node, @NonNull NodeMetric metric) { - return getNodeMetric(node, metric, null); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java deleted file mode 100644 index b31c0ed8bcf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A node-level metric exposed through {@link Session#getMetrics()}. - * - *

All metrics exposed out of the box by the driver are instances of {@link DefaultNodeMetric} or - * {@link com.datastax.dse.driver.api.core.metrics.DseNodeMetric DseNodeMetric} (this interface only - * exists to allow custom metrics in driver extensions). - * - * @see SessionMetric - */ -public interface NodeMetric { - - @NonNull - String getPath(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java deleted file mode 100644 index 2a1ee599754..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A session-level metric exposed through {@link Session#getMetrics()}. - * - *

All metrics exposed out of the box by the driver are instances of {@link DefaultSessionMetric} - * or {@link com.datastax.dse.driver.api.core.metrics.DseSessionMetric DseSessionMetric} (this - * interface only exists to allow custom metrics in driver extensions). - * - * @see NodeMetric - */ -public interface SessionMetric { - - @NonNull - String getPath(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java deleted file mode 100644 index 597b333267b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** The core API of the driver, that deals with query execution and cluster metadata. 
*/ -package com.datastax.oss.driver.api.core; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/paging/OffsetPager.java b/core/src/main/java/com/datastax/oss/driver/api/core/paging/OffsetPager.java deleted file mode 100644 index 3cb838f3171..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/paging/OffsetPager.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.paging; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -/** - * A utility to emulate offset queries on the client side (this comes with important performance - * trade-offs, make sure you read and understand the whole javadocs before using this class). - * - *

Web UIs and services often provide paginated results with random access, for example: given a - * page size of 20 elements, fetch page 5. Cassandra does not support this natively (see CASSANDRA-6511), because such - * queries are inherently linear: the database would have to restart from the beginning every time, - * and skip unwanted rows until it reaches the desired offset. - * - *

However, random pagination is a real need for many applications, and linear performance can be - * a reasonable trade-off if the cardinality stays low. This class provides a way to emulate this - * behavior on the client side. - * - *

Performance considerations

- * - * For each page that you want to retrieve: - * - *
    - *
  • you need to re-execute the query, in order to start with a fresh result set; - *
  • this class starts iterating from the beginning, and skips rows until it reaches the desired - * offset. - *
- * - *
- * - *
- * String query = "SELECT ...";
- * OffsetPager pager = new OffsetPager(20);
- *
- * // Get page 2: start from a fresh result set, throw away rows 1-20, then return rows 21-40
- * ResultSet rs = session.execute(query);
- * OffsetPager.Page<Row> page2 = pager.getPage(rs, 2);
- *
- * // Get page 5: start from a fresh result set, throw away rows 1-80, then return rows 81-100
- * rs = session.execute(query);
- * OffsetPager.Page<Row> page5 = pager.getPage(rs, 5);
- * 
- * - *

Establishing application-level guardrails

- * - * Linear performance should be fine for the values typically encountered in real-world - * applications: for example, if the page size is 25 and users never go past page 10, the worst case - * is only 250 rows, which is a very small result set. However, we strongly recommend that you - * implement hard limits in your application code: if the page number is exposed to the user (for - * example if it is passed as a URL parameter), make sure it is properly validated and enforce a - * maximum, so that an attacker can't inject a large value that could potentially fetch millions of - * rows. - * - *

Relation with protocol-level paging

- * - * Protocol-level paging refers to the ability to split large response into multiple network chunks: - * see {@link Statement#setPageSize(int)} and {@code basic.request.page-size} in the configuration. - * It happens under the hood, and is completely transparent for offset paging: this class will work - * the same no matter how many network roundtrips were needed to fetch the result. You don't need to - * set the protocol page size and the logical page size to the same value. - */ -@ThreadSafe -public class OffsetPager { - - /** A page returned as the result of an offset query. */ - public interface Page { - - /** The elements in the page. */ - @NonNull - List getElements(); - - /** - * The page number (1 for the first page, 2 for the second page, etc). - * - *

Note that it may be different than the number you passed to {@link - * #getPage(PagingIterable, int)}: if the result set was too short, this is the actual number of - * the last page. - */ - int getPageNumber(); - - /** Whether this is the last page in the result set. */ - boolean isLast(); - } - - private final int pageSize; - - /** - * Creates a new instance. - * - * @param pageSize the number of elements per page. Must be greater than or equal to 1. - */ - public OffsetPager(int pageSize) { - if (pageSize < 1) { - throw new IllegalArgumentException("Invalid pageSize, expected >=1, got " + pageSize); - } - this.pageSize = pageSize; - } - - /** - * Extracts a page from a synchronous result set, by skipping rows until we get to the requested - * offset. - * - * @param iterable the iterable to extract the results from: typically a {@link ResultSet}, or a - * {@link PagingIterable} returned by the mapper. - * @param targetPageNumber the page to return (1 for the first page, 2 for the second page, etc). - * Must be greater than or equal to 1. - * @return the requested page, or the last page if the requested page was past the end of the - * iterable. - * @throws IllegalArgumentException if the conditions on the arguments are not respected. - */ - @NonNull - public Page getPage( - @NonNull PagingIterable iterable, final int targetPageNumber) { - - throwIfIllegalArguments(iterable, targetPageNumber); - - // Holds the contents of the target page. We also need to record the current page as we go, - // because our iterable is forward-only and we can't predict when we'll hit the end. 
- List currentPageElements = new ArrayList<>(); - - int currentPageNumber = 1; - int currentPageSize = 0; - for (ElementT element : iterable) { - currentPageSize += 1; - - if (currentPageSize > pageSize) { - currentPageNumber += 1; - currentPageSize = 1; - currentPageElements.clear(); - } - - currentPageElements.add(element); - - if (currentPageNumber == targetPageNumber && currentPageSize == pageSize) { - // The target page has the full size and we've seen all of its elements - break; - } - } - - // Either we have the full target page, or we've reached the end of the result set. - boolean isLast = iterable.one() == null; - return new DefaultPage<>(currentPageElements, currentPageNumber, isLast); - } - - /** - * Extracts a page from an asynchronous result set, by skipping rows until we get to the requested - * offset. - * - * @param iterable the iterable to extract the results from. Typically an {@link - * AsyncPagingIterable}, or a {@link MappedAsyncPagingIterable} returned by the mapper. - * @param targetPageNumber the page to return (1 for the first page, 2 for the second page, etc). - * Must be greater than or equal to 1. - * @return a stage that will complete with the requested page, or the last page if the requested - * page was past the end of the iterable. - * @throws IllegalArgumentException if the conditions on the arguments are not respected. 
- */ - @NonNull - public > - CompletionStage> getPage( - @NonNull IterableT iterable, final int targetPageNumber) { - - // Throw IllegalArgumentException directly instead of failing the stage, since it signals - // blatant programming errors - throwIfIllegalArguments(iterable, targetPageNumber); - - CompletableFuture> pageFuture = new CompletableFuture<>(); - getPage(iterable, targetPageNumber, 1, 0, new ArrayList<>(), pageFuture); - - return pageFuture; - } - - private void throwIfIllegalArguments(@NonNull Object iterable, int targetPageNumber) { - Objects.requireNonNull(iterable); - if (targetPageNumber < 1) { - throw new IllegalArgumentException( - "Invalid targetPageNumber, expected >=1, got " + targetPageNumber); - } - } - - /** - * Main method for the async iteration. - * - *

See the synchronous version in {@link #getPage(PagingIterable, int)} for more explanations: - * this is identical, except that it is async and we need to handle protocol page transitions - * manually. - */ - private , ElementT> void getPage( - @NonNull IterableT iterable, - final int targetPageNumber, - int currentPageNumber, - int currentPageSize, - @NonNull List currentPageElements, - @NonNull CompletableFuture> pageFuture) { - - // Note: iterable.currentPage()/fetchNextPage() refer to protocol-level pages, do not confuse - // with logical pages handled by this class - Iterator currentFrame = iterable.currentPage().iterator(); - while (currentFrame.hasNext()) { - ElementT element = currentFrame.next(); - - currentPageSize += 1; - - if (currentPageSize > pageSize) { - currentPageNumber += 1; - currentPageSize = 1; - currentPageElements.clear(); - } - - currentPageElements.add(element); - - if (currentPageNumber == targetPageNumber && currentPageSize == pageSize) { - // Full-size target page. In this method it's simpler to finish directly here. 
- if (currentFrame.hasNext()) { - pageFuture.complete(new DefaultPage<>(currentPageElements, currentPageNumber, false)); - } else if (!iterable.hasMorePages()) { - pageFuture.complete(new DefaultPage<>(currentPageElements, currentPageNumber, true)); - } else { - // It's possible for the server to return an empty last frame, so we need to fetch it to - // know for sure whether there are more elements - int finalCurrentPageNumber = currentPageNumber; - iterable - .fetchNextPage() - .whenComplete( - (nextIterable, throwable) -> { - if (throwable != null) { - pageFuture.completeExceptionally(throwable); - } else { - boolean isLastPage = !nextIterable.currentPage().iterator().hasNext(); - pageFuture.complete( - new DefaultPage<>( - currentPageElements, finalCurrentPageNumber, isLastPage)); - } - }); - } - return; - } - } - - if (iterable.hasMorePages()) { - int finalCurrentPageNumber = currentPageNumber; - int finalCurrentPageSize = currentPageSize; - iterable - .fetchNextPage() - .whenComplete( - (nextIterable, throwable) -> { - if (throwable != null) { - pageFuture.completeExceptionally(throwable); - } else { - getPage( - nextIterable, - targetPageNumber, - finalCurrentPageNumber, - finalCurrentPageSize, - currentPageElements, - pageFuture); - } - }); - } else { - // Reached the end of the result set, finish with what we have so far - pageFuture.complete(new DefaultPage<>(currentPageElements, currentPageNumber, true)); - } - } - - private static class DefaultPage implements Page { - private final List elements; - private final int pageNumber; - private final boolean isLast; - - DefaultPage(@NonNull List elements, int pageNumber, boolean isLast) { - this.elements = ImmutableList.copyOf(elements); - this.pageNumber = pageNumber; - this.isLast = isLast; - } - - @NonNull - @Override - public List getElements() { - return elements; - } - - @Override - public int getPageNumber() { - return pageNumber; - } - - @Override - public boolean isLast() { - return isLast; - } - } -} 
diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java deleted file mode 100644 index 4b57b781822..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.retry; - -/** - * A decision from the {@link RetryPolicy} on how to handle a retry. - * - * @see RetryVerdict#getRetryDecision() - */ -public enum RetryDecision { - /** Retry the operation on the same node. */ - RETRY_SAME, - /** Retry the operation on the next available node in the query plan (if any). */ - RETRY_NEXT, - /** Rethrow to the calling code, as the result of the execute operation. */ - RETHROW, - /** Don't retry and return an empty result set to the calling code. 
*/ - IGNORE, - ; -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java deleted file mode 100644 index e8546816e23..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.retry; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.TruncateException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Defines the behavior to adopt when a request fails. - * - *

For each request, the driver gets a "query plan" (a list of coordinators to try) from the - * {@link LoadBalancingPolicy}, and tries each node in sequence. This policy is invoked if the - * request to that node fails. - * - *

The methods of this interface are invoked on I/O threads, therefore implementations should - * never block. In particular, don't call {@link Thread#sleep(long)} to retry after a delay: - * this would prevent asynchronous processing of other requests, and very negatively impact - * throughput. If the application needs to back off and retry later, this should be implemented in - * client code, not in this policy. - */ -public interface RetryPolicy extends AutoCloseable { - - /** - * Whether to retry when the server replied with a {@code READ_TIMEOUT} error; this indicates a - * server-side timeout during a read query, i.e. some replicas did not reply to the - * coordinator in time. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param blockFor the minimum number of replica acknowledgements/responses that were required to - * fulfill the operation. - * @param received the number of replica that had acknowledged/responded to the operation before - * it failed. - * @param dataPresent whether the actual data was amongst the received replica responses. See - * {@link ReadTimeoutException#wasDataPresent()}. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - * @deprecated As of version 4.10, use {@link #onReadTimeoutVerdict(Request, ConsistencyLevel, - * int, int, boolean, int)} instead. - */ - @Deprecated - RetryDecision onReadTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount); - - /** - * Whether to retry when the server replied with a {@code READ_TIMEOUT} error; this indicates a - * server-side timeout during a read query, i.e. some replicas did not reply to the - * coordinator in time. - * - * @param request the request that timed out. - * @param cl the requested consistency level. 
- * @param blockFor the minimum number of replica acknowledgements/responses that were required to - * fulfill the operation. - * @param received the number of replica that had acknowledged/responded to the operation before - * it failed. - * @param dataPresent whether the actual data was amongst the received replica responses. See - * {@link ReadTimeoutException#wasDataPresent()}. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onReadTimeoutVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - RetryDecision decision = - onReadTimeout(request, cl, blockFor, received, dataPresent, retryCount); - return () -> decision; - } - - /** - * Whether to retry when the server replied with a {@code WRITE_TIMEOUT} error; this indicates a - * server-side timeout during a write query, i.e. some replicas did not reply to the - * coordinator in time. - * - *

Note that this method will only be invoked for {@link Request#isIdempotent()} idempotent} - * requests: when a write times out, it is impossible to determine with 100% certainty whether the - * mutation was applied or not, so the write is never safe to retry; the driver will rethrow the - * error directly, without invoking the retry policy. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param writeType the type of the write for which the timeout was raised. - * @param blockFor the minimum number of replica acknowledgements/responses that were required to - * fulfill the operation. - * @param received the number of replica that had acknowledged/responded to the operation before - * it failed. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - * @deprecated As of version 4.10, use {@link #onWriteTimeoutVerdict(Request, ConsistencyLevel, - * WriteType, int, int, int)} instead. - */ - @Deprecated - RetryDecision onWriteTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount); - - /** - * Whether to retry when the server replied with a {@code WRITE_TIMEOUT} error; this indicates a - * server-side timeout during a write query, i.e. some replicas did not reply to the - * coordinator in time. - * - *

Note that this method will only be invoked for {@link Request#isIdempotent()} idempotent} - * requests: when a write times out, it is impossible to determine with 100% certainty whether the - * mutation was applied or not, so the write is never safe to retry; the driver will rethrow the - * error directly, without invoking the retry policy. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param writeType the type of the write for which the timeout was raised. - * @param blockFor the minimum number of replica acknowledgements/responses that were required to - * fulfill the operation. - * @param received the number of replica that had acknowledged/responded to the operation before - * it failed. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onWriteTimeoutVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - RetryDecision decision = onWriteTimeout(request, cl, writeType, blockFor, received, retryCount); - return () -> decision; - } - - /** - * Whether to retry when the server replied with an {@code UNAVAILABLE} error; this indicates that - * the coordinator determined that there were not enough replicas alive to perform a query with - * the requested consistency level. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param required the number of replica acknowledgements/responses required to perform the - * operation (with its required consistency level). - * @param alive the number of replicas that were known to be alive by the coordinator node when it - * tried to execute the operation. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). 
- * @deprecated As of version 4.10, use {@link #onUnavailableVerdict(Request, ConsistencyLevel, - * int, int, int)} instead. - */ - @Deprecated - RetryDecision onUnavailable( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount); - - /** - * Whether to retry when the server replied with an {@code UNAVAILABLE} error; this indicates that - * the coordinator determined that there were not enough replicas alive to perform a query with - * the requested consistency level. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param required the number of replica acknowledgements/responses required to perform the - * operation (with its required consistency level). - * @param alive the number of replicas that were known to be alive by the coordinator node when it - * tried to execute the operation. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onUnavailableVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - RetryDecision decision = onUnavailable(request, cl, required, alive, retryCount); - return () -> decision; - } - - /** - * Whether to retry when a request was aborted before we could get a response from the server. - * - *

This can happen in two cases: if the connection was closed due to an external event (this - * will manifest as a {@link ClosedConnectionException}, or {@link HeartbeatException} for a - * heartbeat failure); or if there was an unexpected error while decoding the response (this can - * only be a driver bug). - * - *

Note that this method will only be invoked for {@linkplain Request#isIdempotent() - * idempotent} requests: when execution was aborted before getting a response, it is impossible to - * determine with 100% certainty whether a mutation was applied or not, so a write is never safe - * to retry; the driver will rethrow the error directly, without invoking the retry policy. - * - * @param request the request that was aborted. - * @param error the error. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - * @deprecated As of version 4.10, use {@link #onRequestAbortedVerdict(Request, Throwable, int)} - * instead. - */ - @Deprecated - RetryDecision onRequestAborted( - @NonNull Request request, @NonNull Throwable error, int retryCount); - - /** - * Whether to retry when a request was aborted before we could get a response from the server. - * - *

This can happen in two cases: if the connection was closed due to an external event (this - * will manifest as a {@link ClosedConnectionException}, or {@link HeartbeatException} for a - * heartbeat failure); or if there was an unexpected error while decoding the response (this can - * only be a driver bug). - * - *

Note that this method will only be invoked for {@linkplain Request#isIdempotent() - * idempotent} requests: when execution was aborted before getting a response, it is impossible to - * determine with 100% certainty whether a mutation was applied or not, so a write is never safe - * to retry; the driver will rethrow the error directly, without invoking the retry policy. - * - * @param request the request that was aborted. - * @param error the error. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onRequestAbortedVerdict( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - RetryDecision decision = onRequestAborted(request, error, retryCount); - return () -> decision; - } - - /** - * Whether to retry when the server replied with a recoverable error (other than {@code - * READ_TIMEOUT}, {@code WRITE_TIMEOUT} or {@code UNAVAILABLE}). - * - *

This can happen for the following errors: {@link OverloadedException}, {@link ServerError}, - * {@link TruncateException}, {@link ReadFailureException}, {@link WriteFailureException}. - * - *

The following errors are handled internally by the driver, and therefore will never - * be encountered in this method: - * - *

    - *
  • {@link BootstrappingException}: always retried on the next node; - *
  • {@link QueryValidationException} (and its subclasses), {@link FunctionFailureException} - * and {@link ProtocolError}: always rethrown. - *
- * - *

Note that this method will only be invoked for {@link Request#isIdempotent()} idempotent} - * requests: when execution was aborted before getting a response, it is impossible to determine - * with 100% certainty whether a mutation was applied or not, so a write is never safe to retry; - * the driver will rethrow the error directly, without invoking the retry policy. - * - * @param request the request that failed. - * @param error the error. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - * @deprecated As of version 4.10, use {@link #onErrorResponseVerdict(Request, - * CoordinatorException, int)} instead. - */ - @Deprecated - RetryDecision onErrorResponse( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount); - - /** - * Whether to retry when the server replied with a recoverable error (other than {@code - * READ_TIMEOUT}, {@code WRITE_TIMEOUT} or {@code UNAVAILABLE}). - * - *

This can happen for the following errors: {@link OverloadedException}, {@link ServerError}, - * {@link TruncateException}, {@link ReadFailureException}, {@link WriteFailureException}. - * - *

The following errors are handled internally by the driver, and therefore will never - * be encountered in this method: - * - *

    - *
  • {@link BootstrappingException}: always retried on the next node; - *
  • {@link QueryValidationException} (and its subclasses), {@link FunctionFailureException} - * and {@link ProtocolError}: always rethrown. - *
- * - *

Note that this method will only be invoked for {@link Request#isIdempotent()} idempotent} - * requests: when execution was aborted before getting a response, it is impossible to determine - * with 100% certainty whether a mutation was applied or not, so a write is never safe to retry; - * the driver will rethrow the error directly, without invoking the retry policy. - * - * @param request the request that failed. - * @param error the error. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onErrorResponseVerdict( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - RetryDecision decision = onErrorResponse(request, error, retryCount); - return () -> decision; - } - - /** Called when the cluster that this policy is associated with closes. */ - @Override - void close(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryVerdict.java b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryVerdict.java deleted file mode 100644 index 9abb54156db..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryVerdict.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.retry; - -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryVerdict; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The verdict returned by a {@link RetryPolicy} determining what to do when a request failed. A - * verdict contains a {@link RetryDecision} indicating if a retry should be attempted at all and - * where, and a method that allows the original request to be modified before the retry. - */ -@FunctionalInterface -public interface RetryVerdict { - - /** A retry verdict that retries the same request on the same node. */ - RetryVerdict RETRY_SAME = new DefaultRetryVerdict(RetryDecision.RETRY_SAME); - - /** A retry verdict that retries the same request on the next node in the query plan. */ - RetryVerdict RETRY_NEXT = new DefaultRetryVerdict(RetryDecision.RETRY_NEXT); - - /** A retry verdict that ignores the error, returning and empty result set to the caller. */ - RetryVerdict IGNORE = new DefaultRetryVerdict(RetryDecision.IGNORE); - - /** A retry verdict that rethrows the execution error to the calling code. */ - RetryVerdict RETHROW = new DefaultRetryVerdict(RetryDecision.RETHROW); - - /** @return The retry decision to apply. */ - @NonNull - RetryDecision getRetryDecision(); - - /** - * Returns the request to retry, based on the request that was just executed (and failed). - * - *

The default retry policy always returns the request as is. Custom retry policies can use - * this method to customize the request to retry, for example, by changing its consistency level, - * query timestamp, custom payload, or even its execution profile. - * - * @param The actual type of the request. - * @param previous The request that was just executed (and failed). - * @return The request to retry. - */ - @NonNull - default RequestT getRetryRequest(@NonNull RequestT previous) { - return previous; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java deleted file mode 100644 index 2bf541c91de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Thrown when a query attempts to create a keyspace or table that already exists. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class AlreadyExistsException extends QueryValidationException { - - private final String keyspace; - private final String table; - - public AlreadyExistsException( - @NonNull Node coordinator, @NonNull String keyspace, @NonNull String table) { - this(coordinator, makeMessage(keyspace, table), keyspace, table, null, false); - } - - private AlreadyExistsException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull String keyspace, - @NonNull String table, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - this.keyspace = keyspace; - this.table = table; - } - - private static String makeMessage(String keyspace, String table) { - if (table == null || table.isEmpty()) { - return String.format("Keyspace %s already exists", keyspace); - } else { - return String.format("Object %s.%s already exists", keyspace, table); - } - } - - @NonNull - @Override - public DriverException copy() { - return new AlreadyExistsException( - getCoordinator(), getMessage(), keyspace, table, getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java deleted file mode 100644 index a408e0384f5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Thrown when the coordinator was bootstrapping when it received a query. - * - *

This exception does not go through the {@link RetryPolicy}, the query is always retried on the - * next node. Therefore the only way the client can observe this exception is in an {@link - * AllNodesFailedException}. - */ -public class BootstrappingException extends QueryExecutionException { - - public BootstrappingException(@NonNull Node coordinator) { - this(coordinator, String.format("%s is bootstrapping", coordinator), null, false); - } - - private BootstrappingException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new BootstrappingException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CASWriteUnknownException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CASWriteUnknownException.java deleted file mode 100644 index 477bf7813c9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CASWriteUnknownException.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The result of a CAS operation is in an unknown state. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)} , which will decide if it is rethrown directly to the client or if - * the request should be retried. If all other tried nodes also fail, this exception will appear in - * the {@link AllNodesFailedException} thrown to the client. - */ -public class CASWriteUnknownException extends QueryConsistencyException { - - public CASWriteUnknownException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor) { - this( - coordinator, - String.format( - "CAS operation result is unknown - proposal was not accepted by a quorum. (%d / %d)", - received, blockFor), - consistencyLevel, - received, - blockFor, - null, - false); - } - - private CASWriteUnknownException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new CASWriteUnknownException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CDCWriteFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CDCWriteFailureException.java deleted file mode 100644 index 3ce782653ab..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CDCWriteFailureException.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * An attempt was made to write to a commitlog segment which doesn't support CDC mutations. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class CDCWriteFailureException extends QueryExecutionException { - - public CDCWriteFailureException(@NonNull Node coordinator) { - super(coordinator, "Commitlog does not support CDC mutations", null, false); - } - - public CDCWriteFailureException(@NonNull Node coordinator, @NonNull String message) { - super(coordinator, "Commitlog does not support CDC mutations", null, false); - } - - private CDCWriteFailureException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new CDCWriteFailureException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java deleted file mode 100644 index 8f6052850df..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** A server-side error thrown by the coordinator node in response to a driver request. */ -public abstract class CoordinatorException extends DriverException { - - // This is also present on ExecutionInfo. But the execution info is only set for errors that are - // rethrown to the client, not on errors that get retried. It can be useful to know the node in - // the retry policy, so store it here, it might be duplicated but that doesn't matter. 
- private final Node coordinator; - - protected CoordinatorException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(message, executionInfo, null, writableStackTrace); - this.coordinator = coordinator; - } - - @NonNull - public Node getCoordinator() { - return coordinator; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java deleted file mode 100644 index a24097e6e5b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -/** A default write type supported by the driver out of the box. */ -public enum DefaultWriteType implements WriteType { - - /** A write to a single partition key. Such writes are guaranteed to be atomic and isolated. 
*/ - SIMPLE, - /** - * A write to a multiple partition key that used the distributed batch log to ensure atomicity - * (atomicity meaning that if any statement in the batch succeeds, all will eventually succeed). - */ - BATCH, - /** - * A write to a multiple partition key that doesn't use the distributed batch log. Atomicity for - * such writes is not guaranteed - */ - UNLOGGED_BATCH, - /** - * A counter write (that can be for one or multiple partition key). Such write should not be - * replayed to avoid over-counting. - */ - COUNTER, - /** - * The initial write to the distributed batch log that Cassandra performs internally before a - * BATCH write. - */ - BATCH_LOG, - /** - * A conditional write. If a timeout has this {@code WriteType}, the timeout has happened while - * doing the compare-and-swap for an conditional update. In this case, the update may or may not - * have been applied. - */ - CAS, - /** - * Indicates that the timeout was related to acquiring locks needed for updating materialized - * views affected by write operation. - */ - VIEW, - /** - * Indicates that the timeout was related to acquiring space for change data capture logs for cdc - * tracked tables. - */ - CDC, - ; - // Note that, for the sake of convenience, we also expose shortcuts to these constants on the - // WriteType interface. If you add a new enum constant, remember to update the interface as - // well. -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java deleted file mode 100644 index 31993762319..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * An error during the execution of a CQL function. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class FunctionFailureException extends QueryExecutionException { - - public FunctionFailureException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private FunctionFailureException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new FunctionFailureException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java deleted file mode 100644 index 405efa47299..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates that a query is invalid because of some configuration problem. - * - *

This is generally throw by queries that manipulate the schema (CREATE and ALTER) when the - * required configuration options are invalid. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class InvalidConfigurationInQueryException extends QueryValidationException { - - public InvalidConfigurationInQueryException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private InvalidConfigurationInQueryException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new InvalidConfigurationInQueryException( - getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java deleted file mode 100644 index 468de8a1bd0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates a syntactically correct, but invalid query. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class InvalidQueryException extends QueryValidationException { - - public InvalidQueryException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private InvalidQueryException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new InvalidQueryException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java deleted file mode 100644 index f56a7f30a7e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Thrown when the coordinator reported itself as being overloaded. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class OverloadedException extends QueryExecutionException { - - public OverloadedException(@NonNull Node coordinator) { - super(coordinator, String.format("%s is overloaded", coordinator), null, false); - } - - public OverloadedException(@NonNull Node coordinator, @NonNull String message) { - super(coordinator, String.format("%s is overloaded: %s", coordinator, message), null, false); - } - - private OverloadedException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new OverloadedException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java deleted file mode 100644 index 898a857954f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates that the contacted node reported a protocol error. - * - *

Protocol errors indicate that the client triggered a protocol violation (for instance, a - * {@code QUERY} message is sent before a {@code STARTUP} one has been sent). Protocol errors should - * be considered as a bug in the driver and reported as such. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class ProtocolError extends CoordinatorException { - - public ProtocolError(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private ProtocolError( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new ProtocolError(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java deleted file mode 100644 index 4a6f97f3342..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A failure to reach the required consistency level during the execution of a query. - * - *

Such an exception is returned when the query has been tried by Cassandra but cannot be - * achieved with the requested consistency level because either: - * - *

    - *
  • the coordinator did not receive enough replica responses within the rpc timeout set for - * Cassandra; - *
  • some replicas replied with an error. - *
- */ -public abstract class QueryConsistencyException extends QueryExecutionException { - - private final ConsistencyLevel consistencyLevel; - private final int received; - private final int blockFor; - - protected QueryConsistencyException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - this.consistencyLevel = consistencyLevel; - this.received = received; - this.blockFor = blockFor; - } - - /** The consistency level of the operation that failed. */ - @NonNull - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - /** The number of replica that had acknowledged/responded to the operation before it failed. */ - public int getReceived() { - return received; - } - - /** - * The minimum number of replica acknowledgements/responses that were required to fulfill the - * operation. - */ - public int getBlockFor() { - return blockFor; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java deleted file mode 100644 index 541a32d9fba..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** A server-side error thrown when a valid query cannot be executed. */ -public abstract class QueryExecutionException extends CoordinatorException { - - protected QueryExecutionException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java deleted file mode 100644 index 9c8dfe537b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A server-side error thrown when a query cannot be executed because it is syntactically incorrect, - * invalid or unauthorized. - */ -public abstract class QueryValidationException extends CoordinatorException { - - protected QueryValidationException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java deleted file mode 100644 index 94c4404f8d9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.util.Map; - -/** - * A non-timeout error during a read query. - * - *

This happens when some of the replicas that were contacted by the coordinator replied with an - * error. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class ReadFailureException extends QueryConsistencyException { - - private final int numFailures; - private final boolean dataPresent; - private final Map reasonMap; - - public ReadFailureException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - int numFailures, - boolean dataPresent, - @NonNull Map reasonMap) { - this( - coordinator, - String.format( - "Cassandra failure during read query at consistency %s " - + "(%d responses were required but only %d replica responded, %d failed)", - consistencyLevel, blockFor, received, numFailures), - consistencyLevel, - received, - blockFor, - numFailures, - dataPresent, - reasonMap, - null, - false); - } - - private ReadFailureException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - int numFailures, - boolean dataPresent, - @NonNull Map reasonMap, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - this.numFailures = numFailures; - this.dataPresent = dataPresent; - this.reasonMap = reasonMap; - } - - /** Returns the number of replicas that experienced a failure while executing the request. */ - public int getNumFailures() { - return numFailures; - } - - /** - * Whether the actual data was amongst the received replica responses. - * - *

During reads, Cassandra doesn't request data from every replica to minimize internal network - * traffic. Instead, some replicas are only asked for a checksum of the data. A read failure may - * occur even if enough replicas have responded to fulfill the consistency level, if only checksum - * responses have been received. This method allows to detect that case. - */ - public boolean wasDataPresent() { - return dataPresent; - } - - /** - * Returns the a failure reason code for each node that failed. - * - *

At the time of writing, the existing reason codes are: - * - *

    - *
  • {@code 0x0000}: the error does not have a specific code assigned yet, or the cause is - * unknown. - *
  • {@code 0x0001}: The read operation scanned too many tombstones (as defined by {@code - * tombstone_failure_threshold} in {@code cassandra.yaml}, causing a {@code - * TombstoneOverwhelmingException}. - *
- * - * (please refer to the Cassandra documentation for your version for the most up-to-date list of - * errors) - * - *

This feature is available for protocol v5 or above only. With lower protocol versions, the - * map will always be empty. - */ - @NonNull - public Map getReasonMap() { - return reasonMap; - } - - @NonNull - @Override - public DriverException copy() { - return new ReadFailureException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - numFailures, - dataPresent, - reasonMap, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java deleted file mode 100644 index 4dddfedf49a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A server-side timeout during a read query. - * - *

This exception is processed by {@link RetryPolicy#onReadTimeoutVerdict(Request, - * ConsistencyLevel, int, int, boolean, int)}, which will decide if it is rethrown directly to the - * client or if the request should be retried. If all other tried nodes also fail, this exception - * will appear in the {@link AllNodesFailedException} thrown to the client. - */ -public class ReadTimeoutException extends QueryConsistencyException { - - private final boolean dataPresent; - - public ReadTimeoutException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - boolean dataPresent) { - this( - coordinator, - String.format( - "Cassandra timeout during read query at consistency %s (%s). " - + "In case this was generated during read repair, the consistency level is not representative of the actual consistency.", - consistencyLevel, formatDetails(received, blockFor, dataPresent)), - consistencyLevel, - received, - blockFor, - dataPresent, - null, - false); - } - - private ReadTimeoutException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - boolean dataPresent, - ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - this.dataPresent = dataPresent; - } - - private static String formatDetails(int received, int blockFor, boolean dataPresent) { - if (received < blockFor) { - return String.format( - "%d responses were required but only %d replica responded", blockFor, received); - } else if (!dataPresent) { - return "the replica queried for data didn't respond"; - } else { - return "timeout while waiting for repair of inconsistent replica"; - } - } - - /** - * Whether the actual data was amongst the received replica responses. - * - *

During reads, Cassandra doesn't request data from every replica to minimize internal network - * traffic. Instead, some replicas are only asked for a checksum of the data. A read timeout may - * occur even if enough replicas have responded to fulfill the consistency level, if only checksum - * responses have been received. This method allows to detect that case. - */ - public boolean wasDataPresent() { - return dataPresent; - } - - @NonNull - @Override - public DriverException copy() { - return new ReadTimeoutException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - dataPresent, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java deleted file mode 100644 index de300803421..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates that the contacted node reported an internal error. - * - *

This should be considered as a server bug and reported as such. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class ServerError extends CoordinatorException { - - public ServerError(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private ServerError( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new ServerError(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java deleted file mode 100644 index 708068c0299..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A syntax error in a query. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class SyntaxError extends QueryValidationException { - - public SyntaxError(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private SyntaxError( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new SyntaxError(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java deleted file mode 100644 index 2091d166e98..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * An error during a truncation operation. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class TruncateException extends QueryExecutionException { - - public TruncateException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private TruncateException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new TruncateException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java deleted file mode 100644 index 7a6235422de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates that a query cannot be performed due to the authorization restrictions of the logged - * user. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class UnauthorizedException extends QueryValidationException { - - public UnauthorizedException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private UnauthorizedException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new UnauthorizedException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java deleted file mode 100644 index b9e9848ce36..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Thrown when the coordinator knows there is not enough replicas alive to perform a query with the - * requested consistency level. - * - *

This exception is processed by {@link RetryPolicy#onUnavailableVerdict(Request, - * ConsistencyLevel, int, int, int)}, which will decide if it is rethrown directly to the client or - * if the request should be retried. If all other tried nodes also fail, this exception will appear - * in the {@link AllNodesFailedException} thrown to the client. - */ -public class UnavailableException extends QueryExecutionException { - private final ConsistencyLevel consistencyLevel; - private final int required; - private final int alive; - - public UnavailableException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int required, - int alive) { - this( - coordinator, - String.format( - "Not enough replicas available for query at consistency %s (%d required but only %d alive)", - consistencyLevel, required, alive), - consistencyLevel, - required, - alive, - null, - false); - } - - private UnavailableException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int required, - int alive, - ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - this.consistencyLevel = consistencyLevel; - this.required = required; - this.alive = alive; - } - - /** The consistency level of the operation triggering this exception. */ - @NonNull - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - /** - * The number of replica acknowledgements/responses required to perform the operation (with its - * required consistency level). - */ - public int getRequired() { - return required; - } - - /** - * The number of replicas that were known to be alive by the coordinator node when it tried to - * execute the operation. 
- */ - public int getAlive() { - return alive; - } - - @NonNull - @Override - public DriverException copy() { - return new UnavailableException( - getCoordinator(), - getMessage(), - consistencyLevel, - required, - alive, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java deleted file mode 100644 index ffbbd2aef6f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.util.Map; - -/** - * A non-timeout error during a write query. - * - *

This happens when some of the replicas that were contacted by the coordinator replied with an - * error. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class WriteFailureException extends QueryConsistencyException { - - private final WriteType writeType; - private final int numFailures; - private final Map reasonMap; - - public WriteFailureException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - @NonNull WriteType writeType, - int numFailures, - @NonNull Map reasonMap) { - this( - coordinator, - String.format( - "Cassandra failure during write query at consistency %s " - + "(%d responses were required but only %d replica responded, %d failed)", - consistencyLevel, blockFor, received, numFailures), - consistencyLevel, - received, - blockFor, - writeType, - numFailures, - reasonMap, - null, - false); - } - - private WriteFailureException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - @NonNull WriteType writeType, - int numFailures, - @NonNull Map reasonMap, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - this.writeType = writeType; - this.numFailures = numFailures; - this.reasonMap = reasonMap; - } - - /** The type of the write for which this failure was raised. */ - @NonNull - public WriteType getWriteType() { - return writeType; - } - - /** Returns the number of replicas that experienced a failure while executing the request. */ - public int getNumFailures() { - return numFailures; - } - - /** - * Returns the a failure reason code for each node that failed. - * - *

At the time of writing, the existing reason codes are: - * - *

    - *
  • {@code 0x0000}: the error does not have a specific code assigned yet, or the cause is - * unknown. - *
  • {@code 0x0001}: The read operation scanned too many tombstones (as defined by {@code - * tombstone_failure_threshold} in {@code cassandra.yaml}, causing a {@code - * TombstoneOverwhelmingException}. - *
- * - * (please refer to the Cassandra documentation for your version for the most up-to-date list of - * errors) - * - *

This feature is available for protocol v5 or above only. With lower protocol versions, the - * map will always be empty. - */ - @NonNull - public Map getReasonMap() { - return reasonMap; - } - - @NonNull - @Override - public DriverException copy() { - return new WriteFailureException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - writeType, - numFailures, - reasonMap, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java deleted file mode 100644 index 9913dbd0a91..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A server-side timeout during a write query. - * - *

This exception is processed by {@link RetryPolicy#onWriteTimeoutVerdict(Request, - * ConsistencyLevel, WriteType, int, int, int)}, which will decide if it is rethrown directly to the - * client or if the request should be retried. If all other tried nodes also fail, this exception - * will appear in the {@link AllNodesFailedException} thrown to the client. - */ -public class WriteTimeoutException extends QueryConsistencyException { - - private final WriteType writeType; - - public WriteTimeoutException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - @NonNull WriteType writeType) { - this( - coordinator, - String.format( - "Cassandra timeout during %s write query at consistency %s " - + "(%d replica were required but only %d acknowledged the write)", - writeType, consistencyLevel, blockFor, received), - consistencyLevel, - received, - blockFor, - writeType, - null, - false); - } - - private WriteTimeoutException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - @NonNull WriteType writeType, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - this.writeType = writeType; - } - - /** The type of the write for which a timeout was raised. 
*/ - @NonNull - public WriteType getWriteType() { - return writeType; - } - - @NonNull - @Override - public DriverException copy() { - return new WriteTimeoutException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - writeType, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java deleted file mode 100644 index 05ad99e5ce4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The type of a Cassandra write query. - * - *

This information is returned by Cassandra when a write timeout is raised, to indicate what - * type of write timed out. It is useful to decide which retry decision to adopt. - * - *

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code WriteType}s are {@link DefaultWriteType} instances. - */ -public interface WriteType { - - WriteType SIMPLE = DefaultWriteType.SIMPLE; - WriteType BATCH = DefaultWriteType.BATCH; - WriteType UNLOGGED_BATCH = DefaultWriteType.UNLOGGED_BATCH; - WriteType COUNTER = DefaultWriteType.COUNTER; - WriteType BATCH_LOG = DefaultWriteType.BATCH_LOG; - WriteType CAS = DefaultWriteType.CAS; - WriteType VIEW = DefaultWriteType.VIEW; - WriteType CDC = DefaultWriteType.CDC; - - /** The textual representation that the write type is encoded to in protocol frames. */ - @NonNull - String name(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java deleted file mode 100644 index 5e10fb4d915..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java +++ /dev/null @@ -1,451 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.session; - -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.NodeFilterToDistanceEvaluatorAdapter; -import com.datastax.oss.driver.internal.core.metadata.MultiplexingNodeStateListener; -import com.datastax.oss.driver.internal.core.metadata.schema.MultiplexingSchemaChangeListener; -import com.datastax.oss.driver.internal.core.tracker.MultiplexingRequestTracker; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.UUID; -import java.util.function.Predicate; - -/** - * The arguments that can be set programmatically when building a session. - * - *

This is mostly for internal use, you only need to deal with this directly if you write custom - * {@link SessionBuilder} subclasses. - */ -public class ProgrammaticArguments { - - @NonNull - public static Builder builder() { - return new Builder(); - } - - private final List> typeCodecs; - private final NodeStateListener nodeStateListener; - private final SchemaChangeListener schemaChangeListener; - private final RequestTracker requestTracker; - private final RequestIdGenerator requestIdGenerator; - private final Map localDatacenters; - private final Map> nodeFilters; - private final Map nodeDistanceEvaluators; - private final ClassLoader classLoader; - private final AuthProvider authProvider; - private final SslEngineFactory sslEngineFactory; - private final InetSocketAddress cloudProxyAddress; - private final UUID startupClientId; - private final String startupApplicationName; - private final String startupApplicationVersion; - private final MutableCodecRegistry codecRegistry; - private final Object metricRegistry; - - private ProgrammaticArguments( - @NonNull List> typeCodecs, - @Nullable NodeStateListener nodeStateListener, - @Nullable SchemaChangeListener schemaChangeListener, - @Nullable RequestTracker requestTracker, - @Nullable RequestIdGenerator requestIdGenerator, - @NonNull Map localDatacenters, - @NonNull Map> nodeFilters, - @NonNull Map nodeDistanceEvaluators, - @Nullable ClassLoader classLoader, - @Nullable AuthProvider authProvider, - @Nullable SslEngineFactory sslEngineFactory, - @Nullable InetSocketAddress cloudProxyAddress, - @Nullable UUID startupClientId, - @Nullable String startupApplicationName, - @Nullable String startupApplicationVersion, - @Nullable MutableCodecRegistry codecRegistry, - @Nullable Object metricRegistry) { - - this.typeCodecs = typeCodecs; - this.nodeStateListener = nodeStateListener; - this.schemaChangeListener = schemaChangeListener; - this.requestTracker = requestTracker; - this.requestIdGenerator = requestIdGenerator; - 
this.localDatacenters = localDatacenters; - this.nodeFilters = nodeFilters; - this.nodeDistanceEvaluators = nodeDistanceEvaluators; - this.classLoader = classLoader; - this.authProvider = authProvider; - this.sslEngineFactory = sslEngineFactory; - this.cloudProxyAddress = cloudProxyAddress; - this.startupClientId = startupClientId; - this.startupApplicationName = startupApplicationName; - this.startupApplicationVersion = startupApplicationVersion; - this.codecRegistry = codecRegistry; - this.metricRegistry = metricRegistry; - } - - @NonNull - public List> getTypeCodecs() { - return typeCodecs; - } - - @Nullable - public NodeStateListener getNodeStateListener() { - return nodeStateListener; - } - - @Nullable - public SchemaChangeListener getSchemaChangeListener() { - return schemaChangeListener; - } - - @Nullable - public RequestTracker getRequestTracker() { - return requestTracker; - } - - @Nullable - public RequestIdGenerator getRequestIdGenerator() { - return requestIdGenerator; - } - - @NonNull - public Map getLocalDatacenters() { - return localDatacenters; - } - - @NonNull - @Deprecated - @SuppressWarnings("DeprecatedIsStillUsed") - public Map> getNodeFilters() { - return nodeFilters; - } - - @NonNull - public Map getNodeDistanceEvaluators() { - return nodeDistanceEvaluators; - } - - @Nullable - public ClassLoader getClassLoader() { - return classLoader; - } - - @Nullable - public AuthProvider getAuthProvider() { - return authProvider; - } - - @Nullable - public SslEngineFactory getSslEngineFactory() { - return sslEngineFactory; - } - - @Nullable - public InetSocketAddress getCloudProxyAddress() { - return cloudProxyAddress; - } - - @Nullable - public UUID getStartupClientId() { - return startupClientId; - } - - @Nullable - public String getStartupApplicationName() { - return startupApplicationName; - } - - @Nullable - public String getStartupApplicationVersion() { - return startupApplicationVersion; - } - - @Nullable - public MutableCodecRegistry 
getCodecRegistry() { - return codecRegistry; - } - - @Nullable - public Object getMetricRegistry() { - return metricRegistry; - } - - public static class Builder { - - private final ImmutableList.Builder> typeCodecsBuilder = ImmutableList.builder(); - private NodeStateListener nodeStateListener; - private SchemaChangeListener schemaChangeListener; - private RequestTracker requestTracker; - private RequestIdGenerator requestIdGenerator; - private ImmutableMap.Builder localDatacentersBuilder = ImmutableMap.builder(); - private final ImmutableMap.Builder> nodeFiltersBuilder = - ImmutableMap.builder(); - private final ImmutableMap.Builder - nodeDistanceEvaluatorsBuilder = ImmutableMap.builder(); - private ClassLoader classLoader; - private AuthProvider authProvider; - private SslEngineFactory sslEngineFactory; - private InetSocketAddress cloudProxyAddress; - private UUID startupClientId; - private String startupApplicationName; - private String startupApplicationVersion; - private MutableCodecRegistry codecRegistry; - private Object metricRegistry; - - @NonNull - public Builder addTypeCodecs(@NonNull TypeCodec... 
typeCodecs) { - this.typeCodecsBuilder.add(typeCodecs); - return this; - } - - @NonNull - public Builder withNodeStateListener(@Nullable NodeStateListener nodeStateListener) { - this.nodeStateListener = nodeStateListener; - return this; - } - - @NonNull - public Builder addNodeStateListener(@NonNull NodeStateListener nodeStateListener) { - Objects.requireNonNull(nodeStateListener, "nodeStateListener cannot be null"); - if (this.nodeStateListener == null) { - this.nodeStateListener = nodeStateListener; - } else { - NodeStateListener previousListener = this.nodeStateListener; - if (previousListener instanceof MultiplexingNodeStateListener) { - ((MultiplexingNodeStateListener) previousListener).register(nodeStateListener); - } else { - MultiplexingNodeStateListener multiplexingNodeStateListener = - new MultiplexingNodeStateListener(); - multiplexingNodeStateListener.register(previousListener); - multiplexingNodeStateListener.register(nodeStateListener); - this.nodeStateListener = multiplexingNodeStateListener; - } - } - return this; - } - - @NonNull - public Builder withSchemaChangeListener(@Nullable SchemaChangeListener schemaChangeListener) { - this.schemaChangeListener = schemaChangeListener; - return this; - } - - @NonNull - public Builder addSchemaChangeListener(@NonNull SchemaChangeListener schemaChangeListener) { - Objects.requireNonNull(schemaChangeListener, "schemaChangeListener cannot be null"); - if (this.schemaChangeListener == null) { - this.schemaChangeListener = schemaChangeListener; - } else { - SchemaChangeListener previousListener = this.schemaChangeListener; - if (previousListener instanceof MultiplexingSchemaChangeListener) { - ((MultiplexingSchemaChangeListener) previousListener).register(schemaChangeListener); - } else { - MultiplexingSchemaChangeListener multiplexingSchemaChangeListener = - new MultiplexingSchemaChangeListener(); - multiplexingSchemaChangeListener.register(previousListener); - 
multiplexingSchemaChangeListener.register(schemaChangeListener); - this.schemaChangeListener = multiplexingSchemaChangeListener; - } - } - return this; - } - - @NonNull - public Builder withRequestTracker(@Nullable RequestTracker requestTracker) { - this.requestTracker = requestTracker; - return this; - } - - @NonNull - public Builder addRequestTracker(@NonNull RequestTracker requestTracker) { - Objects.requireNonNull(requestTracker, "requestTracker cannot be null"); - if (this.requestTracker == null) { - this.requestTracker = requestTracker; - } else { - RequestTracker previousTracker = this.requestTracker; - if (previousTracker instanceof MultiplexingRequestTracker) { - ((MultiplexingRequestTracker) previousTracker).register(requestTracker); - } else { - MultiplexingRequestTracker multiplexingRequestTracker = new MultiplexingRequestTracker(); - multiplexingRequestTracker.register(previousTracker); - multiplexingRequestTracker.register(requestTracker); - this.requestTracker = multiplexingRequestTracker; - } - } - return this; - } - - @NonNull - public Builder withRequestIdGenerator(@Nullable RequestIdGenerator requestIdGenerator) { - this.requestIdGenerator = requestIdGenerator; - return this; - } - - @NonNull - public Builder withLocalDatacenter( - @NonNull String profileName, @NonNull String localDatacenter) { - this.localDatacentersBuilder.put(profileName, localDatacenter); - return this; - } - - @NonNull - public Builder clearDatacenters() { - this.localDatacentersBuilder = ImmutableMap.builder(); - return this; - } - - @NonNull - public Builder withLocalDatacenters(Map localDatacenters) { - for (Map.Entry entry : localDatacenters.entrySet()) { - this.localDatacentersBuilder.put(entry.getKey(), entry.getValue()); - } - return this; - } - - @NonNull - public Builder withNodeDistanceEvaluator( - @NonNull String profileName, @NonNull NodeDistanceEvaluator nodeDistanceEvaluator) { - this.nodeDistanceEvaluatorsBuilder.put(profileName, nodeDistanceEvaluator); - 
return this; - } - - @NonNull - public Builder withNodeDistanceEvaluators( - Map nodeDistanceReporters) { - for (Entry entry : nodeDistanceReporters.entrySet()) { - this.nodeDistanceEvaluatorsBuilder.put(entry.getKey(), entry.getValue()); - } - return this; - } - - /** - * @deprecated Use {@link #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)} instead. - */ - @NonNull - @Deprecated - public Builder withNodeFilter( - @NonNull String profileName, @NonNull Predicate nodeFilter) { - this.nodeFiltersBuilder.put(profileName, nodeFilter); - this.nodeDistanceEvaluatorsBuilder.put( - profileName, new NodeFilterToDistanceEvaluatorAdapter(nodeFilter)); - return this; - } - - /** @deprecated Use {@link #withNodeDistanceEvaluators(Map)} instead. */ - @NonNull - @Deprecated - public Builder withNodeFilters(Map> nodeFilters) { - for (Map.Entry> entry : nodeFilters.entrySet()) { - this.nodeFiltersBuilder.put(entry.getKey(), entry.getValue()); - this.nodeDistanceEvaluatorsBuilder.put( - entry.getKey(), new NodeFilterToDistanceEvaluatorAdapter(entry.getValue())); - } - return this; - } - - @NonNull - public Builder withClassLoader(@Nullable ClassLoader classLoader) { - this.classLoader = classLoader; - return this; - } - - @NonNull - public Builder withCloudProxyAddress(@Nullable InetSocketAddress cloudAddress) { - this.cloudProxyAddress = cloudAddress; - return this; - } - - @NonNull - public Builder withAuthProvider(@Nullable AuthProvider authProvider) { - this.authProvider = authProvider; - return this; - } - - @NonNull - public Builder withSslEngineFactory(@Nullable SslEngineFactory sslEngineFactory) { - this.sslEngineFactory = sslEngineFactory; - return this; - } - - @NonNull - public Builder withStartupClientId(@Nullable UUID startupClientId) { - this.startupClientId = startupClientId; - return this; - } - - @NonNull - public Builder withStartupApplicationName(@Nullable String startupApplicationName) { - this.startupApplicationName = startupApplicationName; - return 
this; - } - - @NonNull - public Builder withStartupApplicationVersion(@Nullable String startupApplicationVersion) { - this.startupApplicationVersion = startupApplicationVersion; - return this; - } - - @NonNull - public Builder withCodecRegistry(@Nullable MutableCodecRegistry codecRegistry) { - this.codecRegistry = codecRegistry; - return this; - } - - @NonNull - public Builder withMetricRegistry(@Nullable Object metricRegistry) { - this.metricRegistry = metricRegistry; - return this; - } - - @NonNull - public ProgrammaticArguments build() { - return new ProgrammaticArguments( - typeCodecsBuilder.build(), - nodeStateListener, - schemaChangeListener, - requestTracker, - requestIdGenerator, - localDatacentersBuilder.build(), - nodeFiltersBuilder.build(), - nodeDistanceEvaluatorsBuilder.build(), - classLoader, - authProvider, - sslEngineFactory, - cloudProxyAddress, - startupClientId, - startupApplicationName, - startupApplicationVersion, - codecRegistry, - metricRegistry); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java deleted file mode 100644 index 7d122276cbf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; - -/** - * A request executed by a {@link Session}. - * - *

This is a high-level abstraction, agnostic to the actual language (e.g. CQL). A request is - * anything that can be converted to a protocol message, provided that you register a request - * processor with the driver to do that conversion. - */ -public interface Request { - - /** - * The name of the execution profile that will be used for this request, or {@code null} if no - * profile has been set. - * - *

Note that this will be ignored if {@link #getExecutionProfile()} returns a non-null value. - * - * @see DriverConfig - */ - @Nullable - String getExecutionProfileName(); - - /** - * The execution profile to use for this request, or {@code null} if no profile has been set. - * - *

It is generally simpler to specify a profile name with {@link #getExecutionProfileName()}. - * However, this method can be used to provide a "derived" profile that was built programmatically - * by the client code. If specified, it overrides the profile name. - * - * @see DriverExecutionProfile - */ - @Nullable - DriverExecutionProfile getExecutionProfile(); - - /** - * The CQL keyspace to execute this request in, or {@code null} if this request does not specify - * any keyspace. - * - *

This overrides {@link Session#getKeyspace()} for this particular request, providing a way to - * specify the keyspace without forcing it globally on the session, nor hard-coding it in the - * query string. - * - *

This feature is only available with {@link DefaultProtocolVersion#V5 native protocol v5} or - * higher. Specifying a per-request keyspace with lower protocol versions will cause a runtime - * error. - * - * @see CASSANDRA-10145 - */ - @Nullable - CqlIdentifier getKeyspace(); - - /** - * The keyspace to use for token-aware routing. - * - *

Note that if a {@linkplain #getKeyspace() per-request keyspace} is already defined for this - * request, it takes precedence over this method. - * - *

See {@link #getRoutingKey()} for a detailed explanation of token-aware routing. - */ - @Nullable - CqlIdentifier getRoutingKeyspace(); - - /** - * The partition key to use for token-aware routing. - * - *

For each request, the driver tries to determine a routing keyspace and a - * routing key by calling the following methods: - * - *

    - *
  • routing keyspace: - *
      - *
    • the result of {@link #getKeyspace()}, if not null; - *
    • otherwise, the result of {@link #getRoutingKeyspace()}, if not null; - *
    • otherwise, the result of {@link Session#getKeyspace()}, if not empty; - *
    • otherwise, null. - *
    - *
  • routing key: - *
      - *
    • the result of {@link #getRoutingToken()}, if not null; - *
    • otherwise, the result of {@link #getRoutingKey()}, if not null; - *
    • otherwise, null. - *
    - *
- * - * This provides a hint of the partition that the request operates on. When the driver picks a - * coordinator for execution, it will prioritize the replicas that own that partition, in order to - * avoid an extra network jump on the server side. - * - *

Routing information is optional: if either keyspace or key is null, token-aware routing is - * disabled for this request. - */ - @Nullable - ByteBuffer getRoutingKey(); - - /** - * The token to use for token-aware routing. - * - *

This is an alternative to {@link #getRoutingKey()}. Both methods represent the same - * information, a request can provide one or the other. - * - *

See {@link #getRoutingKey()} for a detailed explanation of token-aware routing. - */ - @Nullable - Token getRoutingToken(); - - /** - * Returns the custom payload to send alongside the request. - * - *

This is used to exchange extra information with the server. By default, Cassandra doesn't do - * anything with this, you'll only need it if you have a custom request handler on the - * server-side. - * - * @return The custom payload, or an empty map if no payload is present. - */ - @NonNull - Map getCustomPayload(); - - /** - * Whether the request is idempotent; that is, whether applying the request twice leaves the - * database in the same state. - * - *

This is used internally for retries and speculative executions: if a request is not - * idempotent, the driver will take extra care to ensure that it is not sent twice (for example, - * don't retry if there is the slightest chance that the request reached a coordinator). - * - * @return a boolean value, or {@code null} to use the default value defined in the configuration. - * @see DefaultDriverOption#REQUEST_DEFAULT_IDEMPOTENCE - */ - @Nullable - Boolean isIdempotent(); - - /** - * How long to wait for this request to complete. This is a global limit on the duration of a - * session.execute() call, including any retries the driver might do. - * - * @return the set duration, or {@code null} to use the default value defined in the - * configuration. - * @see DefaultDriverOption#REQUEST_TIMEOUT - */ - @Nullable - Duration getTimeout(); - - /** @return The node configured on this statement, or null if none is configured. */ - @Nullable - Node getNode(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java deleted file mode 100644 index e047bf2fe09..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.CqlSessionBuilder; -import com.datastax.oss.driver.api.core.MavenCoordinates; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.DefaultMavenCoordinates; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Optional; -import java.util.concurrent.CompletionStage; - -/** - * A nexus to send requests to a Cassandra cluster. - * - *

This is a high-level abstraction capable of handling arbitrary request and result types. The - * driver's built-in {@link CqlSession} is a more convenient subtype for most client applications. - * - *

The driver's request execution logic is pluggable (see {@code RequestProcessor} in the - * internal API) to allow custom extensions. Hence the generic {@link #execute(Request, - * GenericType)} method in this interface, that makes no assumptions about the request or result - * type. - * - * @see CqlSession#builder() - */ -public interface Session extends AsyncAutoCloseable { - - /** - * The Maven coordinates of the core driver artifact. - * - *

This is intended for products that wrap or extend the driver, as a way to check - * compatibility if end-users override the driver version in their application. - */ - @NonNull - MavenCoordinates OSS_DRIVER_COORDINATES = - DefaultMavenCoordinates.buildFromResourceAndPrint( - Session.class.getResource("/com/datastax/oss/driver/Driver.properties")); - - /** - * The unique name identifying this session instance. This is used as a prefix for log messages - * and metrics. - * - *

This gets populated from the option {@code basic.session-name} in the configuration. If that - * option is absent, the driver will generate an identifier composed of the letter 's' followed by - * an incrementing counter. - * - *

Note that this is purely a client-side identifier; in particular, it has no relation with - * {@code system.local.cluster_name} on the server. - */ - @NonNull - String getName(); - - /** - * Returns a snapshot of the Cassandra cluster's topology and schema metadata. - * - *

In order to provide atomic updates, this method returns an immutable object: the node list, - * token map, and schema contained in a given instance will always be consistent with each other - * (but note that {@link Node} itself is not immutable: some of its properties will be updated - * dynamically, in particular {@link Node#getState()}). - * - *

As a consequence of the above, you should call this method each time you need a fresh view - * of the metadata. Do not call it once and store the result, because it is a frozen - * snapshot that will become stale over time. - * - *

If a metadata refresh triggers events (such as node added/removed, or schema events), then - * the new version of the metadata is guaranteed to be visible by the time you receive these - * events. - * - *

The returned object is never {@code null}, but may be empty if metadata has been disabled in - * the configuration. - */ - @NonNull - Metadata getMetadata(); - - /** Whether schema metadata is currently enabled. */ - boolean isSchemaMetadataEnabled(); - - /** - * Enable or disable schema metadata programmatically. - * - *

Use this method to override the value defined in the driver's configuration; one typical use - * case is to temporarily disable schema metadata while the client issues a sequence of DDL - * statements. - * - *

If calling this method re-enables the metadata (that is, {@link #isSchemaMetadataEnabled()} - * was false before, and becomes true as a result of the call), a refresh is also triggered. - * - * @param newValue a boolean value to enable or disable schema metadata programmatically, or - * {@code null} to use the driver's configuration. - * @see DefaultDriverOption#METADATA_SCHEMA_ENABLED - * @return if this call triggered a refresh, a future that will complete when that refresh is - * complete. Otherwise, a completed future with the current metadata. - */ - @NonNull - CompletionStage setSchemaMetadataEnabled(@Nullable Boolean newValue); - - /** - * Force an immediate refresh of the schema metadata, even if it is currently disabled (either in - * the configuration or via {@link #setSchemaMetadataEnabled(Boolean)}). - * - *

The new metadata is returned in the resulting future (and will also be reflected by {@link - * #getMetadata()} when that future completes). - */ - @NonNull - CompletionStage refreshSchemaAsync(); - - /** - * Convenience method to call {@link #refreshSchemaAsync()} and block for the result. - * - *

This must not be called on a driver thread. - */ - @NonNull - default Metadata refreshSchema() { - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly(refreshSchemaAsync()); - } - - /** - * Checks if all nodes in the cluster agree on a common schema version. - * - *

Due to the distributed nature of Cassandra, schema changes made on one node might not be - * immediately visible to others. Under certain circumstances, the driver waits until all nodes - * agree on a common schema version (namely: before a schema refresh, and before completing a - * successful schema-altering query). To do so, it queries system tables to find out the schema - * version of all nodes that are currently {@link NodeState#UP UP}. If all the versions match, the - * check succeeds, otherwise it is retried periodically, until a given timeout (specified in the - * configuration). - * - *

A schema agreement failure is not fatal, but it might produce unexpected results (for - * example, getting an "unconfigured table" error for a table that you created right before, just - * because the two queries went to different coordinators). - * - *

Note that schema agreement never succeeds in a mixed-version cluster (it would be - * challenging because the way the schema version is computed varies across server versions); the - * assumption is that schema updates are unlikely to happen during a rolling upgrade anyway. - * - * @return a future that completes with {@code true} if the nodes agree, or {@code false} if the - * timeout fired. - * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_INTERVAL - * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_TIMEOUT - */ - @NonNull - CompletionStage checkSchemaAgreementAsync(); - - /** - * Convenience method to call {@link #checkSchemaAgreementAsync()} and block for the result. - * - *

This must not be called on a driver thread. - */ - default boolean checkSchemaAgreement() { - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly(checkSchemaAgreementAsync()); - } - - /** Returns a context that provides access to all the policies used by this driver instance. */ - @NonNull - DriverContext getContext(); - - /** - * The keyspace that this session is currently connected to, or {@link Optional#empty()} if this - * session is not connected to any keyspace. - * - *

There are two ways that this can be set: before initializing the session (either with the - * {@code session-keyspace} option in the configuration, or with {@link - * CqlSessionBuilder#withKeyspace(CqlIdentifier)}); or at runtime, if the client issues a request - * that changes the keyspace (such as a CQL {@code USE} query). Note that this second method is - * inherently unsafe, since other requests expecting the old keyspace might be executing - * concurrently. Therefore it is highly discouraged, aside from trivial cases (such as a - * cqlsh-style program where requests are never concurrent). - */ - @NonNull - Optional getKeyspace(); - - /** - * Returns a gateway to the driver's DropWizard metrics, or {@link Optional#empty()} if all - * metrics are disabled, or if the driver has been configured to use MicroProfile or Micrometer - * instead of DropWizard (see {@code advanced.metrics.factory.class} in the configuration). - * - *

{@link Metrics} was originally intended to allow programmatic access to the metrics, but it - * has a hard dependency to the DropWizard API, which makes it unsuitable for alternative metric - * frameworks. A workaround is to inject your own metric registry with {@link - * SessionBuilder#withMetricRegistry(Object)} when you build the session. You can then use the - * framework's proprietary APIs to retrieve the metrics from the registry. - */ - @NonNull - Optional getMetrics(); - - /** - * Executes an arbitrary request. - * - * @param resultType the type of the result, which determines the internal request processor - * (built-in or custom) that will be used to handle the request. - * @see Session - */ - @Nullable // because ResultT could be Void - ResultT execute( - @NonNull RequestT request, @NonNull GenericType resultType); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java deleted file mode 100644 index 25500119047..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java +++ /dev/null @@ -1,1013 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase; -import com.datastax.oss.driver.api.core.auth.ProgrammaticPlainTextAuthProvider; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.ContactPoints; -import com.datastax.oss.driver.internal.core.config.cloud.CloudConfig; -import com.datastax.oss.driver.internal.core.config.cloud.CloudConfigFactory; -import 
com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.tracker.W3CContextRequestIdGenerator; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InputStream; -import java.net.InetSocketAddress; -import java.net.MalformedURLException; -import java.net.URL; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import java.util.function.Predicate; -import javax.net.ssl.SSLContext; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Base implementation to build session instances. - * - *

You only need to deal with this directly if you use custom driver extensions. For the default - * session implementation, see {@link CqlSession#builder()}. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public abstract class SessionBuilder { - - public static final String ASTRA_PAYLOAD_KEY = "traceparent"; - - private static final Logger LOG = LoggerFactory.getLogger(SessionBuilder.class); - - @SuppressWarnings("unchecked") - protected final SelfT self = (SelfT) this; - - protected DriverConfigLoader configLoader; - protected Set programmaticContactPoints = new HashSet<>(); - protected CqlIdentifier keyspace; - protected Callable cloudConfigInputStream; - - protected ProgrammaticArguments.Builder programmaticArgumentsBuilder = - ProgrammaticArguments.builder(); - private boolean programmaticSslFactory = false; - private boolean programmaticLocalDatacenter = false; - - /** - * Sets the configuration loader to use. - * - *

If you don't call this method, the builder will use the default implementation, based on the - * Typesafe config library. More precisely, configuration properties are loaded and merged from - * the following (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • {@code application.conf} (all resources on classpath with this name) - *
  • {@code application.json} (all resources on classpath with this name) - *
  • {@code application.properties} (all resources on classpath with this name) - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

This default loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - * - * @see Typesafe config's - * standard loading behavior - */ - @NonNull - public SelfT withConfigLoader(@Nullable DriverConfigLoader configLoader) { - this.configLoader = configLoader; - return self; - } - - @NonNull - @Deprecated - protected DriverConfigLoader defaultConfigLoader() { - return new DefaultDriverConfigLoader(); - } - - @NonNull - protected DriverConfigLoader defaultConfigLoader(@Nullable ClassLoader classLoader) { - if (classLoader == null) { - return new DefaultDriverConfigLoader(); - } else { - return new DefaultDriverConfigLoader(classLoader); - } - } - - /** - * Adds contact points to use for the initial connection to the cluster. - * - *

These are addresses of Cassandra nodes that the driver uses to discover the cluster - * topology. Only one contact point is required (the driver will retrieve the address of the other - * nodes automatically), but it is usually a good idea to provide more than one contact point, - * because if that single contact point is unavailable, the driver cannot initialize itself - * correctly. - * - *

Contact points can also be provided statically in the configuration. If both are specified, - * they will be merged. If both are absent, the driver will default to 127.0.0.1:9042. - * - *

Contrary to the configuration, DNS names with multiple A-records will not be handled here. - * If you need that, extract them manually with {@link java.net.InetAddress#getAllByName(String)} - * before calling this method. Similarly, if you need connect addresses to stay unresolved, make - * sure you pass unresolved instances here (see {@code advanced.resolve-contact-points} in the - * configuration for more explanations). - */ - @NonNull - public SelfT addContactPoints(@NonNull Collection contactPoints) { - for (InetSocketAddress contactPoint : contactPoints) { - addContactPoint(contactPoint); - } - return self; - } - - /** - * Adds a contact point to use for the initial connection to the cluster. - * - * @see #addContactPoints(Collection) - */ - @NonNull - public SelfT addContactPoint(@NonNull InetSocketAddress contactPoint) { - this.programmaticContactPoints.add(new DefaultEndPoint(contactPoint)); - return self; - } - - /** - * Adds contact points to use for the initial connection to the cluster. - * - *

You only need this method if you use a custom {@link EndPoint} implementation. Otherwise, - * use {@link #addContactPoints(Collection)}. - */ - @NonNull - public SelfT addContactEndPoints(@NonNull Collection contactPoints) { - for (EndPoint contactPoint : contactPoints) { - addContactEndPoint(contactPoint); - } - return self; - } - - /** - * Adds a contact point to use for the initial connection to the cluster. - * - *

You only need this method if you use a custom {@link EndPoint} implementation. Otherwise, - * use {@link #addContactPoint(InetSocketAddress)}. - */ - @NonNull - public SelfT addContactEndPoint(@NonNull EndPoint contactPoint) { - this.programmaticContactPoints.add(contactPoint); - return self; - } - - /** - * Registers additional codecs for custom type mappings. - * - * @param typeCodecs neither the individual codecs, nor the vararg array itself, can be {@code - * null}. - */ - @NonNull - public SelfT addTypeCodecs(@NonNull TypeCodec... typeCodecs) { - this.programmaticArgumentsBuilder.addTypeCodecs(typeCodecs); - return self; - } - - /** - * Registers a node state listener to use with the session. - * - *

Listeners can be registered in two ways: either programmatically with this method, or via - * the configuration using the {@code advanced.metadata.node-state-listener.classes} option. - * - *

This method unregisters any previously-registered listener. If you intend to register more - * than one listener, use {@link #addNodeStateListener(NodeStateListener)} instead. - */ - @NonNull - public SelfT withNodeStateListener(@Nullable NodeStateListener nodeStateListener) { - this.programmaticArgumentsBuilder.withNodeStateListener(nodeStateListener); - return self; - } - - /** - * Registers a node state listener to use with the session, without removing previously-registered - * listeners. - * - *

Listeners can be registered in two ways: either programmatically with this method, or via - * the configuration using the {@code advanced.metadata.node-state-listener.classes} option. - * - *

Unlike {@link #withNodeStateListener(NodeStateListener)}, this method adds the new listener - * to the list of already-registered listeners, thus allowing applications to register multiple - * listeners. When multiple listeners are registered, they are notified in sequence every time a - * new listener event is triggered. - */ - @NonNull - public SelfT addNodeStateListener(@NonNull NodeStateListener nodeStateListener) { - programmaticArgumentsBuilder.addNodeStateListener(nodeStateListener); - return self; - } - - /** - * Registers a schema change listener to use with the session. - * - *

Listeners can be registered in two ways: either programmatically with this method, or via - * the configuration using the {@code advanced.metadata.schema-change-listener.classes} option. - * - *

This method unregisters any previously-registered listener. If you intend to register more - * than one listener, use {@link #addSchemaChangeListener(SchemaChangeListener)} instead. - */ - @NonNull - public SelfT withSchemaChangeListener(@Nullable SchemaChangeListener schemaChangeListener) { - this.programmaticArgumentsBuilder.withSchemaChangeListener(schemaChangeListener); - return self; - } - - /** - * Registers a schema change listener to use with the session, without removing - * previously-registered listeners. - * - *

Listeners can be registered in two ways: either programmatically with this method, or via - * the configuration using the {@code advanced.metadata.schema-change-listener.classes} option. - * - *

Unlike {@link #withSchemaChangeListener(SchemaChangeListener)}, this method adds the new - * listener to the list of already-registered listeners, thus allowing applications to register - * multiple listeners. When multiple listeners are registered, they are notified in sequence every - * time a new listener event is triggered. - */ - @NonNull - public SelfT addSchemaChangeListener(@NonNull SchemaChangeListener schemaChangeListener) { - programmaticArgumentsBuilder.addSchemaChangeListener(schemaChangeListener); - return self; - } - - /** - * Registers a request tracker to use with the session. - * - *

Trackers can be registered in two ways: either programmatically with this method, or via the - * configuration using the {@code advanced.request-tracker.classes} option. - * - *

This method unregisters any previously-registered tracker. If you intend to register more - * than one tracker, use {@link #addRequestTracker(RequestTracker)} instead. - */ - @NonNull - public SelfT withRequestTracker(@Nullable RequestTracker requestTracker) { - this.programmaticArgumentsBuilder.withRequestTracker(requestTracker); - return self; - } - - /** - * Registers a request tracker to use with the session, without removing previously-registered - * trackers. - * - *

Trackers can be registered in two ways: either programmatically with this method, or via the - * configuration using the {@code advanced.request-tracker.classes} option. - * - *

Unlike {@link #withRequestTracker(RequestTracker)}, this method adds the new tracker to the - * list of already-registered trackers, thus allowing applications to register multiple trackers. - * When multiple trackers are registered, they are notified in sequence every time a new tracker - * event is triggered. - */ - @NonNull - public SelfT addRequestTracker(@NonNull RequestTracker requestTracker) { - programmaticArgumentsBuilder.addRequestTracker(requestTracker); - return self; - } - - /** - * Registers a request ID generator. The driver will use the generated ID in the logs and - * optionally add to the custom payload so that users can correlate logs about the same request - * from the Cassandra side. - */ - @NonNull - public SelfT withRequestIdGenerator(@NonNull RequestIdGenerator requestIdGenerator) { - this.programmaticArgumentsBuilder.withRequestIdGenerator(requestIdGenerator); - return self; - } - - /** - * Registers an authentication provider to use with the session. - * - *

If the provider is specified programmatically with this method, it overrides the - * configuration (that is, the {@code advanced.auth-provider.class} option will be ignored). - */ - @NonNull - public SelfT withAuthProvider(@Nullable AuthProvider authProvider) { - this.programmaticArgumentsBuilder.withAuthProvider(authProvider); - return self; - } - - /** - * Configures the session to use plaintext authentication with the given username and password. - * - *

This methods calls {@link #withAuthProvider(AuthProvider)} to register a special provider - * implementation. Therefore calling it overrides the configuration (that is, the {@code - * advanced.auth-provider.class} option will be ignored). - * - *

Note that this approach holds the credentials in clear text in memory, which makes them - * vulnerable to an attacker who is able to perform memory dumps. If this is not acceptable for - * you, consider writing your own {@link AuthProvider} implementation ({@link - * PlainTextAuthProviderBase} is a good starting point), and providing it either with {@link - * #withAuthProvider(AuthProvider)} or via the configuration ({@code - * advanced.auth-provider.class}). - */ - @NonNull - public SelfT withAuthCredentials(@NonNull String username, @NonNull String password) { - return withAuthProvider(new ProgrammaticPlainTextAuthProvider(username, password)); - } - - /** - * Configures the session to use DSE plaintext authentication with the given username and - * password, and perform proxy authentication with the given authorization id. - * - *

This feature is only available in DataStax Enterprise. If connecting to Apache Cassandra, - * the authorization id will be ignored; it is recommended to use {@link - * #withAuthCredentials(String, String)} instead. - * - *

This methods calls {@link #withAuthProvider(AuthProvider)} to register a special provider - * implementation. Therefore calling it overrides the configuration (that is, the {@code - * advanced.auth-provider.class} option will be ignored). - * - *

Note that this approach holds the credentials in clear text in memory, which makes them - * vulnerable to an attacker who is able to perform memory dumps. If this is not acceptable for - * you, consider writing your own {@link AuthProvider} implementation (the internal class {@code - * PlainTextAuthProviderBase} is a good starting point), and providing it either with {@link - * #withAuthProvider(AuthProvider)} or via the configuration ({@code - * advanced.auth-provider.class}). - */ - @NonNull - public SelfT withAuthCredentials( - @NonNull String username, @NonNull String password, @NonNull String authorizationId) { - return withAuthProvider( - new ProgrammaticPlainTextAuthProvider(username, password, authorizationId)); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #withAuthCredentials(String, String)}. - */ - @Deprecated - @NonNull - public SelfT withCredentials(@NonNull String username, @NonNull String password) { - return withAuthCredentials(username, password); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #withAuthCredentials(String, String,String)}. - */ - @Deprecated - @NonNull - public SelfT withCredentials( - @NonNull String username, @NonNull String password, @NonNull String authorizationId) { - return withAuthCredentials(username, password, authorizationId); - } - - /** - * Registers an SSL engine factory for the session. - * - *

If the factory is provided programmatically with this method, it overrides the configuration - * (that is, the {@code advanced.ssl-engine-factory} option will be ignored). - * - * @see ProgrammaticSslEngineFactory - */ - @NonNull - public SelfT withSslEngineFactory(@Nullable SslEngineFactory sslEngineFactory) { - this.programmaticSslFactory = true; - this.programmaticArgumentsBuilder.withSslEngineFactory(sslEngineFactory); - return self; - } - - /** - * Configures the session to use SSL with the given context. - * - *

This is a convenience method for clients that already have an {@link SSLContext} instance. - * It wraps its argument into a {@link ProgrammaticSslEngineFactory}, and passes it to {@link - * #withSslEngineFactory(SslEngineFactory)}. - * - *

If you use this method, there is no way to customize cipher suites, or turn on host name - * validation. If you need finer control, use {@link #withSslEngineFactory(SslEngineFactory)} - * directly and pass either your own implementation of {@link SslEngineFactory}, or a {@link - * ProgrammaticSslEngineFactory} created with custom cipher suites and/or host name validation. - * - *

Also, note that SSL engines will be created with advisory peer information ({@link - * SSLContext#createSSLEngine(String, int)}) whenever possible. - */ - @NonNull - public SelfT withSslContext(@Nullable SSLContext sslContext) { - return withSslEngineFactory( - sslContext == null ? null : new ProgrammaticSslEngineFactory(sslContext)); - } - - /** - * Specifies the datacenter that is considered "local" by the load balancing policy. - * - *

This is a programmatic alternative to the configuration option {@code - * basic.load-balancing-policy.local-datacenter}. If this method is used, it takes precedence and - * overrides the configuration. - * - *

Note that this setting may or may not be relevant depending on the load balancing policy - * implementation in use. The driver's built-in {@code DefaultLoadBalancingPolicy} relies on it; - * if you use a third-party implementation, refer to their documentation. - */ - public SelfT withLocalDatacenter(@NonNull String profileName, @NonNull String localDatacenter) { - this.programmaticLocalDatacenter = true; - this.programmaticArgumentsBuilder.withLocalDatacenter(profileName, localDatacenter); - return self; - } - - /** Alias to {@link #withLocalDatacenter(String, String)} for the default profile. */ - public SelfT withLocalDatacenter(@NonNull String localDatacenter) { - return withLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME, localDatacenter); - } - - /** - * Adds a custom {@link NodeDistanceEvaluator} for a particular execution profile. This assumes - * that you're also using a dedicated load balancing policy for that profile. - * - *

Node distance evaluators are honored by all the driver built-in load balancing policies. If - * you use a custom policy implementation however, you'll need to explicitly invoke the evaluator - * whenever appropriate. - * - *

If an evaluator is specified programmatically with this method, it overrides the - * configuration (that is, the {@code load-balancing-policy.evaluator.class} option will be - * ignored). - * - * @see #withNodeDistanceEvaluator(NodeDistanceEvaluator) - */ - @NonNull - public SelfT withNodeDistanceEvaluator( - @NonNull String profileName, @NonNull NodeDistanceEvaluator nodeDistanceEvaluator) { - this.programmaticArgumentsBuilder.withNodeDistanceEvaluator(profileName, nodeDistanceEvaluator); - return self; - } - - /** - * Alias to {@link #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)} for the default - * profile. - */ - @NonNull - public SelfT withNodeDistanceEvaluator(@NonNull NodeDistanceEvaluator nodeDistanceEvaluator) { - return withNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME, nodeDistanceEvaluator); - } - - /** - * Adds a custom filter to include/exclude nodes for a particular execution profile. This assumes - * that you're also using a dedicated load balancing policy for that profile. - * - *

The predicate's {@link Predicate#test(Object) test()} method will be invoked each time the - * {@link LoadBalancingPolicy} processes a topology or state change: if it returns false, the - * policy will suggest distance IGNORED (meaning the driver won't ever connect to it if all - * policies agree), and never included in any query plan. - * - *

Note that this behavior is implemented in the driver built-in load balancing policies. If - * you use a custom policy implementation, you'll need to explicitly invoke the filter. - * - *

If the filter is specified programmatically with this method, it overrides the configuration - * (that is, the {@code load-balancing-policy.filter.class} option will be ignored). - * - *

This method has been deprecated in favor of {@link - * #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)}. If you were using node - * filters, you can easily replace your filters with the following implementation of {@link - * NodeDistanceEvaluator}: - * - *

{@code
-   * public class NodeFilterToDistanceEvaluatorAdapter implements NodeDistanceEvaluator {
-   *
-   *   private final Predicate nodeFilter;
-   *
-   *   public NodeFilterToDistanceEvaluatorAdapter(Predicate nodeFilter) {
-   *     this.nodeFilter = nodeFilter;
-   *   }
-   *
-   *   public NodeDistance evaluateDistance(Node node, String localDc) {
-   *     return nodeFilter.test(node) ? null : NodeDistance.IGNORED;
-   *   }
-   * }
-   * }
- * - * The same can be achieved using a lambda + closure: - * - *
{@code
-   * Predicate nodeFilter = ...
-   * NodeDistanceEvaluator evaluator =
-   *   (node, localDc) -> nodeFilter.test(node) ? null : NodeDistance.IGNORED;
-   * }
- * - * @see #withNodeFilter(Predicate) - * @deprecated Use {@link #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)} instead. - */ - @Deprecated - @NonNull - public SelfT withNodeFilter(@NonNull String profileName, @NonNull Predicate nodeFilter) { - this.programmaticArgumentsBuilder.withNodeFilter(profileName, nodeFilter); - return self; - } - - /** - * Alias to {@link #withNodeFilter(String, Predicate)} for the default profile. - * - *

This method has been deprecated in favor of {@link - * #withNodeDistanceEvaluator(NodeDistanceEvaluator)}. See the javadocs of {@link - * #withNodeFilter(String, Predicate)} to understand how to migrate your legacy node filters. - * - * @deprecated Use {@link #withNodeDistanceEvaluator(NodeDistanceEvaluator)} instead. - */ - @Deprecated - @NonNull - public SelfT withNodeFilter(@NonNull Predicate nodeFilter) { - return withNodeFilter(DriverExecutionProfile.DEFAULT_NAME, nodeFilter); - } - - /** - * Sets the keyspace to connect the session to. - * - *

Note that this can also be provided by the configuration; if both are defined, this method - * takes precedence. - */ - @NonNull - public SelfT withKeyspace(@Nullable CqlIdentifier keyspace) { - this.keyspace = keyspace; - return self; - } - - /** - * Shortcut for {@link #withKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(keyspaceName))} - */ - @NonNull - public SelfT withKeyspace(@Nullable String keyspaceName) { - return withKeyspace(keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName)); - } - - /** - * The {@link ClassLoader} to use to reflectively load class names defined in configuration. - * - *

Unless you define a custom {@link #configLoader}, this class loader will also be used to - * locate application-specific configuration resources. - * - *

If you do not provide any custom class loader, the driver will attempt to use the following - * ones: - * - *

    - *
  1. When reflectively loading class names defined in configuration: same class loader that - * loaded the core driver classes. - *
  2. When locating application-specific configuration resources: the current thread's - * {@linkplain Thread#getContextClassLoader() context class loader}. - *
- * - * This is generally the right thing to do. - * - *

Defining a different class loader is typically only needed in web or OSGi environments where - * there are complex class loading requirements. - * - *

For example, if the driver jar is loaded by the web server's system class loader (that is, - * the driver jar was placed in the "/lib" folder of the web server), but the application tries to - * load a custom load balancing policy declared in the web app's "WEB-INF/lib" folder, the system - * class loader will not be able to load such class. Instead, you must use the web app's class - * loader, that you can obtain by calling {@link Thread#getContextClassLoader()}: - * - *

{@code
-   * CqlSession.builder()
-   *   .addContactEndPoint(...)
-   *   .withClassLoader(Thread.currentThread().getContextClassLoader())
-   *   .build();
-   * }
- * - * Indeed, in most web environments, {@code Thread.currentThread().getContextClassLoader()} will - * return the web app's class loader, which is a child of the web server's system class loader. - * This class loader is thus capable of loading both the implemented interface and the - * implementing class, in spite of them being declared in different places. - * - *

For OSGi deployments, it is usually not necessary to use this method. Even if the - * implemented interface and the implementing class are located in different bundles, the right - * class loader to use should be the default one (the driver bundle's class loader). In - * particular, it is not advised to rely on {@code Thread.currentThread().getContextClassLoader()} - * in OSGi environments, so you should never pass that class loader to this method. See Using - * a custom ClassLoader in our OSGi online docs for more information. - */ - @NonNull - public SelfT withClassLoader(@Nullable ClassLoader classLoader) { - this.programmaticArgumentsBuilder.withClassLoader(classLoader); - return self; - } - - /** - * Configures this SessionBuilder for Cloud deployments by retrieving connection information from - * the provided {@link Path}. - * - *

To connect to a Cloud database, you must first download the secure database bundle from the - * DataStax Astra console that contains the connection information, then instruct the driver to - * read its contents using either this method or one if its variants. - * - *

For more information, please refer to the DataStax Astra documentation. - * - * @param cloudConfigPath Path to the secure connect bundle zip file. - * @see #withCloudSecureConnectBundle(URL) - * @see #withCloudSecureConnectBundle(InputStream) - */ - @NonNull - public SelfT withCloudSecureConnectBundle(@NonNull Path cloudConfigPath) { - try { - URL cloudConfigUrl = cloudConfigPath.toAbsolutePath().normalize().toUri().toURL(); - this.cloudConfigInputStream = cloudConfigUrl::openStream; - } catch (MalformedURLException e) { - throw new IllegalArgumentException("Incorrect format of cloudConfigPath", e); - } - return self; - } - - /** - * Registers a CodecRegistry to use for the session. - * - *

When both this and {@link #addTypeCodecs(TypeCodec[])} are called, the added type codecs - * will be registered on the provided CodecRegistry. - */ - @NonNull - public SelfT withCodecRegistry(@Nullable MutableCodecRegistry codecRegistry) { - this.programmaticArgumentsBuilder.withCodecRegistry(codecRegistry); - return self; - } - - /** - * Configures this SessionBuilder for Cloud deployments by retrieving connection information from - * the provided {@link URL}. - * - *

To connect to a Cloud database, you must first download the secure database bundle from the - * DataStax Astra console that contains the connection information, then instruct the driver to - * read its contents using either this method or one if its variants. - * - *

For more information, please refer to the DataStax Astra documentation. - * - * @param cloudConfigUrl URL to the secure connect bundle zip file. - * @see #withCloudSecureConnectBundle(Path) - * @see #withCloudSecureConnectBundle(InputStream) - */ - @NonNull - public SelfT withCloudSecureConnectBundle(@NonNull URL cloudConfigUrl) { - this.cloudConfigInputStream = cloudConfigUrl::openStream; - return self; - } - - /** - * Configures this SessionBuilder for Cloud deployments by retrieving connection information from - * the provided {@link InputStream}. - * - *

To connect to a Cloud database, you must first download the secure database bundle from the - * DataStax Astra console that contains the connection information, then instruct the driver to - * read its contents using either this method or one if its variants. - * - *

For more information, please refer to the DataStax Astra documentation. - * - *

Note that the provided stream will be consumed and closed when either {@link - * #build()} or {@link #buildAsync()} are called; attempting to reuse it afterwards will result in - * an error being thrown. - * - * @param cloudConfigInputStream A stream containing the secure connect bundle zip file. - * @see #withCloudSecureConnectBundle(Path) - * @see #withCloudSecureConnectBundle(URL) - */ - @NonNull - public SelfT withCloudSecureConnectBundle(@NonNull InputStream cloudConfigInputStream) { - this.cloudConfigInputStream = () -> cloudConfigInputStream; - return self; - } - - /** - * Configures this SessionBuilder to use the provided Cloud proxy endpoint. - * - *

Normally, this method should not be called directly; the normal and easiest way to configure - * the driver for Cloud deployments is through a {@linkplain #withCloudSecureConnectBundle(URL) - * secure connect bundle}. - * - *

Setting this option to any non-null address will make the driver use a special topology - * monitor tailored for Cloud deployments. This topology monitor assumes that the target cluster - * should be contacted through the proxy specified here, using SNI routing. - * - *

For more information, please refer to the DataStax Astra documentation. - * - * @param cloudProxyAddress The address of the Cloud proxy to use. - * @see Server Name Indication - */ - @NonNull - public SelfT withCloudProxyAddress(@Nullable InetSocketAddress cloudProxyAddress) { - this.programmaticArgumentsBuilder.withCloudProxyAddress(cloudProxyAddress); - return self; - } - - /** - * A unique identifier for the created session. - * - *

It will be sent in the {@code STARTUP} protocol message, under the key {@code CLIENT_ID}, - * for each new connection established by the driver. Currently, this information is used by - * Insights monitoring (if the target cluster does not support Insights, the entry will be ignored - * by the server). - * - *

If you don't call this method, the driver will generate an identifier with {@link - * Uuids#random()}. - */ - @NonNull - public SelfT withClientId(@Nullable UUID clientId) { - this.programmaticArgumentsBuilder.withStartupClientId(clientId); - return self; - } - - /** - * The name of the application using the created session. - * - *

It will be sent in the {@code STARTUP} protocol message, under the key {@code - * APPLICATION_NAME}, for each new connection established by the driver. Currently, this - * information is used by Insights monitoring (if the target cluster does not support Insights, - * the entry will be ignored by the server). - * - *

This can also be defined in the driver configuration with the option {@code - * basic.application.name}; if you specify both, this method takes precedence and the - * configuration option will be ignored. If neither is specified, the entry is not included in the - * message. - */ - @NonNull - public SelfT withApplicationName(@Nullable String applicationName) { - this.programmaticArgumentsBuilder.withStartupApplicationName(applicationName); - return self; - } - - /** - * The version of the application using the created session. - * - *

It will be sent in the {@code STARTUP} protocol message, under the key {@code - * APPLICATION_VERSION}, for each new connection established by the driver. Currently, this - * information is used by Insights monitoring (if the target cluster does not support Insights, - * the entry will be ignored by the server). - * - *

This can also be defined in the driver configuration with the option {@code - * basic.application.version}; if you specify both, this method takes precedence and the - * configuration option will be ignored. If neither is specified, the entry is not included in the - * message. - */ - @NonNull - public SelfT withApplicationVersion(@Nullable String applicationVersion) { - this.programmaticArgumentsBuilder.withStartupApplicationVersion(applicationVersion); - return self; - } - - /** - * The metric registry object for storing driver metrics. - * - *

The argument should be an instance of the base registry type for the metrics framework you - * are using (see {@code advanced.metrics.factory.class} in the configuration): - * - *

    - *
  • Dropwizard (the default): {@code com.codahale.metrics.MetricRegistry} - *
  • Micrometer: {@code io.micrometer.core.instrument.MeterRegistry} - *
  • MicroProfile: {@code org.eclipse.microprofile.metrics.MetricRegistry} - *
- * - * Only MicroProfile requires an external instance of its registry to be provided. For - * Micrometer, if no Registry object is provided, Micrometer's {@code globalRegistry} will be - * used. For Dropwizard, if no Registry object is provided, an instance of {@code MetricRegistry} - * will be created and used. - */ - @NonNull - public SelfT withMetricRegistry(@Nullable Object metricRegistry) { - this.programmaticArgumentsBuilder.withMetricRegistry(metricRegistry); - return self; - } - - /** - * Creates the session with the options set by this builder. - * - *

The session initialization will happen asynchronously in a driver internal thread pool. - * - * @return a completion stage that completes with the session when it is fully initialized. - */ - @NonNull - public CompletionStage buildAsync() { - CompletionStage buildStage = buildDefaultSessionAsync(); - CompletionStage wrapStage = buildStage.thenApply(this::wrap); - // thenApply does not propagate cancellation (!) - CompletableFutures.propagateCancellation(wrapStage, buildStage); - return wrapStage; - } - /** - * Convenience method to call {@link #buildAsync()} and block on the result. - * - *

Usage in non-blocking applications: beware that session initialization is a costly - * operation. It should only be triggered from a thread that is allowed to block. If that is not - * the case, consider using {@link #buildAsync()} instead. - * - *

This must not be called on a driver thread. - */ - @NonNull - public SessionT build() { - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly(buildAsync()); - } - - protected abstract SessionT wrap(@NonNull CqlSession defaultSession); - - @NonNull - protected final CompletionStage buildDefaultSessionAsync() { - try { - - ProgrammaticArguments programmaticArguments = programmaticArgumentsBuilder.build(); - - DriverConfigLoader configLoader = - this.configLoader != null - ? this.configLoader - : defaultConfigLoader(programmaticArguments.getClassLoader()); - - DriverExecutionProfile defaultConfig = configLoader.getInitialConfig().getDefaultProfile(); - if (cloudConfigInputStream == null) { - String configUrlString = - defaultConfig.getString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, null); - if (configUrlString != null) { - cloudConfigInputStream = () -> getURL(configUrlString).openStream(); - } - } - List configContactPoints = - defaultConfig.getStringList(DefaultDriverOption.CONTACT_POINTS, Collections.emptyList()); - if (cloudConfigInputStream != null) { - // override request id generator, unless user has already set it - if (programmaticArguments.getRequestIdGenerator() == null) { - programmaticArgumentsBuilder.withRequestIdGenerator( - new W3CContextRequestIdGenerator(ASTRA_PAYLOAD_KEY)); - LOG.debug( - "A secure connect bundle is provided, using W3CContextRequestIdGenerator as request ID generator."); - } - if (!programmaticContactPoints.isEmpty() || !configContactPoints.isEmpty()) { - LOG.info( - "Both a secure connect bundle and contact points were provided. These are mutually exclusive. 
The contact points from the secure bundle will have priority."); - // clear the contact points provided in the setting file and via addContactPoints - configContactPoints = Collections.emptyList(); - programmaticContactPoints = new HashSet<>(); - } - - if (programmaticSslFactory - || defaultConfig.isDefined(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS)) { - LOG.info( - "Both a secure connect bundle and SSL options were provided. They are mutually exclusive. The SSL options from the secure bundle will have priority."); - } - CloudConfig cloudConfig = - new CloudConfigFactory().createCloudConfig(cloudConfigInputStream.call()); - addContactEndPoints(cloudConfig.getEndPoints()); - - boolean localDataCenterDefined = - anyProfileHasDatacenterDefined(configLoader.getInitialConfig()); - if (programmaticLocalDatacenter || localDataCenterDefined) { - LOG.info( - "Both a secure connect bundle and a local datacenter were provided. They are mutually exclusive. The local datacenter from the secure bundle will have priority."); - programmaticArgumentsBuilder.clearDatacenters(); - } - withLocalDatacenter(cloudConfig.getLocalDatacenter()); - withSslEngineFactory(cloudConfig.getSslEngineFactory()); - withCloudProxyAddress(cloudConfig.getProxyAddress()); - programmaticArguments = programmaticArgumentsBuilder.build(); - } - - boolean resolveAddresses = - defaultConfig.getBoolean(DefaultDriverOption.RESOLVE_CONTACT_POINTS, true); - - Set contactPoints = - ContactPoints.merge(programmaticContactPoints, configContactPoints, resolveAddresses); - - if (keyspace == null && defaultConfig.isDefined(DefaultDriverOption.SESSION_KEYSPACE)) { - keyspace = - CqlIdentifier.fromCql(defaultConfig.getString(DefaultDriverOption.SESSION_KEYSPACE)); - } - - return DefaultSession.init( - (InternalDriverContext) buildContext(configLoader, programmaticArguments), - contactPoints, - keyspace); - - } catch (Throwable t) { - // We construct the session synchronously (until the init() call), but async clients 
expect a - // failed future if anything goes wrong. So wrap any error from that synchronous part. - return CompletableFutures.failedFuture(t); - } - } - - private boolean anyProfileHasDatacenterDefined(DriverConfig driverConfig) { - for (DriverExecutionProfile driverExecutionProfile : driverConfig.getProfiles().values()) { - if (driverExecutionProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) { - return true; - } - } - return false; - } - - /** - * Returns URL based on the configUrl setting. If the configUrl has no protocol provided, the - * method will fallback to file:// protocol and return URL that has file protocol specified. - * - * @param configUrl url to config secure bundle - * @return URL with file protocol if there was not explicit protocol provided in the configUrl - * setting - */ - private URL getURL(String configUrl) throws MalformedURLException { - try { - return new URL(configUrl); - } catch (MalformedURLException e1) { - try { - return Paths.get(configUrl).toAbsolutePath().normalize().toUri().toURL(); - } catch (MalformedURLException e2) { - e2.addSuppressed(e1); - throw e2; - } - } - } - - /** - * This must return an instance of {@code InternalDriverContext} (it's not expressed - * directly in the signature to avoid leaking that type through the protected API). 
- */ - protected DriverContext buildContext( - DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - - // Preserve backward compatibility with the deprecated method: - @SuppressWarnings("deprecation") - DriverContext legacyApiContext = - buildContext( - configLoader, - programmaticArguments.getTypeCodecs(), - programmaticArguments.getNodeStateListener(), - programmaticArguments.getSchemaChangeListener(), - programmaticArguments.getRequestTracker(), - programmaticArguments.getLocalDatacenters(), - programmaticArguments.getNodeFilters(), - programmaticArguments.getClassLoader()); - if (legacyApiContext != null) { - return legacyApiContext; - } - - return new DefaultDriverContext(configLoader, programmaticArguments); - } - - /** - * @deprecated this method only exists for backward compatibility (if a subclass written for - * driver 4.1.0 returns a non-null result, that value will be used). Please override {@link - * #buildContext(DriverConfigLoader, ProgrammaticArguments)} instead. - */ - @Deprecated - @SuppressWarnings("DeprecatedIsStillUsed") - protected DriverContext buildContext( - DriverConfigLoader configLoader, - List> typeCodecs, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - RequestTracker requestTracker, - Map localDatacenters, - Map> nodeFilters, - ClassLoader classLoader) { - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java deleted file mode 100644 index 73d347d533e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session.throttling; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Closeable; - -/** - * Limits the number of concurrent requests executed by the driver. - * - *

Usage in non-blocking applications: beware that some implementations of this interface use - * locks for internal coordination, and do not qualify as lock-free. If your application enforces - * strict lock-freedom, then you should use the {@code PassThroughRequestThrottler} or the {@code - * ConcurrencyLimitingRequestThrottler}. - */ -public interface RequestThrottler extends Closeable { - - /** - * Registers a new request to be throttled. The throttler will invoke {@link - * Throttled#onThrottleReady(boolean)} when the request is allowed to proceed. - */ - void register(@NonNull Throttled request); - - /** - * Signals that a request has succeeded. This indicates to the throttler that another request - * might be started. - */ - void signalSuccess(@NonNull Throttled request); - - /** - * Signals that a request has failed. This indicates to the throttler that another request might - * be started. - */ - void signalError(@NonNull Throttled request, @NonNull Throwable error); - - /** - * Signals that a request has timed out. This indicates to the throttler that this request has - * stopped (if it was running already), or that it doesn't need to be started in the future. - * - *

Note: requests are responsible for handling their own timeout. The throttler does not - * perform time-based eviction on pending requests. - */ - void signalTimeout(@NonNull Throttled request); - - /** - * Signals that a request has been cancelled. This indicates to the throttler that another request - * might be started. - */ - default void signalCancel(@NonNull Throttled request) { - // no-op for backward compatibility purposes - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java deleted file mode 100644 index 6fd562804da..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session.throttling; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A request that may be subjected to throttling by a {@link - * com.datastax.oss.driver.api.core.session.throttling.RequestThrottler}. 
- */ -public interface Throttled { - - /** - * Invoked by the throttler to indicate that the request can now start. The request must wait for - * this call until it does any "actual" work (typically, writing to a connection). - * - * @param wasDelayed indicates whether the throttler delayed at all; this is so that requests - * don't have to rely on measuring time to determine it (this is useful for metrics). - */ - void onThrottleReady(boolean wasDelayed); - - /** - * Invoked by the throttler to indicate that the request cannot be fulfilled. Typically, this - * means we've reached maximum capacity, and the request can't even be enqueued. This error must - * be rethrown to the client. - * - * @param error the error that the request should be completed (exceptionally) with. - */ - void onThrottleFailure(@NonNull RequestThrottlingException error); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java deleted file mode 100644 index 163204ba62d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.specex; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * The policy that decides if the driver will send speculative queries to the next nodes when the - * current node takes too long to respond. - */ -public interface SpeculativeExecutionPolicy extends AutoCloseable { - - /** - * @param node the node that caused the speculative execution (that is, the node that was queried - * previously but was too slow to answer) - * @param keyspace the CQL keyspace currently associated to the session. This is set either - * through the configuration, by calling {@link SessionBuilder#withKeyspace(CqlIdentifier)}, - * or by manually executing a {@code USE} CQL statement. It can be {@code null} if the session - * has no keyspace. - * @param request the request to execute. - * @param runningExecutions the number of executions that are already running (including the - * initial, non-speculative request). For example, if this is 2 it means the initial attempt - * was sent, then the driver scheduled a first speculative execution, and it is now asking for - * the delay until the second speculative execution. - * @return the time (in milliseconds) until a speculative request is sent to the next node, or 0 - * to send it immediately, or a negative value to stop sending requests. - */ - long nextExecution( - @NonNull Node node, - @Nullable CqlIdentifier keyspace, - @NonNull Request request, - int runningExecutions); - - /** Called when the cluster that this policy is associated with closes. 
*/ - @Override - void close(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.java deleted file mode 100644 index d65eaa864aa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLParameters; - -/** - * An SSL engine factory that allows you to configure the driver programmatically, by passing your - * own {@link SSLContext}. - * - *

Note that this class will create SSL engines with advisory peer information ({@link - * SSLContext#createSSLEngine(String, int)}) whenever possible. - * - *

If those defaults do not work for you, it should be pretty straightforward to write your own - * implementation by extending or duplicating this class. - * - * @see SessionBuilder#withSslEngineFactory(SslEngineFactory) - * @see SessionBuilder#withSslContext(SSLContext) - */ -public class ProgrammaticSslEngineFactory implements SslEngineFactory { - - protected final SSLContext sslContext; - protected final String[] cipherSuites; - protected final boolean requireHostnameValidation; - protected final boolean allowDnsReverseLookupSan; - - /** - * Creates an instance with the given {@link SSLContext}, default cipher suites and no host name - * validation. - * - * @param sslContext the {@link SSLContext} to use. - */ - public ProgrammaticSslEngineFactory(@NonNull SSLContext sslContext) { - this(sslContext, null); - } - - /** - * Creates an instance with the given {@link SSLContext} and cipher suites, and no host name - * validation. - * - * @param sslContext the {@link SSLContext} to use. - * @param cipherSuites the cipher suites to use, or null to use the default ones. - */ - public ProgrammaticSslEngineFactory( - @NonNull SSLContext sslContext, @Nullable String[] cipherSuites) { - this(sslContext, cipherSuites, false); - } - - /** - * Creates an instance with the given {@link SSLContext}, cipher suites and host name validation. - * - * @param sslContext the {@link SSLContext} to use. - * @param cipherSuites the cipher suites to use, or null to use the default ones. - * @param requireHostnameValidation whether to enable host name validation. If enabled, host name - * validation will be done using HTTPS algorithm. - */ - public ProgrammaticSslEngineFactory( - @NonNull SSLContext sslContext, - @Nullable String[] cipherSuites, - boolean requireHostnameValidation) { - this(sslContext, cipherSuites, requireHostnameValidation, true); - } - - /** - * Creates an instance with the given {@link SSLContext}, cipher suites and host name validation. 
- * - * @param sslContext the {@link SSLContext} to use. - * @param cipherSuites the cipher suites to use, or null to use the default ones. - * @param requireHostnameValidation whether to enable host name validation. If enabled, host name - * validation will be done using HTTPS algorithm. - * @param allowDnsReverseLookupSan whether to allow raw server IPs to be DNS reverse-resolved to - * choose the appropriate Subject Alternative Name. - */ - public ProgrammaticSslEngineFactory( - @NonNull SSLContext sslContext, - @Nullable String[] cipherSuites, - boolean requireHostnameValidation, - boolean allowDnsReverseLookupSan) { - this.sslContext = sslContext; - this.cipherSuites = cipherSuites; - this.requireHostnameValidation = requireHostnameValidation; - this.allowDnsReverseLookupSan = allowDnsReverseLookupSan; - } - - @NonNull - @Override - public SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint) { - SSLEngine engine; - SocketAddress remoteAddress = remoteEndpoint.resolve(); - if (remoteAddress instanceof InetSocketAddress) { - InetSocketAddress socketAddress = (InetSocketAddress) remoteAddress; - engine = - sslContext.createSSLEngine( - allowDnsReverseLookupSan - ? 
socketAddress.getHostName() - : socketAddress.getHostString(), - socketAddress.getPort()); - } else { - engine = sslContext.createSSLEngine(); - } - engine.setUseClientMode(true); - if (cipherSuites != null) { - engine.setEnabledCipherSuites(cipherSuites); - } - if (requireHostnameValidation) { - SSLParameters parameters = engine.getSSLParameters(); - parameters.setEndpointIdentificationAlgorithm("HTTPS"); - engine.setSSLParameters(parameters); - } - return engine; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java deleted file mode 100644 index db4f18a97b9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import javax.net.ssl.SSLEngine; - -/** - * Extension point to configure SSL based on the built-in JDK implementation. - * - *

Note that, for advanced use cases (such as bypassing the JDK in favor of another SSL - * implementation), the driver's internal API provides a lower-level interface: {@link - * com.datastax.oss.driver.internal.core.ssl.SslHandlerFactory}. - */ -public interface SslEngineFactory extends AutoCloseable { - /** - * Creates a new SSL engine each time a connection is established. - * - * @param remoteEndpoint the remote endpoint we are connecting to (the address of the Cassandra - * node). - */ - @NonNull - SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java deleted file mode 100644 index a0cb3e73397..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** Support for secured communication between the driver and Cassandra nodes. 
*/ -package com.datastax.oss.driver.api.core.ssl; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java deleted file mode 100644 index b1139dd9f4d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.time; - -import com.datastax.oss.driver.api.core.cql.Statement; - -/** - * Generates client-side, microsecond-precision query timestamps. - * - *

These timestamps are used to order queries server-side, and resolve potential conflicts. - */ -public interface TimestampGenerator extends AutoCloseable { - - /** - * Returns the next timestamp, in microseconds. - * - *

The timestamps returned by this method should be monotonic; that is, successive invocations - * should return strictly increasing results. Note that this might not be possible using the clock - * alone, if it is not precise enough; alternative strategies might include incrementing the last - * returned value if the clock tick hasn't changed, and possibly drifting in the future. See the - * built-in driver implementations for more details. - * - * @return the next timestamp, or {@link Statement#NO_DEFAULT_TIMESTAMP} to indicate that the - * driver should not send one with the query (and let Cassandra generate a server-side - * timestamp). - */ - long next(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestIdGenerator.java deleted file mode 100644 index 21db3793b01..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestIdGenerator.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.tracker; - -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * Interface responsible for generating request IDs. - * - *

Note that all request IDs have a parent/child relationship. A "session request ID" can loosely - * be thought of as encompassing a sequence of a request + any attendant retries, speculative - * executions etc. It's scope is identical to that of a {@link - * com.datastax.oss.driver.internal.core.cql.CqlRequestHandler}. A "node request ID" represents a - * single request within this larger scope. Note that a request corresponding to a request ID may be - * retried; in that case the retry count will be appended to the corresponding identifier in the - * logs. - */ -public interface RequestIdGenerator { - - String DEFAULT_PAYLOAD_KEY = "request-id"; - - /** - * Generates a unique identifier for the session request. This will be the identifier for the - * entire `session.execute()` call. This identifier will be added to logs, and propagated to - * request trackers. - * - * @return a unique identifier for the session request - */ - String getSessionRequestId(); - - /** - * Generates a unique identifier for the node request. This will be the identifier for the CQL - * request against a particular node. There can be one or more node requests for a single session - * request, due to retries or speculative executions. This identifier will be added to logs, and - * propagated to request trackers. 
- * - * @param statement the statement to be executed - * @param parentId the session request identifier - * @return a unique identifier for the node request - */ - String getNodeRequestId(@NonNull Request statement, @NonNull String parentId); - - default String getCustomPayloadKey() { - return DEFAULT_PAYLOAD_KEY; - } - - default Statement getDecoratedStatement( - @NonNull Statement statement, @NonNull String requestId) { - - Map existing = new HashMap<>(statement.getCustomPayload()); - String key = getCustomPayloadKey(); - - // Add or overwrite - existing.put(key, ByteBuffer.wrap(requestId.getBytes(StandardCharsets.UTF_8))); - - // Allowing null key/values - // Wrap a map inside to be immutable without instanciating a new map - Map unmodifiableMap = Collections.unmodifiableMap(existing); - - return statement.setCustomPayload(unmodifiableMap); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java b/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java deleted file mode 100644 index 065b41e496a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.tracker; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Tracks request execution for a session. - * - *

Implementations of this interface can be registered either via the configuration (see {@code - * reference.conf} in the manual or core driver JAR), or programmatically via {@link - * SessionBuilder#addRequestTracker(RequestTracker)}. - */ -public interface RequestTracker extends AutoCloseable { - - /** - * @deprecated This method only exists for backward compatibility. Override {@link - * #onSuccess(Request, long, DriverExecutionProfile, Node, String)} instead. - */ - @Deprecated - default void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node) {} - - /** - * Invoked each time a session request succeeds. A session request is a `session.execute()` call - * - * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, - * GenericType) session.execute} call until the result is made available to the client). - * @param executionProfile the execution profile of this request. - * @param node the node that returned the successful response. - * @param sessionRequestLogPrefix the dedicated log prefix for this request - */ - default void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String sessionRequestLogPrefix) { - // If client doesn't override onSuccess with sessionRequestLogPrefix delegate call to the old - // method - onSuccess(request, latencyNanos, executionProfile, node); - } - - /** - * @deprecated This method only exists for backward compatibility. Override {@link - * #onError(Request, Throwable, long, DriverExecutionProfile, Node, String)} instead. - */ - @Deprecated - default void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @Nullable Node node) {} - - /** - * Invoked each time a session request fails. 
A session request is a `session.execute()` call - * - * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, - * GenericType) session.execute} call until the error is propagated to the client). - * @param executionProfile the execution profile of this request. - * @param node the node that returned the error response, or {@code null} if the error occurred - * @param sessionRequestLogPrefix the dedicated log prefix for this request - */ - default void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @Nullable Node node, - @NonNull String sessionRequestLogPrefix) { - // If client doesn't override onError with sessionRequestLogPrefix delegate call to the old - // method - onError(request, error, latencyNanos, executionProfile, node); - } - - /** - * @deprecated This method only exists for backward compatibility. Override {@link - * #onNodeError(Request, Throwable, long, DriverExecutionProfile, Node, String)} instead. - */ - @Deprecated - default void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node) {} - - /** - * Invoked each time a node request fails. A node request is a CQL request sent to a particular - * node. There can be one or more node requests for a single session request, due to retries or - * speculative executions. - * - * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, - * GenericType) session.execute} call until the error is propagated to the client). - * @param executionProfile the execution profile of this request. - * @param node the node that returned the error response. 
- * @param nodeRequestLogPrefix the dedicated log prefix for this request - */ - default void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // If client doesn't override onNodeError with nodeRequestLogPrefix delegate call to the old - // method - onNodeError(request, error, latencyNanos, executionProfile, node); - } - - /** - * @deprecated This method only exists for backward compatibility. Override {@link - * #onNodeSuccess(Request, long, DriverExecutionProfile, Node, String)} instead. - */ - @Deprecated - default void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node) {} - - /** - * Invoked each time a node request succeeds. A node request is a CQL request sent to a particular - * node. There can be one or more node requests for a single session request, due to retries or - * speculative executions. - * - * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, - * GenericType) session.execute} call until the result is made available to the client). - * @param executionProfile the execution profile of this request. - * @param node the node that returned the successful response. - * @param nodeRequestLogPrefix the dedicated log prefix for this request - */ - default void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // If client doesn't override onNodeSuccess with nodeRequestLogPrefix delegate call to the old - // method - onNodeSuccess(request, latencyNanos, executionProfile, node); - } - - /** - * Invoked when the session is ready to process user requests. - * - *

WARNING: if you use {@code session.execute()} in your tracker implementation, keep in - * mind that those requests will in turn recurse back into {@code onSuccess} / {@code onError} - * methods. Make sure you don't trigger an infinite loop; one way to do that is to use a - * custom execution profile for internal requests. - * - *

This corresponds to the moment when {@link SessionBuilder#build()} returns, or the future - * returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization fails, - * this method will not get called. - * - *

Listener methods are invoked from different threads; if you store the session in a field, - * make it at least volatile to guarantee proper publication. - * - *

This method is guaranteed to be the first one invoked on this object. - * - *

The default implementation is empty. - */ - default void onSessionReady(@NonNull Session session) {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/ContainerType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/ContainerType.java deleted file mode 100644 index 93e92ec2c2b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/ContainerType.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Representation of a type which "contains" some other type. This might be a collection type or it - * could be some other kind of container; the term is deliberately left somewhat vague. 
- */ -public interface ContainerType { - - @NonNull - DataType getElementType(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java deleted file mode 100644 index 93f913a584d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CustomType extends DataType { - /** - * The fully qualified name of the subtype of {@code org.apache.cassandra.db.marshal.AbstractType} - * that represents this type server-side. 
- */ - @NonNull - String getClassName(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - return String.format("'%s'", getClassName()); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.CUSTOM; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java deleted file mode 100644 index 92e5cc5edf0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.Detachable; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The type of a CQL column, field or function argument. - * - *

The default implementations returned by the driver are immutable and serializable. If you - * write your own implementations, they should at least be thread-safe; serializability is not - * mandatory, but recommended for use with some 3rd-party tools like Apache Spark ™. - * - * @see DataTypes - */ -public interface DataType extends Detachable { - /** The code of the data type in the native protocol specification. */ - int getProtocolCode(); - - /** - * Builds an appropriate representation for use in a CQL query. - * - * @param includeFrozen whether to include the {@code frozen<...>} keyword if applicable. This - * will need to be set depending on where the result is used: for example, {@code CREATE - * TABLE} statements use the frozen keyword, whereas it should never appear in {@code CREATE - * FUNCTION}. - * @param pretty whether to pretty-print UDT names (as described in {@link - * CqlIdentifier#asCql(boolean)}. - */ - @NonNull - String asCql(boolean includeFrozen, boolean pretty); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java deleted file mode 100644 index 492fc121c71..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameParser; -import com.datastax.oss.driver.internal.core.type.DefaultCustomType; -import com.datastax.oss.driver.internal.core.type.DefaultListType; -import com.datastax.oss.driver.internal.core.type.DefaultMapType; -import com.datastax.oss.driver.internal.core.type.DefaultSetType; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.PrimitiveType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; - -/** Constants and factory methods to obtain data type instances. 
*/ -public class DataTypes { - - public static final DataType ASCII = new PrimitiveType(ProtocolConstants.DataType.ASCII); - public static final DataType BIGINT = new PrimitiveType(ProtocolConstants.DataType.BIGINT); - public static final DataType BLOB = new PrimitiveType(ProtocolConstants.DataType.BLOB); - public static final DataType BOOLEAN = new PrimitiveType(ProtocolConstants.DataType.BOOLEAN); - public static final DataType COUNTER = new PrimitiveType(ProtocolConstants.DataType.COUNTER); - public static final DataType DECIMAL = new PrimitiveType(ProtocolConstants.DataType.DECIMAL); - public static final DataType DOUBLE = new PrimitiveType(ProtocolConstants.DataType.DOUBLE); - public static final DataType FLOAT = new PrimitiveType(ProtocolConstants.DataType.FLOAT); - public static final DataType INT = new PrimitiveType(ProtocolConstants.DataType.INT); - public static final DataType TIMESTAMP = new PrimitiveType(ProtocolConstants.DataType.TIMESTAMP); - public static final DataType UUID = new PrimitiveType(ProtocolConstants.DataType.UUID); - public static final DataType VARINT = new PrimitiveType(ProtocolConstants.DataType.VARINT); - public static final DataType TIMEUUID = new PrimitiveType(ProtocolConstants.DataType.TIMEUUID); - public static final DataType INET = new PrimitiveType(ProtocolConstants.DataType.INET); - public static final DataType DATE = new PrimitiveType(ProtocolConstants.DataType.DATE); - public static final DataType TEXT = new PrimitiveType(ProtocolConstants.DataType.VARCHAR); - public static final DataType TIME = new PrimitiveType(ProtocolConstants.DataType.TIME); - public static final DataType SMALLINT = new PrimitiveType(ProtocolConstants.DataType.SMALLINT); - public static final DataType TINYINT = new PrimitiveType(ProtocolConstants.DataType.TINYINT); - public static final DataType DURATION = new PrimitiveType(ProtocolConstants.DataType.DURATION); - - private static final DataTypeClassNameParser classNameParser = new 
DataTypeClassNameParser(); - - @NonNull - public static DataType custom(@NonNull String className) { - - // In protocol v4, duration is implemented as a custom type - if (className.equals("org.apache.cassandra.db.marshal.DurationType")) return DURATION; - - /* Vector support is currently implemented as a custom type but is also parameterized */ - if (className.startsWith(DefaultVectorType.VECTOR_CLASS_NAME)) - return classNameParser.parse(className, AttachmentPoint.NONE); - return new DefaultCustomType(className); - } - - @NonNull - public static ListType listOf(@NonNull DataType elementType) { - return new DefaultListType(elementType, false); - } - - @NonNull - public static ListType listOf(@NonNull DataType elementType, boolean frozen) { - return new DefaultListType(elementType, frozen); - } - - @NonNull - public static ListType frozenListOf(@NonNull DataType elementType) { - return new DefaultListType(elementType, true); - } - - @NonNull - public static SetType setOf(@NonNull DataType elementType) { - return new DefaultSetType(elementType, false); - } - - @NonNull - public static SetType setOf(@NonNull DataType elementType, boolean frozen) { - return new DefaultSetType(elementType, frozen); - } - - @NonNull - public static SetType frozenSetOf(@NonNull DataType elementType) { - return new DefaultSetType(elementType, true); - } - - @NonNull - public static MapType mapOf(@NonNull DataType keyType, @NonNull DataType valueType) { - return new DefaultMapType(keyType, valueType, false); - } - - @NonNull - public static MapType mapOf( - @NonNull DataType keyType, @NonNull DataType valueType, boolean frozen) { - return new DefaultMapType(keyType, valueType, frozen); - } - - @NonNull - public static MapType frozenMapOf(@NonNull DataType keyType, @NonNull DataType valueType) { - return new DefaultMapType(keyType, valueType, true); - } - - /** - * Builds a new, detached tuple type. 
- * - * @param componentTypes neither the individual types, nor the vararg array itself, can be {@code - * null}. - * @see Detachable - */ - @NonNull - public static TupleType tupleOf(@NonNull DataType... componentTypes) { - return new DefaultTupleType(ImmutableList.copyOf(Arrays.asList(componentTypes))); - } - - public static VectorType vectorOf(DataType subtype, int dimensions) { - return new DefaultVectorType(subtype, dimensions); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java deleted file mode 100644 index ca377d10bbf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface ListType extends DataType, ContainerType { - - boolean isFrozen(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - String template = (isFrozen() && includeFrozen) ? 
"frozen>" : "list<%s>"; - return String.format(template, getElementType().asCql(includeFrozen, pretty)); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.LIST; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java deleted file mode 100644 index f3bca2ac6a4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface MapType extends DataType { - - @NonNull - DataType getKeyType(); - - @NonNull - DataType getValueType(); - - boolean isFrozen(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - String template = (isFrozen() && includeFrozen) ? 
"frozen>" : "map<%s, %s>"; - return String.format( - template, - getKeyType().asCql(includeFrozen, pretty), - getValueType().asCql(includeFrozen, pretty)); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.MAP; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java deleted file mode 100644 index fa902c72bb8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface SetType extends DataType, ContainerType { - - boolean isFrozen(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - String template = (isFrozen() && includeFrozen) ? 
"frozen>" : "set<%s>"; - return String.format(template, getElementType().asCql(includeFrozen, pretty)); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.SET; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java deleted file mode 100644 index 9e2736ddce8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -public interface TupleType extends DataType { - - @NonNull - List getComponentTypes(); - - @NonNull - TupleValue newValue(); - - /** - * Creates a new instance with the specified values for the fields. - * - *

To encode the values, this method uses the {@link CodecRegistry} that this type is {@link - * #getAttachmentPoint() attached} to; it looks for the best codec to handle the target CQL type - * and actual runtime type of each value (see {@link CodecRegistry#codecFor(DataType, Object)}). - * - * @param values the values of the tuple's fields. They must be in the same order as the fields in - * the tuple's definition. You can specify less values than there are fields (the remaining - * ones will be set to NULL), but not more (a runtime exception will be thrown). Individual - * values can be {@code null}, but the array itself can't. - * @throws IllegalArgumentException if there are too many values. - */ - @NonNull - TupleValue newValue(@NonNull Object... values); - - @NonNull - AttachmentPoint getAttachmentPoint(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - StringBuilder builder = new StringBuilder(); - // Tuples are always frozen - if (includeFrozen) { - builder.append("frozen<"); - } - boolean first = true; - for (DataType type : getComponentTypes()) { - builder.append(first ? "tuple<" : ", "); - first = false; - builder.append(type.asCql(includeFrozen, pretty)); - } - builder.append('>'); - if (includeFrozen) { - builder.append('>'); - } - return builder.toString(); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.TUPLE; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java deleted file mode 100644 index 4d4768a8ae4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.metadata.schema.Describable; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.List; - -public interface UserDefinedType extends DataType, Describable { - - @Nullable // because of ShallowUserDefinedType usage in the query builder - CqlIdentifier getKeyspace(); - - @NonNull - CqlIdentifier getName(); - - boolean isFrozen(); - - @NonNull - List getFieldNames(); - - /** - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(CqlIdentifier)} in a singleton list, which is not entirely correct, - * as it will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull CqlIdentifier id) { - Loggers.USER_DEFINED_TYPE.warn( - "{} should override allIndicesOf(CqlIdentifier), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(id)); - } - - int firstIndexOf(@NonNull CqlIdentifier id); - - /** - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(String)} in a singleton list, which is not entirely correct, as it - * will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull String name) { - Loggers.USER_DEFINED_TYPE.warn( - "{} should override allIndicesOf(String), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(name)); - } - - int firstIndexOf(@NonNull String name); - - default boolean contains(@NonNull CqlIdentifier id) { - return firstIndexOf(id) >= 0; - } - - default boolean contains(@NonNull String name) { - return firstIndexOf(name) >= 0; - } - - @NonNull - List getFieldTypes(); - - @NonNull - UserDefinedType copy(boolean newFrozen); - - @NonNull - UdtValue newValue(); - - /** - * Creates a new instance with the specified values for the fields. - * - *

To encode the values, this method uses the {@link CodecRegistry} that this type is {@link - * #getAttachmentPoint() attached} to; it looks for the best codec to handle the target CQL type - * and actual runtime type of each value (see {@link CodecRegistry#codecFor(DataType, Object)}). - * - * @param fields the value of the fields. They must be in the same order as the fields in the - * type's definition. You can specify less values than there are fields (the remaining ones - * will be set to NULL), but not more (a runtime exception will be thrown). Individual values - * can be {@code null}, but the array itself can't. - * @throws IllegalArgumentException if there are too many values. - */ - @NonNull - UdtValue newValue(@NonNull Object... fields); - - @NonNull - AttachmentPoint getAttachmentPoint(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - if (getKeyspace() != null) { - String template = (isFrozen() && includeFrozen) ? "frozen<%s.%s>" : "%s.%s"; - return String.format(template, getKeyspace().asCql(pretty), getName().asCql(pretty)); - } else { - String template = (isFrozen() && includeFrozen) ? 
"frozen<%s>" : "%s"; - return String.format(template, getName().asCql(pretty)); - } - } - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - - builder - .append("CREATE TYPE ") - .append(getKeyspace()) - .append(".") - .append(getName()) - .append(" (") - .newLine() - .increaseIndent(); - - List fieldNames = getFieldNames(); - List fieldTypes = getFieldTypes(); - int fieldCount = fieldNames.size(); - for (int i = 0; i < fieldCount; i++) { - builder.append(fieldNames.get(i)).append(" ").append(fieldTypes.get(i).asCql(true, pretty)); - if (i < fieldCount - 1) { - builder.append(","); - } - builder.newLine(); - } - builder.decreaseIndent().append(");"); - return builder.build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - // No children (if it uses other types, they're considered dependencies, not sub-elements) - return describe(pretty); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.UDT; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/VectorType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/VectorType.java deleted file mode 100644 index 1d7c13807ec..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/VectorType.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -/** - * Type representing a Cassandra vector type as described in CEP-30. At the moment this is - * implemented as a custom type so we include the CustomType interface as well. - */ -public interface VectorType extends CustomType, ContainerType { - - int getDimensions(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java deleted file mode 100644 index 4f45af0924f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** Thrown when a suitable {@link TypeCodec} cannot be found by the {@link CodecRegistry}. */ -public class CodecNotFoundException extends DriverException { - - private final DataType cqlType; - - private final GenericType javaType; - - public CodecNotFoundException(@Nullable DataType cqlType, @Nullable GenericType javaType) { - this( - String.format("Codec not found for requested operation: [%s <-> %s]", cqlType, javaType), - null, - cqlType, - javaType); - } - - public CodecNotFoundException( - @NonNull Throwable cause, @Nullable DataType cqlType, @Nullable GenericType javaType) { - this( - String.format( - "Error while looking up codec for requested operation: [%s <-> %s]", cqlType, javaType), - cause, - cqlType, - javaType); - } - - private CodecNotFoundException( - String msg, Throwable cause, DataType cqlType, GenericType javaType) { - super(msg, null, cause, true); - this.cqlType = cqlType; - this.javaType = javaType; - } - - @Nullable - public DataType getCqlType() { - return cqlType; - } - - @Nullable - public GenericType getJavaType() { - return javaType; - } - - @NonNull - @Override - public DriverException copy() { - return new CodecNotFoundException(getMessage(), getCause(), getCqlType(), getJavaType()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.java deleted file mode 100644 index 51a96a16376..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.java +++ 
/dev/null @@ -1,492 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.codec.SimpleBlobCodec; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.OptionalCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.BooleanListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.ByteListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.DoubleListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.FloatListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.IntListToArrayCodec; -import 
com.datastax.oss.driver.internal.core.type.codec.extras.array.LongListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.ObjectListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.ShortListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.enums.EnumNameCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.enums.EnumOrdinalCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.json.JsonCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.time.LocalTimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.time.PersistentZonedTimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.time.TimestampMillisCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.time.ZonedTimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.vector.FloatVectorToArrayCodec; -import com.fasterxml.jackson.databind.ObjectMapper; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.util.Optional; - -/** - * Additional codecs that can be registered to handle different type mappings. - * - * @see SessionBuilder#addTypeCodecs(TypeCodec[]) - * @see MutableCodecRegistry#register(TypeCodec) - */ -public class ExtraTypeCodecs { - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link Instant}, using the UTC time zone - * to parse and format CQL literals. - * - *

This codec uses {@link ZoneOffset#UTC} as its source of time zone information when - * formatting values as CQL literals, or parsing CQL literals that do not have any time zone - * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link - * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a - * bound statement or reading a column from a row, are not affected by the time zone. - * - *

If you need a different time zone, consider other constants in this class, or call {@link - * ExtraTypeCodecs#timestampAt(ZoneId)} instead. - * - * @see TypeCodecs#TIMESTAMP - * @see ExtraTypeCodecs#timestampAt(ZoneId) - */ - public static final TypeCodec TIMESTAMP_UTC = new TimestampCodec(ZoneOffset.UTC); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@code long}, representing the number of - * milliseconds since the Epoch, using the system's default time zone to parse and format CQL - * literals. - * - *

This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its - * source of time zone information when formatting values as CQL literals, or parsing CQL literals - * that do not have any time zone indication. Note that this only applies to the {@link - * TypeCodec#format(Object)} and {@link TypeCodec#parse(String)} methods; regular encoding and - * decoding, like setting a value on a bound statement or reading a column from a row, are not - * affected by the time zone. - * - *

If you need a different time zone, consider other constants in this class, or call {@link - * #timestampMillisAt(ZoneId)} instead. - * - *

This codec can serve as a replacement for the driver's built-in {@linkplain - * TypeCodecs#TIMESTAMP timestamp} codec, when application code prefers to deal with raw - * milliseconds than with {@link Instant} instances. - * - * @see #TIMESTAMP_MILLIS_UTC - * @see #timestampMillisAt(ZoneId) - */ - public static final PrimitiveLongCodec TIMESTAMP_MILLIS_SYSTEM = new TimestampMillisCodec(); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@code long}, representing the number of - * milliseconds since the Epoch, using the UTC time zone to parse and format CQL literals. - * - *

This codec uses {@link ZoneOffset#UTC} as its source of time zone information when - * formatting values as CQL literals, or parsing CQL literals that do not have any time zone - * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link - * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a - * bound statement or reading a column from a row, are not affected by the time zone. - * - *

If you need a different time zone, consider other constants in this class, or call {@link - * #timestampMillisAt(ZoneId)} instead. - * - *

This codec can serve as a replacement for the driver's built-in {@linkplain - * TypeCodecs#TIMESTAMP timestamp} codec, when application code prefers to deal with raw - * milliseconds than with {@link Instant} instances. - * - * @see #TIMESTAMP_MILLIS_SYSTEM - * @see #timestampMillisAt(ZoneId) - */ - public static final PrimitiveLongCodec TIMESTAMP_MILLIS_UTC = - new TimestampMillisCodec(ZoneOffset.UTC); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link ZonedDateTime}, using the - * system's default time zone. - * - *

This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its - * source of time zone information when encoding or decoding. If you need a different time zone, - * consider using other constants in this class, or call {@link #zonedTimestampAt(ZoneId)} - * instead. - * - *

Note that CQL type {@code timestamp} type does not store any time zone; this codec is - * provided merely as a convenience for users that need to deal with zoned timestamps in their - * applications. - * - * @see #ZONED_TIMESTAMP_UTC - * @see #ZONED_TIMESTAMP_PERSISTED - * @see #zonedTimestampAt(ZoneId) - */ - public static final TypeCodec ZONED_TIMESTAMP_SYSTEM = new ZonedTimestampCodec(); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link ZonedDateTime}, using the UTC - * time zone. - * - *

This codec uses {@link ZoneOffset#UTC} as its source of time zone information when encoding - * or decoding. If you need a different time zone, consider using other constants in this class, - * or call {@link #zonedTimestampAt(ZoneId)} instead. - * - *

Note that CQL type {@code timestamp} type does not store any time zone; this codec is - * provided merely as a convenience for users that need to deal with zoned timestamps in their - * applications. - * - * @see #ZONED_TIMESTAMP_SYSTEM - * @see #ZONED_TIMESTAMP_PERSISTED - * @see #zonedTimestampAt(ZoneId) - */ - public static final TypeCodec ZONED_TIMESTAMP_UTC = - new ZonedTimestampCodec(ZoneOffset.UTC); - - /** - * A codec that maps CQL type {@code tuple} to Java's {@link ZonedDateTime}, - * providing a pattern for maintaining timezone information in Cassandra. - * - *

Since CQL type {@code timestamp} does not store any time zone, it is persisted separately in - * the {@code text} field of the tuple, and so when the value is read back the original timezone - * it was written with is preserved. - * - * @see #ZONED_TIMESTAMP_SYSTEM - * @see #ZONED_TIMESTAMP_UTC - * @see #zonedTimestampAt(ZoneId) - */ - public static final TypeCodec ZONED_TIMESTAMP_PERSISTED = - new PersistentZonedTimestampCodec(); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link LocalDateTime}, using the - * system's default time zone. - * - *

This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its - * source of time zone information when encoding or decoding. If you need a different time zone, - * consider using other constants in this class, or call {@link #localTimestampAt(ZoneId)} - * instead. - * - *

Note that CQL type {@code timestamp} does not store any time zone; this codec is provided - * merely as a convenience for users that need to deal with local date-times in their - * applications. - * - * @see #LOCAL_TIMESTAMP_UTC - * @see #localTimestampAt(ZoneId) - */ - public static final TypeCodec LOCAL_TIMESTAMP_SYSTEM = new LocalTimestampCodec(); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link LocalDateTime}, using the UTC - * time zone. - * - *

This codec uses {@link ZoneOffset#UTC} as its source of time zone information when encoding - * or decoding. If you need a different time zone, consider using other constants in this class, - * or call {@link #localTimestampAt(ZoneId)} instead. - * - *

Note that CQL type {@code timestamp} does not store any time zone; this codec is provided - * merely as a convenience for users that need to deal with local date-times in their - * applications. - * - * @see #LOCAL_TIMESTAMP_SYSTEM - * @see #localTimestampAt(ZoneId) - */ - public static final TypeCodec LOCAL_TIMESTAMP_UTC = - new LocalTimestampCodec(ZoneOffset.UTC); - - /** - * A codec that maps CQL type {@code blob} to Java's {@code byte[]}. - * - *

If you are looking for a codec mapping CQL type {@code blob} to the Java type {@link - * ByteBuffer}, you should use {@link TypeCodecs#BLOB} instead. - * - *

If you are looking for a codec mapping CQL type {@code list BLOB_TO_ARRAY = new SimpleBlobCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code boolean[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code boolean[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * boolean} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec BOOLEAN_LIST_TO_ARRAY = new BooleanListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code byte[]}. - * - *

This codec is not suitable for reading CQL blobs as byte arrays. If you are looking for a - * codec for the CQL type {@code blob}, you should use {@link TypeCodecs#BLOB} or {@link - * ExtraTypeCodecs#BLOB_TO_ARRAY} instead. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code byte[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * byte} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - * - * @see TypeCodecs#BLOB - * @see ExtraTypeCodecs#BLOB_TO_ARRAY - */ - public static final TypeCodec BYTE_LIST_TO_ARRAY = new ByteListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code short[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code short[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * short} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec SHORT_LIST_TO_ARRAY = new ShortListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code int[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code int[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code int} - * values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ - public static final TypeCodec INT_LIST_TO_ARRAY = new IntListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code long[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code long[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * long} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec LONG_LIST_TO_ARRAY = new LongListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code float[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code float[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * float} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec FLOAT_LIST_TO_ARRAY = new FloatListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code double[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code double[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * double} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec DOUBLE_LIST_TO_ARRAY = new DoubleListToArrayCodec(); - - /** - * Builds a new codec that maps CQL type {@code timestamp} to Java's {@link Instant}, using the - * given time zone to parse and format CQL literals. - * - *

This codec uses the supplied {@link ZoneId} as its source of time zone information when - * formatting values as CQL literals, or parsing CQL literals that do not have any time zone - * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link - * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a - * bound statement or reading a column from a row, are not affected by the time zone. - * - * @see TypeCodecs#TIMESTAMP - * @see ExtraTypeCodecs#TIMESTAMP_UTC - */ - @NonNull - public static TypeCodec timestampAt(@NonNull ZoneId timeZone) { - return new TimestampCodec(timeZone); - } - - /** - * Builds a new codec that maps CQL type {@code timestamp} to Java's {@code long}, representing - * the number of milliseconds since the Epoch, using the given time zone to parse and format CQL - * literals. - * - *

This codec uses the supplied {@link ZoneId} as its source of time zone information when - * formatting values as CQL literals, or parsing CQL literals that do not have any time zone - * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link - * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a - * bound statement or reading a column from a row, are not affected by the time zone. - * - *

This codec can serve as a replacement for the driver's built-in {@linkplain - * TypeCodecs#TIMESTAMP timestamp} codec, when application code prefers to deal with raw - * milliseconds than with {@link Instant} instances. - * - * @see ExtraTypeCodecs#TIMESTAMP_MILLIS_SYSTEM - * @see ExtraTypeCodecs#TIMESTAMP_MILLIS_UTC - */ - @NonNull - public static PrimitiveLongCodec timestampMillisAt(@NonNull ZoneId timeZone) { - return new TimestampMillisCodec(timeZone); - } - - /** - * Builds a new codec that maps CQL type {@code timestamp} to Java's {@link ZonedDateTime}. - * - *

This codec uses the supplied {@link ZoneId} as its source of time zone information when - * encoding or decoding. - * - *

Note that CQL type {@code timestamp} does not store any time zone; the codecs created by - * this method are provided merely as a convenience for users that need to deal with zoned - * timestamps in their applications. - * - * @see ExtraTypeCodecs#ZONED_TIMESTAMP_SYSTEM - * @see ExtraTypeCodecs#ZONED_TIMESTAMP_UTC - * @see ExtraTypeCodecs#ZONED_TIMESTAMP_PERSISTED - */ - @NonNull - public static TypeCodec zonedTimestampAt(@NonNull ZoneId timeZone) { - return new ZonedTimestampCodec(timeZone); - } - - /** - * Builds a new codec that maps CQL type {@code timestamp} to Java's {@link LocalDateTime}. - * - *

This codec uses the supplied {@link ZoneId} as its source of time zone information when - * encoding or decoding. - * - *

Note that CQL type {@code timestamp} does not store any time zone; the codecs created by - * this method are provided merely as a convenience for users that need to deal with local - * date-times in their applications. - * - * @see ExtraTypeCodecs#LOCAL_TIMESTAMP_UTC - * @see #localTimestampAt(ZoneId) - */ - @NonNull - public static TypeCodec localTimestampAt(@NonNull ZoneId timeZone) { - return new LocalTimestampCodec(timeZone); - } - - /** - * Builds a new codec that maps a CQL list to a Java array. Encoding and decoding of elements in - * the array is delegated to the provided element codec. - * - *

This method is not suitable for Java primitive arrays. Use {@link - * ExtraTypeCodecs#BOOLEAN_LIST_TO_ARRAY}, {@link ExtraTypeCodecs#BYTE_LIST_TO_ARRAY}, {@link - * ExtraTypeCodecs#SHORT_LIST_TO_ARRAY}, {@link ExtraTypeCodecs#INT_LIST_TO_ARRAY}, {@link - * ExtraTypeCodecs#LONG_LIST_TO_ARRAY}, {@link ExtraTypeCodecs#FLOAT_LIST_TO_ARRAY} or {@link - * ExtraTypeCodecs#DOUBLE_LIST_TO_ARRAY} instead. - */ - @NonNull - public static TypeCodec listToArrayOf(@NonNull TypeCodec elementCodec) { - return new ObjectListToArrayCodec<>(elementCodec); - } - - /** - * Builds a new codec that maps CQL type {@code int} to a Java Enum, according to its constants' - * {@linkplain Enum#ordinal() ordinals} (STRONGLY discouraged, see explanations below). - * - *

This method is provided for compatibility with driver 3, but we strongly recommend against - * it. Relying on enum ordinals is a bad practice: any reordering of the enum constants, or - * insertion of a new constant before the end, will change the ordinals. The codec will keep - * working, but start inserting different codes and corrupting your data. - * - *

{@link #enumNamesOf(Class)} is a safer alternative, as it is not dependent on the constant - * order. If you still want to use integer codes for storage efficiency, we recommend implementing - * an explicit mapping (for example with a {@code toCode()} method on your enum type). It is then - * fairly straightforward to implement a codec with {@link MappingCodec}, using {@link - * TypeCodecs#INT} as the "inner" codec. - */ - @NonNull - public static > TypeCodec enumOrdinalsOf( - @NonNull Class enumClass) { - return new EnumOrdinalCodec<>(enumClass); - } - - /** - * Builds a new codec that maps CQL type {@code text} to a Java Enum, according to its constants' - * programmatic {@linkplain Enum#name() names}. - * - * @see #enumOrdinalsOf(Class) - */ - @NonNull - public static > TypeCodec enumNamesOf( - @NonNull Class enumClass) { - return new EnumNameCodec<>(enumClass); - } - - /** - * Builds a new codec that wraps another codec's Java type into {@link Optional} instances - * (mapping CQL null to {@link Optional#empty()}). - */ - @NonNull - public static TypeCodec> optionalOf(@NonNull TypeCodec innerCodec) { - return new OptionalCodec<>(innerCodec); - } - - /** - * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON - * serialization with a default Jackson mapper. - * - * @see Jackson JSON Library - */ - @NonNull - public static TypeCodec json(@NonNull GenericType javaType) { - return new JsonCodec<>(javaType); - } - - /** - * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON - * serialization with a default Jackson mapper. - * - * @see Jackson JSON Library - */ - @NonNull - public static TypeCodec json(@NonNull Class javaType) { - return new JsonCodec<>(javaType); - } - - /** - * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON - * serialization with the provided Jackson mapper. 
- * - * @see Jackson JSON Library - */ - @NonNull - public static TypeCodec json( - @NonNull GenericType javaType, @NonNull ObjectMapper objectMapper) { - return new JsonCodec<>(javaType, objectMapper); - } - - /** - * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON - * serialization with the provided Jackson mapper. - * - * @see Jackson JSON Library - */ - @NonNull - public static TypeCodec json( - @NonNull Class javaType, @NonNull ObjectMapper objectMapper) { - return new JsonCodec<>(javaType, objectMapper); - } - - /** Builds a new codec that maps CQL float vectors of the specified size to an array of floats. */ - public static TypeCodec floatVectorToArray(int dimensions) { - return new FloatVectorToArrayCodec(new DefaultVectorType(DataTypes.FLOAT, dimensions)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/MappingCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/MappingCodec.java deleted file mode 100644 index df1a34a566a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/MappingCodec.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Objects; - -/** - * A {@link TypeCodec} that maps instances of {@code InnerT}, a driver supported Java type, to - * instances of a target {@code OuterT} Java type. - * - *

This codec can be used to provide support for Java types that are not natively handled by the - * driver, as long as there is a conversion path to and from another supported Java type. - * - * @param The "inner" Java type; must be a driver supported Java type (that is, there must - * exist a codec registered for it). - * @param The "outer", or target Java type; this codec will handle the mapping to and from - * {@code InnerT} and {@code OuterT}. - * @see driver - * documentation on custom codecs - * @see - * driver supported Java types - */ -public abstract class MappingCodec implements TypeCodec { - - protected final TypeCodec innerCodec; - protected final GenericType outerJavaType; - - /** - * Creates a new mapping codec providing support for {@code OuterT} based on an existing codec for - * {@code InnerT}. - * - * @param innerCodec The inner codec to use to handle instances of InnerT; must not be null. - * @param outerJavaType The outer Java type; must not be null. - */ - protected MappingCodec( - @NonNull TypeCodec innerCodec, @NonNull GenericType outerJavaType) { - this.innerCodec = Objects.requireNonNull(innerCodec, "innerCodec cannot be null"); - this.outerJavaType = Objects.requireNonNull(outerJavaType, "outerJavaType cannot be null"); - } - - /** @return The type of {@code OuterT}. */ - @NonNull - @Override - public GenericType getJavaType() { - return outerJavaType; - } - - /** @return The type of {@code InnerT}. 
*/ - public GenericType getInnerJavaType() { - return innerCodec.getJavaType(); - } - - @NonNull - @Override - public DataType getCqlType() { - return innerCodec.getCqlType(); - } - - @Override - public ByteBuffer encode(OuterT value, @NonNull ProtocolVersion protocolVersion) { - return innerCodec.encode(outerToInner(value), protocolVersion); - } - - @Override - public OuterT decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return innerToOuter(innerCodec.decode(bytes, protocolVersion)); - } - - @NonNull - @Override - public String format(OuterT value) { - return innerCodec.format(outerToInner(value)); - } - - @Override - public OuterT parse(String value) { - return innerToOuter(innerCodec.parse(value)); - } - - /** - * Converts from an instance of the inner Java type to an instance of the outer Java type. Used - * when deserializing or parsing. - * - * @param value The value to convert; may be null. - * @return The converted value; may be null. - */ - @Nullable - protected abstract OuterT innerToOuter(@Nullable InnerT value); - - /** - * Converts from an instance of the outer Java type to an instance of the inner Java type. Used - * when serializing or formatting. - * - * @param value The value to convert; may be null. - * @return The converted value; may be null. - */ - @Nullable - protected abstract InnerT outerToInner(@Nullable OuterT value); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java deleted file mode 100644 index 2ad4f2fa15a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized boolean codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's boolean getters will use - * it to avoid boxing. - */ -public interface PrimitiveBooleanCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(boolean value, @NonNull ProtocolVersion protocolVersion); - - boolean decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Boolean value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Boolean decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java deleted file mode 100644 index 5909bcd4ff9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized byte codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's byte getters will use it - * to avoid boxing. - */ -public interface PrimitiveByteCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(byte value, @NonNull ProtocolVersion protocolVersion); - - byte decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Byte value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Byte decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java deleted file mode 100644 index c46160f0942..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized double codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's double getters will use - * it to avoid boxing. - */ -public interface PrimitiveDoubleCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(double value, @NonNull ProtocolVersion protocolVersion); - - double decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Double value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Double decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java deleted file mode 100644 index 585d5fdb1fa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized float codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's float getters will use it - * to avoid boxing. - */ -public interface PrimitiveFloatCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(float value, @NonNull ProtocolVersion protocolVersion); - - float decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Float value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Float decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java deleted file mode 100644 index b3f374eb8d7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized integer codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's integer getters will use - * it to avoid boxing. - */ -public interface PrimitiveIntCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(int value, @NonNull ProtocolVersion protocolVersion); - - int decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Integer value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Integer decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java deleted file mode 100644 index ec65820c60f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized long codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's long getters will use it - * to avoid boxing. - */ -public interface PrimitiveLongCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(long value, @NonNull ProtocolVersion protocolVersion); - - long decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Long value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Long decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java deleted file mode 100644 index 48c063b3dc6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized short codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's short getters will use it - * to avoid boxing. - */ -public interface PrimitiveShortCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(short value, @NonNull ProtocolVersion protocolVersion); - - short decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Short value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Short decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java deleted file mode 100644 index d6afbe0380a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; - -/** - * Manages the two-way conversion between a CQL type and a Java type. - * - *

Type codec implementations: - * - *

    - *
  1. must be thread-safe. - *
  2. must perform fast and never block. - *
  3. must support all native protocol versions; it is not possible to use different - * codecs for the same types but under different protocol versions. - *
  4. must comply with the native protocol specifications; failing to do so will result - * in unexpected results and could cause the driver to crash. - *
  5. should be stateless and immutable. - *
  6. should interpret {@code null} values and empty byte buffers (i.e. - * {@link ByteBuffer#remaining()} == 0) in a reasonable way; usually, {@code - * NULL} CQL values should map to {@code null} references, but exceptions exist; e.g. for - * varchar types, a {@code NULL} CQL value maps to a {@code null} reference, whereas an empty - * buffer maps to an empty String. For collection types, it is also admitted that {@code NULL} - * CQL values map to empty Java collections instead of {@code null} references. In any case, - * the codec's behavior with respect to {@code null} values and empty ByteBuffers should be - * clearly documented. - *
  7. for Java types that have a primitive equivalent, should implement the appropriate - * "primitive" codec interface, e.g. {@link PrimitiveBooleanCodec} for {@code boolean}. This - * allows the driver to avoid the overhead of boxing when using primitive accessors such as - * {@link Row#getBoolean(int)}. - *
  8. when decoding, must not consume {@link ByteBuffer} instances by performing - * relative read operations that modify their current position; codecs should instead prefer - * absolute read methods or, if necessary, {@link ByteBuffer#duplicate() duplicate} their byte - * buffers prior to reading them. - *
- */ -public interface TypeCodec { - - @NonNull - GenericType getJavaType(); - - @NonNull - DataType getCqlType(); - - /** - * Whether this codec is capable of processing the given Java type. - * - *

The default implementation is invariant with respect to the passed argument - * (through the usage of {@link GenericType#equals(Object)}) and it's strongly recommended not - * to modify this behavior. This means that a codec will only ever accept the exact - * Java type that it has been created for. - * - *

If the argument represents a Java primitive type, its wrapper type is considered instead. - */ - default boolean accepts(@NonNull GenericType javaType) { - Preconditions.checkNotNull(javaType); - return getJavaType().equals(javaType.wrap()); - } - - /** - * Whether this codec is capable of processing the given Java class. - * - *

This implementation simply compares the given class (or its wrapper type if it is a - * primitive type) against this codec's runtime (raw) class; it is invariant with respect - * to the passed argument (through the usage of {@link Class#equals(Object)} and it's strongly - * recommended not to modify this behavior. This means that a codec will only ever return - * {@code true} for the exact runtime (raw) Java class that it has been created for. - * - *

Implementors are encouraged to override this method if there is a more efficient way. In - * particular, if the codec targets a final class, the check can be done with a simple {@code ==}. - */ - default boolean accepts(@NonNull Class javaClass) { - Preconditions.checkNotNull(javaClass); - if (javaClass.isPrimitive()) { - if (javaClass == Boolean.TYPE) { - javaClass = Boolean.class; - } else if (javaClass == Character.TYPE) { - javaClass = Character.class; - } else if (javaClass == Byte.TYPE) { - javaClass = Byte.class; - } else if (javaClass == Short.TYPE) { - javaClass = Short.class; - } else if (javaClass == Integer.TYPE) { - javaClass = Integer.class; - } else if (javaClass == Long.TYPE) { - javaClass = Long.class; - } else if (javaClass == Float.TYPE) { - javaClass = Float.class; - } else if (javaClass == Double.TYPE) { - javaClass = Double.class; - } - } - return getJavaType().getRawType().equals(javaClass); - } - - /** - * Whether this codec is capable of encoding the given Java object. - * - *

The object's Java type is inferred from its runtime (raw) type, contrary to {@link - * #accepts(GenericType)} which is capable of handling generic types. - * - *

Contrary to other {@code accept} methods, this method's default implementation is - * covariant with respect to the passed argument (through the usage of {@link - * Class#isAssignableFrom(Class)}) and it's strongly recommended not to modify this - * behavior. This means that, by default, a codec will accept any subtype of the - * Java type that it has been created for. This is so because codec lookups by arbitrary Java - * objects only make sense when attempting to encode, never when attempting to decode, and indeed - * the {@linkplain #encode(Object, ProtocolVersion) encode} method is covariant with {@code - * JavaTypeT}. - * - *

It can only handle non-parameterized types; codecs handling parameterized types, such as - * collection types, must override this method and perform some sort of "manual" inspection of the - * actual type parameters. - * - *

Similarly, codecs that only accept a partial subset of all possible values must override - * this method and manually inspect the object to check if it complies or not with the codec's - * limitations. - * - *

Finally, if the codec targets a non-generic Java class, it might be possible to implement - * this method with a simple {@code instanceof} check. - */ - default boolean accepts(@NonNull Object value) { - Preconditions.checkNotNull(value); - return getJavaType().getRawType().isAssignableFrom(value.getClass()); - } - - /** Whether this codec is capable of processing the given CQL type. */ - default boolean accepts(@NonNull DataType cqlType) { - Preconditions.checkNotNull(cqlType); - return this.getCqlType().equals(cqlType); - } - - /** - * Encodes the given value in the binary format of the CQL type handled by this codec. - * - *

    - *
  • Null values should be gracefully handled and no exception should be raised; they should - * be considered as the equivalent of a NULL CQL value; - *
  • Codecs for CQL collection types should not permit null elements; - *
  • Codecs for CQL collection types should treat a {@code null} input as the equivalent of an - * empty collection. - *
- */ - @Nullable - ByteBuffer encode(@Nullable JavaTypeT value, @NonNull ProtocolVersion protocolVersion); - - /** - * Decodes a value from the binary format of the CQL type handled by this codec. - * - *
    - *
  • Null or empty buffers should be gracefully handled and no exception should be raised; - * they should be considered as the equivalent of a NULL CQL value and, in most cases, - * should map to {@code null} or a default value for the corresponding Java type, if - * applicable; - *
  • Codecs for CQL collection types should clearly document whether they return immutable - * collections or not (note that the driver's default collection codecs return - * mutable collections); - *
  • Codecs for CQL collection types should avoid returning {@code null}; they should return - * empty collections instead (the driver's default collection codecs all comply with this - * rule); - *
  • The provided {@link ByteBuffer} should never be consumed by read operations that modify - * its current position; if necessary, {@link ByteBuffer#duplicate()} duplicate} it before - * consuming. - *
- */ - @Nullable - JavaTypeT decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion); - - /** - * Formats the given value as a valid CQL literal according to the CQL type handled by this codec. - * - *

Implementors should take care of quoting and escaping the resulting CQL literal where - * applicable. Null values should be accepted; in most cases, implementations should return the - * CQL keyword {@code "NULL"} for {@code null} inputs. - * - *

Implementing this method is not strictly mandatory. It is used: - * - *

    - *
  1. by the request logger, if parameter logging is enabled; - *
  2. to format the INITCOND in {@link AggregateMetadata#describe(boolean)}; - *
  3. in the {@code toString()} representation of some driver objects (such as {@link UdtValue} - * and {@link TupleValue}), which is only used in driver logs; - *
  4. for literal values in the query builder (see {@code QueryBuilder#literal(Object, - * CodecRegistry)} and {@code QueryBuilder#literal(Object, TypeCodec)}). - *
- * - * If you choose not to implement this method, don't throw an exception but instead return a - * constant string (for example "XxxCodec.format not implemented"). - */ - @NonNull - String format(@Nullable JavaTypeT value); - - /** - * Parse the given CQL literal into an instance of the Java type handled by this codec. - * - *

Implementors should take care of unquoting and unescaping the given CQL string where - * applicable. Null values and empty strings should be accepted, as well as the string {@code - * "NULL"}; in most cases, implementations should interpret these inputs has equivalent to a - * {@code null} reference. - * - *

Implementing this method is not strictly mandatory: internally, the driver only uses it to - * parse the INITCOND when building the {@link AggregateMetadata metadata of an aggregate - * function} (and in most cases it will use a built-in codec, unless the INITCOND has a custom - * type). - * - *

If you choose not to implement this method, don't throw an exception but instead return - * {@code null}. - */ - @Nullable - JavaTypeT parse(@Nullable String value); - - @NonNull - default Optional serializedSize() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java deleted file mode 100644 index 68f1b07b106..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.internal.core.type.codec.BigIntCodec; -import com.datastax.oss.driver.internal.core.type.codec.BlobCodec; -import com.datastax.oss.driver.internal.core.type.codec.BooleanCodec; -import com.datastax.oss.driver.internal.core.type.codec.CounterCodec; -import com.datastax.oss.driver.internal.core.type.codec.CqlDurationCodec; -import com.datastax.oss.driver.internal.core.type.codec.CustomCodec; -import com.datastax.oss.driver.internal.core.type.codec.DateCodec; -import com.datastax.oss.driver.internal.core.type.codec.DecimalCodec; -import com.datastax.oss.driver.internal.core.type.codec.DoubleCodec; -import com.datastax.oss.driver.internal.core.type.codec.FloatCodec; -import com.datastax.oss.driver.internal.core.type.codec.InetCodec; -import com.datastax.oss.driver.internal.core.type.codec.IntCodec; -import com.datastax.oss.driver.internal.core.type.codec.ListCodec; -import com.datastax.oss.driver.internal.core.type.codec.MapCodec; -import com.datastax.oss.driver.internal.core.type.codec.SetCodec; -import com.datastax.oss.driver.internal.core.type.codec.SmallIntCodec; -import com.datastax.oss.driver.internal.core.type.codec.StringCodec; -import com.datastax.oss.driver.internal.core.type.codec.TimeCodec; -import com.datastax.oss.driver.internal.core.type.codec.TimeUuidCodec; -import 
com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.TinyIntCodec; -import com.datastax.oss.driver.internal.core.type.codec.TupleCodec; -import com.datastax.oss.driver.internal.core.type.codec.UdtCodec; -import com.datastax.oss.driver.internal.core.type.codec.UuidCodec; -import com.datastax.oss.driver.internal.core.type.codec.VarIntCodec; -import com.datastax.oss.driver.internal.core.type.codec.VectorCodec; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** - * Constants and factory methods to obtain instances of the driver's default type codecs. - * - *

See also {@link ExtraTypeCodecs} for additional codecs that you can register with your session - * to handle different type mappings. - */ -public class TypeCodecs { - - /** The default codec that maps CQL type {@code boolean} to Java's {@code boolean}. */ - public static final PrimitiveBooleanCodec BOOLEAN = new BooleanCodec(); - - /** The default codec that maps CQL type {@code tinyint} to Java's {@code byte}. */ - public static final PrimitiveByteCodec TINYINT = new TinyIntCodec(); - - /** The default codec that maps CQL type {@code double} to Java's {@code double}. */ - public static final PrimitiveDoubleCodec DOUBLE = new DoubleCodec(); - - /** The default codec that maps CQL type {@code counter} to Java's {@code long}. */ - public static final PrimitiveLongCodec COUNTER = new CounterCodec(); - - /** The default codec that maps CQL type {@code float} to Java's {@code float}. */ - public static final PrimitiveFloatCodec FLOAT = new FloatCodec(); - - /** The default codec that maps CQL type {@code int} to Java's {@code int}. */ - public static final PrimitiveIntCodec INT = new IntCodec(); - - /** The default codec that maps CQL type {@code bigint} to Java's {@code long}. */ - public static final PrimitiveLongCodec BIGINT = new BigIntCodec(); - - /** The default codec that maps CQL type {@code smallint} to Java's {@code short}. */ - public static final PrimitiveShortCodec SMALLINT = new SmallIntCodec(); - - /** - * The default codec that maps CQL type {@code timestamp} to Java's {@link Instant}, using the - * system's default time zone to parse and format CQL literals. - * - *

This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its - * source of time zone information when formatting values as CQL literals, or parsing CQL literals - * that do not have any time zone indication. Note that this only applies to the {@link - * TypeCodec#format(Object)} and {@link TypeCodec#parse(String)} methods; regular encoding and - * decoding, like setting a value on a bound statement or reading a column from a row, are not - * affected by the time zone. - * - *

If you need a different time zone, consider other codecs in {@link ExtraTypeCodecs}, or call - * {@link ExtraTypeCodecs#timestampAt(ZoneId)} instead. - * - * @see ExtraTypeCodecs#TIMESTAMP_UTC - * @see ExtraTypeCodecs#timestampAt(ZoneId) - */ - public static final TypeCodec TIMESTAMP = new TimestampCodec(); - - /** The default codec that maps CQL type {@code date} to Java's {@link LocalDate}. */ - public static final TypeCodec DATE = new DateCodec(); - - /** The default codec that maps CQL type {@code time} to Java's {@link LocalTime}. */ - public static final TypeCodec TIME = new TimeCodec(); - - /** - * The default codec that maps CQL type {@code blob} to Java's {@link ByteBuffer}. - * - *

If you are looking for a codec mapping CQL type {@code blob} to the Java type {@code - * byte[]}, you should use {@link ExtraTypeCodecs#BLOB_TO_ARRAY} instead. - * - *

If you are looking for a codec mapping CQL type {@code list} to the Java type - * {@code byte[]}, you should use {@link ExtraTypeCodecs#BYTE_LIST_TO_ARRAY} instead. - * - * @see ExtraTypeCodecs#BLOB_TO_ARRAY - * @see ExtraTypeCodecs#BYTE_LIST_TO_ARRAY - */ - public static final TypeCodec BLOB = new BlobCodec(); - - /** The default codec that maps CQL type {@code text} to Java's {@link String}. */ - public static final TypeCodec TEXT = new StringCodec(DataTypes.TEXT, Charsets.UTF_8); - /** The default codec that maps CQL type {@code ascii} to Java's {@link String}. */ - public static final TypeCodec ASCII = new StringCodec(DataTypes.ASCII, Charsets.US_ASCII); - /** The default codec that maps CQL type {@code varint} to Java's {@link BigInteger}. */ - public static final TypeCodec VARINT = new VarIntCodec(); - /** The default codec that maps CQL type {@code decimal} to Java's {@link BigDecimal}. */ - public static final TypeCodec DECIMAL = new DecimalCodec(); - /** The default codec that maps CQL type {@code uuid} to Java's {@link UUID}. */ - public static final TypeCodec UUID = new UuidCodec(); - /** The default codec that maps CQL type {@code timeuuid} to Java's {@link UUID}. */ - public static final TypeCodec TIMEUUID = new TimeUuidCodec(); - /** The default codec that maps CQL type {@code inet} to Java's {@link InetAddress}. */ - public static final TypeCodec INET = new InetCodec(); - /** The default codec that maps CQL type {@code duration} to the driver's {@link CqlDuration}. */ - public static final TypeCodec DURATION = new CqlDurationCodec(); - - /** - * Builds a new codec that maps a CQL custom type to Java's {@link ByteBuffer}. - * - * @param cqlType the fully-qualified name of the custom type. 
- */ - @NonNull - public static TypeCodec custom(@NonNull DataType cqlType) { - Preconditions.checkArgument(cqlType instanceof CustomType, "cqlType must be a custom type"); - return new CustomCodec((CustomType) cqlType); - } - - /** - * Builds a new codec that maps a CQL list to a Java list, using the given codec to map each - * element. - */ - @NonNull - public static TypeCodec> listOf(@NonNull TypeCodec elementCodec) { - return new ListCodec<>(DataTypes.listOf(elementCodec.getCqlType()), elementCodec); - } - - /** - * Builds a new codec that maps a CQL set to a Java set, using the given codec to map each - * element. - */ - @NonNull - public static TypeCodec> setOf(@NonNull TypeCodec elementCodec) { - return new SetCodec<>(DataTypes.setOf(elementCodec.getCqlType()), elementCodec); - } - - /** - * Builds a new codec that maps a CQL map to a Java map, using the given codecs to map each key - * and value. - */ - @NonNull - public static TypeCodec> mapOf( - @NonNull TypeCodec keyCodec, @NonNull TypeCodec valueCodec) { - return new MapCodec<>( - DataTypes.mapOf(keyCodec.getCqlType(), valueCodec.getCqlType()), keyCodec, valueCodec); - } - - /** - * Builds a new codec that maps a CQL tuple to the driver's {@link TupleValue}, for the given type - * definition. - * - *

Note that the components of a {@link TupleValue} are stored in their encoded form. They are - * encoded/decoded on the fly when you set or get them, using the codec registry. - */ - @NonNull - public static TypeCodec tupleOf(@NonNull TupleType cqlType) { - return new TupleCodec(cqlType); - } - - public static TypeCodec> vectorOf( - @NonNull VectorType type, @NonNull TypeCodec subtypeCodec) { - return new VectorCodec( - DataTypes.vectorOf(subtypeCodec.getCqlType(), type.getDimensions()), subtypeCodec); - } - - public static TypeCodec> vectorOf( - int dimensions, @NonNull TypeCodec subtypeCodec) { - return new VectorCodec(DataTypes.vectorOf(subtypeCodec.getCqlType(), dimensions), subtypeCodec); - } - - /** - * Builds a new codec that maps a CQL user defined type to the driver's {@link UdtValue}, for the - * given type definition. - * - *

Note that the fields of a {@link UdtValue} are stored in their encoded form. They are - * encoded/decoded on the fly when you set or get them, using the codec registry. - */ - @NonNull - public static TypeCodec udtOf(@NonNull UserDefinedType cqlType) { - return new UdtCodec(cqlType); - } - - /** - * An alias for {@link ExtraTypeCodecs#ZONED_TIMESTAMP_SYSTEM}. - * - *

This exists for historical reasons: the constant was originally defined in this class, but - * technically it belongs to {@link ExtraTypeCodecs} because this is not a built-in mapping. - */ - public static final TypeCodec ZONED_TIMESTAMP_SYSTEM = - ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM; - - /** - * An alias for {@link ExtraTypeCodecs#ZONED_TIMESTAMP_UTC}. - * - *

This exists for historical reasons: the constant was originally defined in this class, but - * technically it belongs to {@link ExtraTypeCodecs} because this is not a built-in mapping. - */ - public static final TypeCodec ZONED_TIMESTAMP_UTC = - ExtraTypeCodecs.ZONED_TIMESTAMP_UTC; - - /** - * An alias for {@link ExtraTypeCodecs#zonedTimestampAt(ZoneId)}. - * - *

This exists for historical reasons: the method was originally defined in this class, but - * technically it belongs to {@link ExtraTypeCodecs} because this is not a built-in mapping. - */ - @NonNull - public static TypeCodec zonedTimestampAt(@NonNull ZoneId timeZone) { - return ExtraTypeCodecs.zonedTimestampAt(timeZone); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java deleted file mode 100644 index 36472f34c79..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** - * Provides codecs to convert CQL types to their Java equivalent, and vice-versa. - * - *

Implementations MUST provide a default mapping for all CQL types (primitive types, and - * all the collections, tuples or user-defined types that can recursively be built from them — - * see {@link DataTypes}). - * - *

They may also provide additional mappings to other Java types (for use with methods such as - * {@link Row#get(int, Class)}, {@link TupleValue#set(int, Object, Class)}, etc.) - * - *

The default implementation returned by the driver also implements {@link - * MutableCodecRegistry}, and we strongly recommend that custom implementations do as well. The two - * interfaces are only separate for backward compatibility, because mutability was introduced in - * 4.3.0. - */ -public interface CodecRegistry { - /** - * An immutable instance, that only handles built-in driver types (that is, primitive types, and - * collections, tuples, and user defined types thereof). - * - *

Note that, due to implementation details, this instance is a {@link MutableCodecRegistry}, - * but any attempt to {@linkplain MutableCodecRegistry#register(TypeCodec) register new codecs} - * will throw {@link UnsupportedOperationException}. - */ - CodecRegistry DEFAULT = - new DefaultCodecRegistry("default") { - @Override - public void register(TypeCodec newCodec) { - throw new UnsupportedOperationException("CodecRegistry.DEFAULT is immutable"); - } - }; - - /** - * Returns a codec to handle the conversion between the given types. - * - *

This is used internally by the driver, in cases where both types are known, for example - * {@link GettableByIndex#getString(int) row.getString(0)} (Java type inferred from the method, - * CQL type known from the row metadata). - * - *

The driver's default registry implementation is invariant with regard to the Java - * type: for example, if {@code B extends A} and an {@code A<=>int} codec is registered, {@code - * codecFor(DataTypes.INT, B.class)} will not find that codec. This is because this method - * is used internally both for encoding and decoding, and covariance wouldn't work when decoding. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull GenericType javaType); - - /** - * Shortcut for {@link #codecFor(DataType, GenericType) codecFor(cqlType, - * GenericType.of(javaType))}. - * - *

Implementations may decide to override this method for performance reasons, if they have a - * way to avoid the overhead of wrapping. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - default TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull Class javaType) { - return codecFor(cqlType, GenericType.of(javaType)); - } - - /** - * Returns a codec to convert the given CQL type to the Java type deemed most appropriate to - * represent it. - * - *

This is used internally by the driver, in cases where the Java type is not explicitly - * provided, for example {@link GettableByIndex#getObject(int) row.getObject(0)} (CQL type known - * from the row metadata, Java type unspecified). - * - *

The definition of "most appropriate" is left to the appreciation of the registry - * implementor. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor(@NonNull DataType cqlType); - - /** - * Returns a codec to convert the given Java type to the CQL type deemed most appropriate to - * represent it. - * - *

The driver does not use this method. It is provided as a convenience for third-party usage, - * for example if you were to generate a schema based on a set of Java classes. - * - *

The driver's default registry implementation is invariant with regard to the Java - * type: for example, if {@code B extends A} and an {@code A<=>int} codec is registered, {@code - * codecFor(DataTypes.INT, B.class)} will not find that codec. This is because we don't - * know whether this method will be used for encoding, decoding, or both. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor(@NonNull GenericType javaType); - - /** - * Shortcut for {@link #codecFor(GenericType) codecFor(GenericType.of(javaType))}. - * - *

Implementations may decide to override this method for performance reasons, if they have a - * way to avoid the overhead of wrapping. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - default TypeCodec codecFor(@NonNull Class javaType) { - return codecFor(GenericType.of(javaType)); - } - - /** - * Returns a codec to convert the given Java object to the given CQL type. - * - *

This is used internally by the driver when you bulk-set values in a {@link - * PreparedStatement#bind(Object...) bound statement}, {@link UserDefinedType#newValue(Object...) - * UDT} or {@link TupleType#newValue(Object...) tuple}. - * - *

Unlike other methods, the driver's default registry implementation is covariant - * with regard to the Java type: for example, if {@code B extends A} and an {@code A<=>int} codec - * is registered, {@code codecFor(DataTypes.INT, someB)} will find that codec. This is - * because this method is always used in encoding scenarios; if a bound statement has a value with - * a runtime type of {@code ArrayList}, it should be possible to encode it with a codec - * that accepts a {@code List}. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor(@NonNull DataType cqlType, @NonNull JavaTypeT value); - - /** - * Returns a codec to convert the given Java object to the CQL type deemed most appropriate to - * represent it. - * - *

This is used internally by the driver, in cases where the CQL type is unknown, for example - * for {@linkplain SimpleStatement#setPositionalValues(List) simple statement variables} (simple - * statements don't have access to schema metadata). - * - *

Unlike other methods, the driver's default registry implementation is covariant - * with regard to the Java type: for example, if {@code B extends A} and an {@code A<=>int} codec - * is registered, {@code codecFor(someB)} will find that codec. This is because this method - * is always used in encoding scenarios; if a simple statement has a value with a runtime type of - * {@code ArrayList}, it should be possible to encode it with a codec that accepts a - * {@code List}. - * - *

Note that, if {@code value} is an empty collection, this method may return a codec that - * won't accept {@code JavaTypeT}; but it will encode {@code value} correctly. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor(@NonNull JavaTypeT value); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.java deleted file mode 100644 index 7f5d1fb9813..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; - -/** - * A codec registry that can be extended with new user codecs at runtime. - * - *

This interface only exists to preserve backward compatibility. In practice, the default {@link - * CodecRegistry} implementation returned by the driver implements this interface, so it can safely - * be cast. - * - *

However {@link CodecRegistry#DEFAULT} is immutable. It implements this interface, but {@link - * #register(TypeCodec)} throws an {@link UnsupportedOperationException}. - * - * @since 4.3.0 - */ -public interface MutableCodecRegistry extends CodecRegistry { - - /** - * Adds the given codec to the registry. - * - *

This method will log a warning and ignore the codec if it collides with one already present - * in the registry. Note that the driver's built-in implementation uses internal synchronization - * to guarantee that two threads cannot register colliding codecs concurrently; registration is - * not expected to happen in a very concurrent manner, so this should not pose a performance - * issue. - */ - void register(TypeCodec codec); - - /** Invokes {@link #register(TypeCodec)} for every codec in the given list. */ - default void register(TypeCodec... codecs) { - for (TypeCodec codec : codecs) { - register(codec); - } - } - - /** Invokes {@link #register(TypeCodec)} for every codec in the given list. */ - default void register(Iterable> codecs) { - for (TypeCodec codec : codecs) { - register(codec); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java deleted file mode 100644 index d22b6f1bfaf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.reflect; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.shaded.guava.common.primitives.Primitives; -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeParameter; -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeResolver; -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeToken; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Type; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.time.ZonedDateTime; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -/** - * Runtime representation of a generic Java type. - * - *

This is used by type codecs to indicate which Java types they accept ({@link - * TypeCodec#accepts(GenericType)}), and by generic getters and setters (such as {@link - * GettableByIndex#get(int, GenericType)} in the driver's query API. - * - *

There are various ways to build instances of this class: - * - *

By using one of the static factory methods: - * - *

{@code
- * GenericType> stringListType = GenericType.listOf(String.class);
- * }
- * - * By using an anonymous class: - * - *
{@code
- * GenericType> fooBarType = new GenericType>(){};
- * }
- * - * In a generic method, by using {@link #where(GenericTypeParameter, GenericType)} to substitute - * free type variables with runtime types: - * - *
{@code
- *  GenericType> optionalOf(GenericType elementType) {
- *   return new GenericType>() {}
- *     .where(new GenericTypeParameter() {}, elementType);
- * }
- * ...
- * GenericType>> optionalStringListType = optionalOf(GenericType.listOf(String.class));
- * }
- * - *

You are encouraged to store and reuse these instances. - * - *

Note that this class is a thin wrapper around Guava's {@code TypeToken}. The only reason why - * {@code TypeToken} is not used directly is because Guava is not exposed in the driver's public API - * (it's used internally, but shaded). - */ -@Immutable -public class GenericType { - - public static final GenericType BOOLEAN = of(Boolean.class); - public static final GenericType BYTE = of(Byte.class); - public static final GenericType DOUBLE = of(Double.class); - public static final GenericType FLOAT = of(Float.class); - public static final GenericType INTEGER = of(Integer.class); - public static final GenericType LONG = of(Long.class); - public static final GenericType SHORT = of(Short.class); - public static final GenericType INSTANT = of(Instant.class); - public static final GenericType ZONED_DATE_TIME = of(ZonedDateTime.class); - public static final GenericType LOCAL_DATE = of(LocalDate.class); - public static final GenericType LOCAL_TIME = of(LocalTime.class); - public static final GenericType LOCAL_DATE_TIME = of(LocalDateTime.class); - public static final GenericType BYTE_BUFFER = of(ByteBuffer.class); - public static final GenericType STRING = of(String.class); - public static final GenericType BIG_INTEGER = of(BigInteger.class); - public static final GenericType BIG_DECIMAL = of(BigDecimal.class); - public static final GenericType UUID = of(UUID.class); - public static final GenericType INET_ADDRESS = of(InetAddress.class); - public static final GenericType CQL_DURATION = of(CqlDuration.class); - public static final GenericType TUPLE_VALUE = of(TupleValue.class); - public static final GenericType UDT_VALUE = of(UdtValue.class); - public static final GenericType DURATION = of(Duration.class); - - @NonNull - public static GenericType of(@NonNull Class type) { - return new SimpleGenericType<>(type); - } - - @NonNull - public static GenericType of(@NonNull java.lang.reflect.Type type) { - return new GenericType<>(TypeToken.of(type)); - } - - @NonNull - public 
static GenericType> listOf(@NonNull Class elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, TypeToken.of(elementType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> listOf(@NonNull GenericType elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, elementType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> setOf(@NonNull Class elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, TypeToken.of(elementType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> setOf(@NonNull GenericType elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, elementType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> vectorOf(@NonNull Class elementType) { - TypeToken> token = - new TypeToken>() {}.where( - new TypeParameter() {}, TypeToken.of(elementType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> vectorOf(@NonNull GenericType elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, elementType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> mapOf( - @NonNull Class keyType, @NonNull Class valueType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, TypeToken.of(keyType)) - .where(new TypeParameter() {}, TypeToken.of(valueType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> mapOf( - @NonNull GenericType keyType, @NonNull GenericType valueType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, keyType.token) - .where(new TypeParameter() {}, valueType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType arrayOf(@NonNull Class componentType) { - TypeToken token = - new TypeToken() 
{}.where(new TypeParameter() {}, TypeToken.of(componentType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType arrayOf(@NonNull GenericType componentType) { - TypeToken token = - new TypeToken() {}.where(new TypeParameter() {}, componentType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> optionalOf(@NonNull Class componentType) { - TypeToken> token = - new TypeToken>() {}.where( - new TypeParameter() {}, TypeToken.of(componentType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> optionalOf(@NonNull GenericType componentType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, componentType.token); - return new GenericType<>(token); - } - - private final TypeToken token; - - private GenericType(TypeToken token) { - this.token = token; - } - - protected GenericType() { - this.token = new TypeToken(getClass()) {}; - } - - /** - * Returns true if this type is a supertype of the given {@code type}. "Supertype" is defined - * according to the rules for type - * arguments introduced with Java generics. - */ - public final boolean isSupertypeOf(@NonNull GenericType type) { - return token.isSupertypeOf(type.token); - } - - /** - * Returns true if this type is a subtype of the given {@code type}. "Subtype" is defined - * according to the rules for type - * arguments introduced with Java generics. - */ - public final boolean isSubtypeOf(@NonNull GenericType type) { - return token.isSubtypeOf(type.token); - } - - /** - * Returns true if this type is known to be an array type, such as {@code int[]}, {@code T[]}, - * {@code []>} etc. - */ - public final boolean isArray() { - return token.isArray(); - } - - /** Returns true if this type is one of the nine primitive types (including {@code void}). 
*/ - public final boolean isPrimitive() { - return token.isPrimitive(); - } - - /** - * Returns the corresponding wrapper type if this is a primitive type; otherwise returns {@code - * this} itself. Idempotent. - */ - @NonNull - public final GenericType wrap() { - if (isPrimitive()) { - return new GenericType<>(token.wrap()); - } - return this; - } - - /** - * Returns the corresponding primitive type if this is a wrapper type; otherwise returns {@code - * this} itself. Idempotent. - */ - @NonNull - public final GenericType unwrap() { - if (Primitives.allWrapperTypes().contains(token.getRawType())) { - return new GenericType<>(token.unwrap()); - } - return this; - } - - /** - * Substitutes a free type variable with an actual type. See {@link GenericType this class's - * javadoc} for an example. - */ - @NonNull - public final GenericType where( - @NonNull GenericTypeParameter freeVariable, @NonNull GenericType actualType) { - TypeResolver resolver = - new TypeResolver().where(freeVariable.getTypeVariable(), actualType.__getToken().getType()); - Type resolvedType = resolver.resolveType(this.token.getType()); - @SuppressWarnings("unchecked") - TypeToken resolvedToken = (TypeToken) TypeToken.of(resolvedType); - return new GenericType<>(resolvedToken); - } - - /** - * Substitutes a free type variable with an actual type. See {@link GenericType this class's - * javadoc} for an example. - */ - @NonNull - public final GenericType where( - @NonNull GenericTypeParameter freeVariable, @NonNull Class actualType) { - return where(freeVariable, GenericType.of(actualType)); - } - - /** - * Returns the array component type if this type represents an array ({@code int[]}, {@code T[]}, - * {@code []>} etc.), or else {@code null} is returned. - */ - @Nullable - @SuppressWarnings("unchecked") - public final GenericType getComponentType() { - TypeToken componentTypeToken = token.getComponentType(); - return (componentTypeToken == null) ? 
null : new GenericType(componentTypeToken); - } - - /** - * Returns the raw type of {@code T}. Formally speaking, if {@code T} is returned by {@link - * java.lang.reflect.Method#getGenericReturnType}, the raw type is what's returned by {@link - * java.lang.reflect.Method#getReturnType} of the same method object. Specifically: - * - *

    - *
  • If {@code T} is a {@code Class} itself, {@code T} itself is returned. - *
  • If {@code T} is a parameterized type, the raw type of the parameterized type is returned. - *
  • If {@code T} is an array type , the returned type is the corresponding array class. For - * example: {@code List[] => List[]}. - *
  • If {@code T} is a type variable or a wildcard type, the raw type of the first upper bound - * is returned. For example: {@code => Foo}. - *
- */ - @NonNull - public Class getRawType() { - return token.getRawType(); - } - - /** - * Returns the generic form of {@code superclass}. For example, if this is {@code - * ArrayList}, {@code Iterable} is returned given the input {@code - * Iterable.class}. - */ - @SuppressWarnings("unchecked") - @NonNull - public final GenericType getSupertype(@NonNull Class superclass) { - return new GenericType(token.getSupertype(superclass)); - } - - /** - * Returns subtype of {@code this} with {@code subclass} as the raw class. For example, if this is - * {@code Iterable} and {@code subclass} is {@code List}, {@code List} is - * returned. - */ - @SuppressWarnings("unchecked") - @NonNull - public final GenericType getSubtype(@NonNull Class subclass) { - return new GenericType(token.getSubtype(subclass)); - } - - /** Returns the represented type. */ - @NonNull - public final Type getType() { - return token.getType(); - } - - /** - * This method is for internal use, DO NOT use it from client code. - * - *

It leaks a shaded type. This should be part of the internal API, but due to internal - * implementation details it has to be exposed here. - * - * @leaks-private-api - */ - @NonNull - public TypeToken __getToken() { - return token; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof GenericType) { - GenericType that = (GenericType) other; - return this.token.equals(that.token); - } else { - return false; - } - } - - @Override - public int hashCode() { - return token.hashCode(); - } - - @Override - public String toString() { - return token.toString(); - } - - private static class SimpleGenericType extends GenericType { - SimpleGenericType(Class type) { - super(TypeToken.of(type)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java deleted file mode 100644 index 3bf0e3537e0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.reflect; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.lang.reflect.TypeVariable; -import net.jcip.annotations.Immutable; - -/** - * Captures a free type variable that can be used in {@link GenericType#where(GenericTypeParameter, - * GenericType)}. - */ -@Immutable -@SuppressWarnings("unused") // for T (unfortunately has to cover the whole class) -public class GenericTypeParameter { - private final TypeVariable typeVariable; - - protected GenericTypeParameter() { - Type superclass = getClass().getGenericSuperclass(); - Preconditions.checkArgument( - superclass instanceof ParameterizedType, "%s isn't parameterized", superclass); - this.typeVariable = - (TypeVariable) ((ParameterizedType) superclass).getActualTypeArguments()[0]; - } - - @NonNull - public TypeVariable getTypeVariable() { - return typeVariable; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java b/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java deleted file mode 100644 index 8dae31f3734..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java +++ /dev/null @@ -1,682 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.uuid; - -import com.datastax.oss.driver.internal.core.os.Native; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.lang.management.ManagementFactory; -import java.net.InetAddress; -import java.net.NetworkInterface; -import java.net.SocketException; -import java.net.UnknownHostException; -import java.nio.charset.StandardCharsets; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Date; -import java.util.Enumeration; -import java.util.HashSet; -import java.util.Objects; -import java.util.Properties; -import java.util.Random; -import java.util.Set; -import java.util.SplittableRandom; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Utility methods to help working with UUIDs, and more specifically, with time-based UUIDs (also - * known as Version 1 UUIDs). - * - *

The algorithm to generate time-based UUIDs roughly follows the description in RFC-4122, but - * with the following adaptations: - * - *

    - *
  1. Since Java does not provide direct access to the host's MAC address, that information is - * replaced with a digest of all IP addresses available on the host; - *
  2. The process ID (PID) isn't easily available to Java either, so it is determined by one of - * the following methods, in the order they are listed below: - *
      - *
    1. If the System property {@value PID_SYSTEM_PROPERTY} is set then the - * value to use as a PID will be read from that property; - *
    2. Otherwise, if a native call to {@code POSIX.getpid()} is possible, then the PID will - * be read from that call; - *
    3. Otherwise, an attempt will be made to read the PID from JMX's {@link - * ManagementFactory#getRuntimeMXBean() RuntimeMXBean}, since most JVMs tend to use the - * JVM's PID as part of that MXBean name (however that behavior is not officially part - * of the specification, so it may not work for all JVMs); - *
    4. If all of the above fail, a random integer will be generated and used as a surrogate - * PID. - *
    - *
- * - * @see JAVA-444 - * @see A Universally Unique IDentifier (UUID) URN - * Namespace (RFC 4122) - */ -public final class Uuids { - - /** The system property to use to force the value of the process ID ({@value}). */ - public static final String PID_SYSTEM_PROPERTY = "com.datastax.oss.driver.PID"; - - /** - * The namespace UUID for URLs, as defined in Appendix C of RFC-4122. When using - * this namespace to create a name-based UUID, it is expected that the name part be a valid {@link - * java.net.URL URL}. - */ - public static final UUID NAMESPACE_URL = UUID.fromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"); - - /** - * The namespace UUID for fully-qualified domain names, as defined in Appendix C of RFC-4122. When using - * this namespace to create a name-based UUID, it is expected that the name part be a valid domain - * name. - */ - public static final UUID NAMESPACE_DNS = UUID.fromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); - - /** - * The namespace UUID for OIDs, as defined in Appendix C of RFC-4122. When using - * this namespace to create a name-based UUID, it is expected that the name part be an ISO OID. - */ - public static final UUID NAMESPACE_OID = UUID.fromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"); - - /** - * The namespace UUID for X.500 domain names, as defined in Appendix C of RFC-4122. When using - * this namespace to create a name-based UUID, it is expected that the name part be a valid X.500 - * domain name, in DER or a text output format. - */ - public static final UUID NAMESPACE_X500 = UUID.fromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"); - - private static final Logger LOG = LoggerFactory.getLogger(Uuids.class); - - private Uuids() {} - - /** - * UUID v1 timestamps must be expressed relatively to October 15th, 1582 – the day when Gregorian - * calendar was introduced. This constant captures that moment in time expressed in milliseconds - * before the Unix epoch. It can be obtained by calling: - * - *
-   *   Instant.parse("1582-10-15T00:00:00Z").toEpochMilli();
-   * 
- */ - private static final long START_EPOCH_MILLIS = -12219292800000L; - - // Lazily initialize clock seq + node value at time of first access. Quarkus will attempt to - // initialize this class at deployment time which prevents us from just setting this value - // directly. The "node" part of the clock seq + node includes the current PID which (for - // GraalVM users) we obtain via the LLVM interop. That infrastructure isn't setup at Quarkus - // deployment time, however, thus we can't just call makeClockSeqAndNode() in an initializer. - // See JAVA-2663 for more detail on this point. - // - // Container impl adapted from Guava's memoized Supplier impl. - private static class ClockSeqAndNodeContainer { - - private volatile boolean initialized = false; - private long val; - - private long get() { - if (!initialized) { - synchronized (ClockSeqAndNodeContainer.class) { - if (!initialized) { - - initialized = true; - val = makeClockSeqAndNode(); - } - } - } - return val; - } - } - - private static final ClockSeqAndNodeContainer CLOCK_SEQ_AND_NODE = new ClockSeqAndNodeContainer(); - - // The min and max possible lsb for a UUID. - // - // This is not 0 and all 1's because Cassandra's TimeUUIDType compares the lsb parts as signed - // byte arrays. So the min value is 8 times -128 and the max is 8 times +127. - // - // We ignore the UUID variant (namely, MIN_CLOCK_SEQ_AND_NODE has variant 2 as it should, but - // MAX_CLOCK_SEQ_AND_NODE has variant 0) because I don't trust all UUID implementations to have - // correctly set those (pycassa doesn't always for instance). 
- private static final long MIN_CLOCK_SEQ_AND_NODE = 0x8080808080808080L; - private static final long MAX_CLOCK_SEQ_AND_NODE = 0x7f7f7f7f7f7f7f7fL; - - private static final AtomicLong lastTimestamp = new AtomicLong(0L); - - private static long makeNode() { - - // We don't have access to the MAC address (in pure JAVA at least) but need to generate a node - // part that identifies this host as uniquely as possible. - // The spec says that one option is to take as many sources that identify this node as possible - // and hash them together. That's what we do here by gathering all the IPs of this host as well - // as a few other sources. - try { - - MessageDigest digest = MessageDigest.getInstance("MD5"); - for (String address : getAllLocalAddresses()) update(digest, address); - - Properties props = System.getProperties(); - update(digest, props.getProperty("java.vendor")); - update(digest, props.getProperty("java.vendor.url")); - update(digest, props.getProperty("java.version")); - update(digest, props.getProperty("os.arch")); - update(digest, props.getProperty("os.name")); - update(digest, props.getProperty("os.version")); - update(digest, getProcessPiece()); - - byte[] hash = digest.digest(); - - long node = 0; - for (int i = 0; i < 6; i++) node |= (0x00000000000000ffL & (long) hash[i]) << (i * 8); - // Since we don't use the MAC address, the spec says that the multicast bit (least significant - // bit of the first byte of the node ID) must be 1. 
- return node | 0x0000010000000000L; - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } - } - - private static String getProcessPiece() { - Integer pid = null; - String pidProperty = System.getProperty(PID_SYSTEM_PROPERTY); - if (pidProperty != null) { - try { - pid = Integer.parseInt(pidProperty); - LOG.info("PID obtained from System property {}: {}", PID_SYSTEM_PROPERTY, pid); - } catch (NumberFormatException e) { - LOG.warn( - "Incorrect integer specified for PID in System property {}: {}", - PID_SYSTEM_PROPERTY, - pidProperty); - } - } - if (pid == null && Native.isGetProcessIdAvailable()) { - try { - pid = Native.getProcessId(); - LOG.info("PID obtained through native call to getpid(): {}", pid); - } catch (Exception e) { - Loggers.warnWithException(LOG, "Native call to getpid() failed", e); - } - } - if (pid == null) { - try { - @SuppressWarnings("StringSplitter") - String pidJmx = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; - pid = Integer.parseInt(pidJmx); - LOG.info("PID obtained through JMX: {}", pid); - } catch (Exception e) { - Loggers.warnWithException(LOG, "Failed to obtain PID from JMX", e); - } - } - if (pid == null) { - pid = new Random().nextInt(); - LOG.warn("Could not determine PID, falling back to a random integer: {}", pid); - } - ClassLoader loader = Uuids.class.getClassLoader(); - int loaderId = loader != null ? System.identityHashCode(loader) : 0; - return Integer.toHexString(pid) + Integer.toHexString(loaderId); - } - - private static void update(MessageDigest digest, String value) { - if (value != null) { - digest.update(value.getBytes(Charsets.UTF_8)); - } - } - - private static long makeClockSeqAndNode() { - long clock = new Random(System.currentTimeMillis()).nextLong(); - long node = makeNode(); - - long lsb = 0; - lsb |= (clock & 0x0000000000003FFFL) << 48; - lsb |= 0x8000000000000000L; - lsb |= node; - return lsb; - } - - /** - * Creates a new random (version 4) UUID. - * - *

This method has received a new implementation as of driver 4.10. Unlike the JDK's - * {@link UUID#randomUUID()} method, it does not use anymore the cryptographic {@link - * java.security.SecureRandom} number generator. Instead, it uses the non-cryptographic {@link - * Random} class, with a different seed at every invocation. - * - *

Using a non-cryptographic generator has two advantages: - * - *

    - *
  1. UUID generation is much faster than with {@link UUID#randomUUID()}; - *
  2. Contrary to {@link UUID#randomUUID()}, UUID generation with this method does not require - * I/O and is not a blocking call, which makes this method better suited for non-blocking - * applications. - *
- * - * Of course, this method is intended for usage where cryptographic strength is not required, such - * as when generating row identifiers for insertion in the database. If you still need - * cryptographic strength, consider using {@link Uuids#random(Random)} instead, and pass an - * instance of {@link java.security.SecureRandom}. - */ - @NonNull - public static UUID random() { - return random(new Random()); - } - - /** - * Creates a new random (version 4) UUID using the provided {@link Random} instance. - * - *

This method offers more flexibility than {@link #random()} as it allows to customize the - * {@link Random} instance to use, and also offers the possibility to reuse instances across - * successive calls. Reusing Random instances is the norm when using {@link - * java.util.concurrent.ThreadLocalRandom}, for instance; however other Random implementations may - * perform poorly under heavy thread contention. - * - *

Note: some Random implementations, such as {@link java.security.SecureRandom}, may trigger - * I/O activity during random number generation; these instances should not be used in - * non-blocking contexts. - */ - @NonNull - public static UUID random(@NonNull Random random) { - byte[] data = new byte[16]; - random.nextBytes(data); - return buildUuid(data, 4); - } - - /** - * Creates a new random (version 4) UUID using the provided {@link SplittableRandom} instance. - * - *

This method should be preferred to {@link #random()} when UUID generation happens in massive - * parallel computations, such as when using the ForkJoin framework. Note that {@link - * SplittableRandom} instances are not thread-safe. - */ - @NonNull - public static UUID random(@NonNull SplittableRandom random) { - byte[] data = toBytes(random.nextLong(), random.nextLong()); - return buildUuid(data, 4); - } - - /** - * Creates a new name-based (version 3) {@link UUID} from the given namespace UUID and the given - * string representing the name part. - * - *

Note that the given string will be converted to bytes using {@link StandardCharsets#UTF_8}. - * - * @param namespace The namespace UUID to use; cannot be null. - * @param name The name part; cannot be null. - * @throws NullPointerException if namespace or name is null. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for version 3 (MD5) is not - * available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull UUID namespace, @NonNull String name) { - Objects.requireNonNull(name, "name cannot be null"); - return nameBased(namespace, name.getBytes(StandardCharsets.UTF_8)); - } - - /** - * Creates a new name-based (version 3) {@link UUID} from the given namespace UUID and the given - * byte array representing the name part. - * - * @param namespace The namespace UUID to use; cannot be null. - * @param name The name part; cannot be null. - * @throws NullPointerException if namespace or name is null. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for version 3 (MD5) is not - * available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull UUID namespace, @NonNull byte[] name) { - return nameBased(namespace, name, 3); - } - - /** - * Creates a new name-based (version 3 or version 5) {@link UUID} from the given namespace UUID - * and the given string representing the name part. - * - *

Note that the given string will be converted to bytes using {@link StandardCharsets#UTF_8}. - * - * @param namespace The namespace UUID to use; cannot be null. - * @param name The name part; cannot be null. - * @param version The version to use, must be either 3 or 5; version 3 uses MD5 as its {@link - * MessageDigest} algorithm, while version 5 uses SHA-1. - * @throws NullPointerException if namespace or name is null. - * @throws IllegalArgumentException if version is not 3 nor 5. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for the desired version is - * not available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull UUID namespace, @NonNull String name, int version) { - Objects.requireNonNull(name, "name cannot be null"); - return nameBased(namespace, name.getBytes(StandardCharsets.UTF_8), version); - } - - /** - * Creates a new name-based (version 3 or version 5) {@link UUID} from the given namespace UUID - * and the given byte array representing the name part. - * - * @param namespace The namespace UUID to use; cannot be null. - * @param name The name to use; cannot be null. - * @param version The version to use, must be either 3 or 5; version 3 uses MD5 as its {@link - * MessageDigest} algorithm, while version 5 uses SHA-1. - * @throws NullPointerException if namespace or name is null. - * @throws IllegalArgumentException if version is not 3 nor 5. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for the desired version is - * not available on this platform. 
- */ - @NonNull - public static UUID nameBased(@NonNull UUID namespace, @NonNull byte[] name, int version) { - Objects.requireNonNull(namespace, "namespace cannot be null"); - Objects.requireNonNull(name, "name cannot be null"); - MessageDigest md = newMessageDigest(version); - md.update(toBytes(namespace)); - md.update(name); - return buildUuid(md.digest(), version); - } - - /** - * Creates a new name-based (version 3) {@link UUID} from the given byte array containing the - * namespace UUID and the name parts concatenated together. - * - *

The byte array is expected to be at least 16 bytes long. - * - * @param namespaceAndName A byte array containing the concatenated namespace UUID and name; - * cannot be null. - * @throws NullPointerException if namespaceAndName is null. - * @throws IllegalArgumentException if namespaceAndName is not at least 16 bytes - * long. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for version 3 (MD5) is not - * available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull byte[] namespaceAndName) { - return nameBased(namespaceAndName, 3); - } - - /** - * Creates a new name-based (version 3 or version 5) {@link UUID} from the given byte array - * containing the namespace UUID and the name parts concatenated together. - * - *

The byte array is expected to be at least 16 bytes long. - * - * @param namespaceAndName A byte array containing the concatenated namespace UUID and name; - * cannot be null. - * @param version The version to use, must be either 3 or 5. - * @throws NullPointerException if namespaceAndName is null. - * @throws IllegalArgumentException if namespaceAndName is not at least 16 bytes - * long. - * @throws IllegalArgumentException if version is not 3 nor 5. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for the desired version is - * not available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull byte[] namespaceAndName, int version) { - Objects.requireNonNull(namespaceAndName, "namespaceAndName cannot be null"); - if (namespaceAndName.length < 16) { - throw new IllegalArgumentException("namespaceAndName must be at least 16 bytes long"); - } - MessageDigest md = newMessageDigest(version); - md.update(namespaceAndName); - return buildUuid(md.digest(), version); - } - - @NonNull - private static MessageDigest newMessageDigest(int version) { - if (version != 3 && version != 5) { - throw new IllegalArgumentException( - "Invalid name-based UUID version, expecting 3 or 5, got: " + version); - } - String algorithm = version == 3 ? 
"MD5" : "SHA-1"; - try { - return MessageDigest.getInstance(algorithm); - } catch (NoSuchAlgorithmException e) { - throw new IllegalStateException(algorithm + " algorithm not available", e); - } - } - - @NonNull - private static UUID buildUuid(@NonNull byte[] data, int version) { - // clear and set version - data[6] &= (byte) 0x0f; - data[6] |= (byte) (version << 4); - // clear and set variant to IETF - data[8] &= (byte) 0x3f; - data[8] |= (byte) 0x80; - return fromBytes(data); - } - - private static UUID fromBytes(byte[] data) { - // data longer than 16 bytes will be truncated as mandated by the specs - assert data.length >= 16; - long msb = 0; - for (int i = 0; i < 8; i++) { - msb = (msb << 8) | (data[i] & 0xff); - } - long lsb = 0; - for (int i = 8; i < 16; i++) { - lsb = (lsb << 8) | (data[i] & 0xff); - } - return new UUID(msb, lsb); - } - - private static byte[] toBytes(UUID uuid) { - long msb = uuid.getMostSignificantBits(); - long lsb = uuid.getLeastSignificantBits(); - return toBytes(msb, lsb); - } - - private static byte[] toBytes(long msb, long lsb) { - byte[] out = new byte[16]; - for (int i = 0; i < 8; i++) { - out[i] = (byte) (msb >> ((7 - i) * 8)); - } - for (int i = 8; i < 16; i++) { - out[i] = (byte) (lsb >> ((15 - i) * 8)); - } - return out; - } - - /** - * Creates a new time-based (version 1) UUID. - * - *

UUIDs generated by this method are suitable for use with the {@code timeuuid} Cassandra - * type. In particular the generated UUID includes the timestamp of its generation. - * - *

Note that there is no way to provide your own timestamp. This is deliberate, as we feel that - * this does not conform to the UUID specification, and therefore don't want to encourage it - * through the API. If you want to do it anyway, use the following workaround: - * - *

-   * Random random = new Random();
-   * UUID uuid = new UUID(UUIDs.startOf(userProvidedTimestamp).getMostSignificantBits(), random.nextLong());
-   * 
- * - * If you simply need to perform a range query on a {@code timeuuid} column, use the "fake" UUID - * generated by {@link #startOf(long)} and {@link #endOf(long)}. - * - *

Usage with non-blocking threads: beware that this method may block the calling thread on its - * very first invocation, because the node part of time-based UUIDs needs to be computed at that - * moment, and the computation may require the loading of native libraries. If that is a problem, - * consider invoking this method once from a thread that is allowed to block. Subsequent - * invocations are guaranteed not to block. - */ - @NonNull - public static UUID timeBased() { - return new UUID(makeMsb(getCurrentTimestamp()), CLOCK_SEQ_AND_NODE.get()); - } - - /** - * Creates a "fake" time-based UUID that sorts as the smallest possible version 1 UUID generated - * at the provided timestamp. - * - *

Such created UUIDs are useful in queries to select a time range of a {@code timeuuid} - * column. - * - *

The UUIDs created by this method are not unique and as such are not suitable - * for anything else than querying a specific time range. In particular, you should not insert - * such UUIDs. "True" UUIDs from user-provided timestamps are not supported (see {@link - * #timeBased()} for more explanations). - * - *

Also, the timestamp to provide as a parameter must be a Unix timestamp (as returned by - * {@link System#currentTimeMillis} or {@link Date#getTime}), and not a count of - * 100-nanosecond intervals since 00:00:00.00, 15 October 1582 (as required by RFC-4122). - * - *

In other words, given a UUID {@code uuid}, you should never call {@code - * startOf(uuid.timestamp())} but rather {@code startOf(unixTimestamp(uuid))}. - * - *

Lastly, please note that Cassandra's {@code timeuuid} sorting is not compatible with {@link - * UUID#compareTo} and hence the UUIDs created by this method are not necessarily lower bound for - * that latter method. - * - * @param timestamp the Unix timestamp for which the created UUID must be a lower bound. - * @return the smallest (for Cassandra {@code timeuuid} sorting) UUID of {@code timestamp}. - */ - @NonNull - public static UUID startOf(long timestamp) { - return new UUID(makeMsb(fromUnixTimestamp(timestamp)), MIN_CLOCK_SEQ_AND_NODE); - } - - /** - * Creates a "fake" time-based UUID that sorts as the biggest possible version 1 UUID generated at - * the provided timestamp. - * - *

See {@link #startOf(long)} for explanations about the intended usage of such UUID. - * - * @param timestamp the Unix timestamp for which the created UUID must be an upper bound. - * @return the biggest (for Cassandra {@code timeuuid} sorting) UUID of {@code timestamp}. - */ - @NonNull - public static UUID endOf(long timestamp) { - long uuidTstamp = fromUnixTimestamp(timestamp + 1) - 1; - return new UUID(makeMsb(uuidTstamp), MAX_CLOCK_SEQ_AND_NODE); - } - - /** - * Returns the Unix timestamp contained by the provided time-based UUID. - * - *

This method is not equivalent to {@link UUID#timestamp()}. More precisely, a version 1 UUID - * stores a timestamp that represents the number of 100-nanoseconds intervals since midnight, 15 - * October 1582 and that is what {@link UUID#timestamp()} returns. This method however converts - * that timestamp to the equivalent Unix timestamp in milliseconds, i.e. a timestamp representing - * a number of milliseconds since midnight, January 1, 1970 UTC. In particular, the timestamps - * returned by this method are comparable to the timestamps returned by {@link - * System#currentTimeMillis}, {@link Date#getTime}, etc. - * - * @throws IllegalArgumentException if {@code uuid} is not a version 1 UUID. - */ - public static long unixTimestamp(@NonNull UUID uuid) { - if (uuid.version() != 1) { - throw new IllegalArgumentException( - String.format( - "Can only retrieve the unix timestamp for version 1 uuid (provided version %d)", - uuid.version())); - } - long timestamp = uuid.timestamp(); - return (timestamp / 10000) + START_EPOCH_MILLIS; - } - - // Use {@link System#currentTimeMillis} for a base time in milliseconds, and if we are in the same - // millisecond as the previous generation, increment the number of nanoseconds. - // However, since the precision is 100-nanosecond intervals, we can only generate 10K UUIDs within - // a millisecond safely. If we detect we have already generated that much UUIDs within a - // millisecond (which, while admittedly unlikely in a real application, is very achievable on even - // modest machines), then we stall the generator (busy spin) until the next millisecond as - // required by the RFC. 
- private static long getCurrentTimestamp() { - while (true) { - long now = fromUnixTimestamp(System.currentTimeMillis()); - long last = lastTimestamp.get(); - if (now > last) { - if (lastTimestamp.compareAndSet(last, now)) { - return now; - } - } else { - long lastMillis = millisOf(last); - // If the clock went back in time, bail out - if (millisOf(now) < millisOf(last)) { - return lastTimestamp.incrementAndGet(); - } - long candidate = last + 1; - // If we've generated more than 10k uuid in that millisecond, restart the whole process - // until we get to the next millis. Otherwise, we try use our candidate ... unless we've - // been beaten by another thread in which case we try again. - if (millisOf(candidate) == lastMillis && lastTimestamp.compareAndSet(last, candidate)) { - return candidate; - } - } - } - } - - @VisibleForTesting - static long fromUnixTimestamp(long tstamp) { - return (tstamp - START_EPOCH_MILLIS) * 10000; - } - - private static long millisOf(long timestamp) { - return timestamp / 10000; - } - - @VisibleForTesting - static long makeMsb(long timestamp) { - long msb = 0L; - msb |= (0x00000000ffffffffL & timestamp) << 32; - msb |= (0x0000ffff00000000L & timestamp) >>> 16; - msb |= (0x0fff000000000000L & timestamp) >>> 48; - msb |= 0x0000000000001000L; // sets the version to 1. 
- return msb; - } - - private static Set getAllLocalAddresses() { - Set allIps = new HashSet<>(); - try { - InetAddress localhost = InetAddress.getLocalHost(); - allIps.add(localhost.toString()); - // Also return the hostname if available, it won't hurt (this does a dns lookup, it's only - // done once at startup) - allIps.add(localhost.getCanonicalHostName()); - InetAddress[] allMyIps = InetAddress.getAllByName(localhost.getCanonicalHostName()); - if (allMyIps != null) { - for (InetAddress allMyIp : allMyIps) { - allIps.add(allMyIp.toString()); - } - } - } catch (UnknownHostException e) { - // Ignore, we'll try the network interfaces anyway - } - - try { - Enumeration en = NetworkInterface.getNetworkInterfaces(); - if (en != null) { - while (en.hasMoreElements()) { - Enumeration enumIpAddr = en.nextElement().getInetAddresses(); - while (enumIpAddr.hasMoreElements()) { - allIps.add(enumIpAddr.nextElement().toString()); - } - } - } - } catch (SocketException e) { - // Ignore, if we've really got nothing so far, we'll throw an exception - } - return allIps; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/package-info.java deleted file mode 100644 index 7b2219647b2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * The driver's public API. - * - *

This package, and all of its subpackages, contains all the types that are intended to be used - * by clients applications. Binary compatibility is guaranteed across minor versions. - */ -package com.datastax.oss.driver.api; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java deleted file mode 100644 index 055ab26909f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.shaded.guava.common.collect.AbstractIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; - -public class AsyncPagingIterableWrapper - implements MappedAsyncPagingIterable { - - private final AsyncPagingIterable source; - private final Function elementMapper; - - private final Iterable currentPage; - - public AsyncPagingIterableWrapper( - AsyncPagingIterable source, - Function elementMapper) { - this.source = source; - this.elementMapper = elementMapper; - - Iterator sourceIterator = source.currentPage().iterator(); - Iterator iterator = - new AbstractIterator() { - @Override - protected TargetT computeNext() { - return sourceIterator.hasNext() - ? 
elementMapper.apply(sourceIterator.next()) - : endOfData(); - } - }; - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return source.getColumnDefinitions(); - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return source.getExecutionInfo(); - } - - @Override - public int remaining() { - return source.remaining(); - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public boolean hasMorePages() { - return source.hasMorePages(); - } - - @NonNull - @Override - public CompletionStage> fetchNextPage() - throws IllegalStateException { - return source - .fetchNextPage() - .thenApply( - nextSource -> - new AsyncPagingIterableWrapper(nextSource, elementMapper)); - } - - @Override - public boolean wasApplied() { - return source.wasApplied(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java deleted file mode 100644 index 7b66a61636c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; - -/** - * Extension point to plug custom consistency levels. - * - *

This is overridable through {@link InternalDriverContext}. - */ -public interface ConsistencyLevelRegistry { - - ConsistencyLevel codeToLevel(int code); - - int nameToCode(String name); - - ConsistencyLevel nameToLevel(String name); - - /** @return all the values known to this driver instance. */ - Iterable getValues(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java deleted file mode 100644 index bb65661b72f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.internal.core.util.AddressUtils; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Utility class to handle the initial contact points passed to the driver. */ -public class ContactPoints { - private static final Logger LOG = LoggerFactory.getLogger(ContactPoints.class); - - public static Set merge( - Set programmaticContactPoints, List configContactPoints, boolean resolve) { - - Set result = Sets.newHashSet(programmaticContactPoints); - for (String spec : configContactPoints) { - - Set addresses = Collections.emptySet(); - try { - addresses = AddressUtils.extract(spec, resolve); - } catch (RuntimeException e) { - LOG.warn("Ignoring invalid contact point {} ({})", spec, e.getMessage(), e); - } - - if (addresses.size() > 1) { - LOG.info( - "Contact point {} resolves to multiple addresses, will use them all ({})", - spec, - addresses); - } - - for (InetSocketAddress address : addresses) { - DefaultEndPoint endPoint = new DefaultEndPoint(address); - boolean wasNew = result.add(endPoint); - if (!wasNew) { - LOG.warn("Duplicate contact point {}", address); - } - } - } - return ImmutableSet.copyOf(result); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java b/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java deleted file mode 100644 index a00da0e4b1a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.function.Function; - -public class CqlIdentifiers { - - @NonNull - private static List wrap( - @NonNull Iterable in, @NonNull Function fn) { - - Objects.requireNonNull(in, "Input Iterable must not be null"); - Objects.requireNonNull(fn, "CqlIdentifier conversion function must not be null"); - ImmutableList.Builder builder = ImmutableList.builder(); - for (String name : in) { - builder.add(fn.apply(name)); - } - return builder.build(); - } - - @NonNull - public static List wrap(@NonNull Iterable in) { - return wrap(in, CqlIdentifier::fromCql); - } - - @NonNull - public static List wrapInternal(@NonNull Iterable in) { - return wrap(in, CqlIdentifier::fromInternal); - } - - @NonNull - private static Map wrapKeys( - @NonNull Map in, @NonNull Function fn) { - Objects.requireNonNull(in, "Input Map must not be null"); - 
Objects.requireNonNull(fn, "CqlIdentifier conversion function must not be null"); - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Map.Entry entry : in.entrySet()) { - builder.put(fn.apply(entry.getKey()), entry.getValue()); - } - return builder.build(); - } - - @NonNull - public static Map wrapKeys(@NonNull Map in) { - return wrapKeys(in, CqlIdentifier::fromCql); - } - - @NonNull - public static Map wrapKeysInternal(@NonNull Map in) { - return wrapKeys(in, CqlIdentifier::fromInternal); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java deleted file mode 100644 index b563ad5facc..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultConsistencyLevelRegistry implements ConsistencyLevelRegistry { - - private static final ImmutableList VALUES = - ImmutableList.builder().add(DefaultConsistencyLevel.values()).build(); - private static final ImmutableMap NAME_TO_CODE; - - static { - ImmutableMap.Builder nameToCodeBuilder = ImmutableMap.builder(); - for (DefaultConsistencyLevel consistencyLevel : DefaultConsistencyLevel.values()) { - nameToCodeBuilder.put(consistencyLevel.name(), consistencyLevel.getProtocolCode()); - } - NAME_TO_CODE = nameToCodeBuilder.build(); - } - - @Override - public ConsistencyLevel codeToLevel(int code) { - return DefaultConsistencyLevel.fromCode(code); - } - - @Override - public int nameToCode(String name) { - return NAME_TO_CODE.get(name); - } - - @Override - public ConsistencyLevel nameToLevel(String name) { - return DefaultConsistencyLevel.valueOf(name); - } - - @Override - public Iterable getValues() { - return VALUES; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java deleted file mode 100644 index 8280ae8fec5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.MavenCoordinates; -import com.datastax.oss.driver.api.core.Version; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.UncheckedIOException; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.util.Properties; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DefaultMavenCoordinates implements MavenCoordinates { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultMavenCoordinates.class); - - public static MavenCoordinates buildFromResourceAndPrint(URL resource) { - MavenCoordinates info = buildFromResource(resource); - LOG.info("{}", info); - return info; - } - - public static DefaultMavenCoordinates buildFromResource(URL resource) { - // The resource is assumed to be a properties file, but - // encoded in UTF-8, not ISO-8859-1 as required by the Java specs, - // since our build tool (Maven) produces UTF-8-encoded resources. 
- try (InputStreamReader reader = - new InputStreamReader(resource.openStream(), StandardCharsets.UTF_8)) { - Properties props = new Properties(); - props.load(reader); - String name = props.getProperty("driver.name"); - String groupId = props.getProperty("driver.groupId"); - String artifactId = props.getProperty("driver.artifactId"); - String version = props.getProperty("driver.version"); - if (ByteBuf.class.getPackage().getName().contains("com.datastax.oss.driver.shaded")) { - artifactId += "-shaded"; - } - return new DefaultMavenCoordinates(name, groupId, artifactId, Version.parse(version)); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - private final String name; - private final String groupId; - private final String artifactId; - private final Version version; - - public DefaultMavenCoordinates(String name, String groupId, String artifactId, Version version) { - this.name = name; - this.groupId = groupId; - this.artifactId = artifactId; - this.version = version; - } - - @NonNull - @Override - public String getName() { - return name; - } - - @NonNull - @Override - public String getGroupId() { - return groupId; - } - - @NonNull - @Override - public String getArtifactId() { - return artifactId; - } - - @NonNull - @Override - public Version getVersion() { - return version; - } - - @Override - public String toString() { - return String.format("%s (%s:%s) version %s", name, groupId, artifactId, version); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java deleted file mode 100644 index 5d79f4ed0a5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -/** - * Features that are commonly supported by most Apache Cassandra protocol versions. - * - * @see com.datastax.oss.driver.api.core.DefaultProtocolVersion - */ -public enum DefaultProtocolFeature implements ProtocolFeature { - - /** - * The ability to leave variables unset in prepared statements. - * - * @see CASSANDRA-7304 - */ - UNSET_BOUND_VALUES, - - /** - * The ability to override the keyspace on a per-request basis. - * - * @see CASSANDRA-10145 - */ - PER_REQUEST_KEYSPACE, - - /** - * Support for smallint and tinyint types. - * - * @see CASSANDRA-8951 - */ - SMALLINT_AND_TINYINT_TYPES, - - /** - * Support for the date type. - * - * @see CASSANDRA-7523 - */ - DATE_TYPE, - - /** - * The ability to set a custom "now" time on statements (for testing purposes). - * - * @see CASSANDRA-14664 - */ - NOW_IN_SECONDS, - - /** - * The new protocol framing format introduced in Cassandra 4: wrapping multiple frames into a - * single "segment" to checksum (and possibly compress) them together. 
- * - * @see CASSANDRA-15299 - */ - MODERN_FRAMING, - ; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistry.java deleted file mode 100644 index 80850e8e95a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistry.java +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.DseProtocolFeature; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collection; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Built-in implementation of the protocol version registry, supports all Cassandra and DSE - * versions. 
- */ -@ThreadSafe -public class DefaultProtocolVersionRegistry implements ProtocolVersionRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultProtocolVersionRegistry.class); - private static final List allVersions = - ImmutableList.builder() - .add(DefaultProtocolVersion.values()) - .add(DseProtocolVersion.values()) - .build(); - - @VisibleForTesting - static final Version DSE_4_7_0 = Objects.requireNonNull(Version.parse("4.7.0")); - - @VisibleForTesting - static final Version DSE_5_0_0 = Objects.requireNonNull(Version.parse("5.0.0")); - - @VisibleForTesting - static final Version DSE_5_1_0 = Objects.requireNonNull(Version.parse("5.1.0")); - - @VisibleForTesting - static final Version DSE_6_0_0 = Objects.requireNonNull(Version.parse("6.0.0")); - - @VisibleForTesting - static final Version DSE_7_0_0 = Objects.requireNonNull(Version.parse("7.0.0")); - - private final String logPrefix; - - public DefaultProtocolVersionRegistry(String logPrefix) { - this.logPrefix = logPrefix; - } - - @Override - public ProtocolVersion fromName(String name) { - try { - return DefaultProtocolVersion.valueOf(name); - } catch (IllegalArgumentException noOssVersion) { - try { - return DseProtocolVersion.valueOf(name); - } catch (IllegalArgumentException noDseVersion) { - throw new IllegalArgumentException("Unknown protocol version name: " + name); - } - } - } - - @Override - public ProtocolVersion highestNonBeta() { - ProtocolVersion highest = allVersions.get(allVersions.size() - 1); - if (!highest.isBeta()) { - return highest; - } else { - return downgrade(highest) - .orElseThrow(() -> new AssertionError("There should be at least one non-beta version")); - } - } - - @Override - public Optional downgrade(ProtocolVersion version) { - int index = allVersions.indexOf(version); - if (index < 0) { - // This method is called with a value obtained from fromName, so this should never happen - throw new AssertionError(version + " is not a known version"); - } else if 
(index == 0) { - return Optional.empty(); - } else { - ProtocolVersion previousVersion = allVersions.get(index - 1); - // Beta versions are skipped during negotiation - return previousVersion.isBeta() ? downgrade(previousVersion) : Optional.of(previousVersion); - } - } - - @Override - public ProtocolVersion highestCommon(Collection nodes) { - if (nodes == null || nodes.isEmpty()) { - throw new IllegalArgumentException("Expected at least one node"); - } - - // Start with all non-beta versions (beta versions are always forced, and we don't call this - // method if the version was forced). - Set candidates = new LinkedHashSet<>(); - for (ProtocolVersion version : allVersions) { - if (!version.isBeta()) { - candidates.add(version); - } - } - // Keep an unfiltered copy in case we need to throw an exception below - ImmutableList initialCandidates = ImmutableList.copyOf(candidates); - - // For each node, remove the versions it doesn't support - for (Node node : nodes) { - - // We can't trust the Cassandra version reported by DSE to infer the maximum OSS protocol - // supported. For example DSE 6 reports release_version 4.0-SNAPSHOT, but only supports OSS - // protocol v4 (while Cassandra 4 will support v5). So we treat DSE separately. 
- Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (dseVersion != null) { - LOG.debug("[{}] Node {} reports DSE version {}", logPrefix, node.getEndPoint(), dseVersion); - dseVersion = dseVersion.nextStable(); - if (dseVersion.compareTo(DSE_4_7_0) < 0) { - throw new UnsupportedProtocolVersionException( - node.getEndPoint(), - String.format( - "Node %s reports DSE version %s, " - + "but the driver only supports 4.7.0 and above", - node.getEndPoint(), dseVersion), - initialCandidates); - } else if (dseVersion.compareTo(DSE_5_0_0) < 0) { - // DSE 4.7.x, 4.8.x - removeHigherThan(DefaultProtocolVersion.V3, null, candidates); - } else if (dseVersion.compareTo(DSE_5_1_0) < 0) { - // DSE 5.0 - removeHigherThan(DefaultProtocolVersion.V4, null, candidates); - } else if (dseVersion.compareTo(DSE_6_0_0) < 0) { - // DSE 5.1 - removeHigherThan(DefaultProtocolVersion.V4, DseProtocolVersion.DSE_V1, candidates); - } else if (dseVersion.compareTo(DSE_7_0_0) < 0) { - // DSE 6 - removeHigherThan(DefaultProtocolVersion.V4, DseProtocolVersion.DSE_V2, candidates); - } else { - // DSE 7.0 - removeHigherThan(DefaultProtocolVersion.V5, DseProtocolVersion.DSE_V2, candidates); - } - } else { // not DSE - Version cassandraVersion = node.getCassandraVersion(); - if (cassandraVersion == null) { - LOG.warn( - "[{}] Node {} reports neither DSE version nor Cassandra version, " - + "ignoring it from optimal protocol version computation", - logPrefix, - node.getEndPoint()); - continue; - } - cassandraVersion = cassandraVersion.nextStable(); - LOG.debug( - "[{}] Node {} reports Cassandra version {}", - logPrefix, - node.getEndPoint(), - cassandraVersion); - if (cassandraVersion.compareTo(Version.V2_1_0) < 0) { - throw new UnsupportedProtocolVersionException( - node.getEndPoint(), - String.format( - "Node %s reports Cassandra version %s, " - + "but the driver only supports 2.1.0 and above", - node.getEndPoint(), cassandraVersion), - 
ImmutableList.of(DefaultProtocolVersion.V3, DefaultProtocolVersion.V4)); - } else if (cassandraVersion.compareTo(Version.V2_2_0) < 0) { - // 2.1.0 - removeHigherThan(DefaultProtocolVersion.V3, null, candidates); - } else if (cassandraVersion.compareTo(Version.V4_0_0) < 0) { - // 2.2, 3.x - removeHigherThan(DefaultProtocolVersion.V4, null, candidates); - } else { - // 4.0 - removeHigherThan(DefaultProtocolVersion.V5, null, candidates); - } - } - } - - // If we have versions left, return the highest one - ProtocolVersion max = null; - for (ProtocolVersion candidate : candidates) { - if (max == null || max.getCode() < candidate.getCode()) { - max = candidate; - } - } - if (max == null) { // Note: with the current algorithm, this never happens - throw new UnsupportedProtocolVersionException( - null, - String.format( - "Could not determine a common protocol version, " - + "enable DEBUG logs for '%s' for more details", - LOG.getName()), - initialCandidates); - } else { - return max; - } - } - - // Removes all versions strictly higher than the given versions from candidates. A null - // maxDseVersion means "remove all DSE versions". 
- private void removeHigherThan( - DefaultProtocolVersion maxOssVersion, - DseProtocolVersion maxDseVersion, - Set candidates) { - for (DefaultProtocolVersion ossVersion : DefaultProtocolVersion.values()) { - if (ossVersion.compareTo(maxOssVersion) > 0 && candidates.remove(ossVersion)) { - LOG.debug("[{}] Excluding protocol {}", logPrefix, ossVersion); - } - } - for (DseProtocolVersion dseVersion : DseProtocolVersion.values()) { - if ((maxDseVersion == null || dseVersion.compareTo(maxDseVersion) > 0) - && candidates.remove(dseVersion)) { - LOG.debug("[{}] Excluding protocol {}", logPrefix, dseVersion); - } - } - } - - @Override - public boolean supports(ProtocolVersion version, ProtocolFeature feature) { - int code = version.getCode(); - if (DefaultProtocolFeature.SMALLINT_AND_TINYINT_TYPES.equals(feature) - || DefaultProtocolFeature.DATE_TYPE.equals(feature) - || DefaultProtocolFeature.UNSET_BOUND_VALUES.equals(feature)) { - // All DSE versions and all OSS V4+ - return DefaultProtocolVersion.V4.getCode() <= code; - } else if (DefaultProtocolFeature.PER_REQUEST_KEYSPACE.equals(feature)) { - // Only DSE_V2+ and OSS V5+ - return (DefaultProtocolVersion.V5.getCode() <= code - && code < DseProtocolVersion.DSE_V1.getCode()) - || DseProtocolVersion.DSE_V2.getCode() <= code; - } else if (DefaultProtocolFeature.NOW_IN_SECONDS.equals(feature) - || DefaultProtocolFeature.MODERN_FRAMING.equals(feature)) { - // OSS only, V5+ - return DefaultProtocolVersion.V5.getCode() <= code - && code < DseProtocolVersion.DSE_V1.getCode(); - } else if (DseProtocolFeature.CONTINUOUS_PAGING.equals(feature)) { - // All DSE versions - return DseProtocolVersion.DSE_V1.getCode() <= code; - } else { - throw new IllegalArgumentException("Unhandled protocol feature: " + feature); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java deleted file mode 100644 index 
1f79f673d02..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.cql.PagingIterableSpliterator; -import com.datastax.oss.driver.shaded.guava.common.collect.AbstractIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.List; -import java.util.Spliterator; -import java.util.function.Function; - -public class PagingIterableWrapper implements PagingIterable { - - private final PagingIterable source; - private final boolean sized; - private final Iterator iterator; - - /** - * Creates a {@link PagingIterableWrapper} for the given source, with unknown size. Spliterators - * for this iterable will never report {@link Spliterator#SIZED}. - * - * @param source The source to wrap. - * @param elementMapper The element mapper. 
- */ - public PagingIterableWrapper( - @NonNull PagingIterable source, - @NonNull Function elementMapper) { - this(source, elementMapper, false); - } - - /** - * Creates a {@link PagingIterableWrapper} for the given source. If {@code sized} is {@code true}, - * spliterators for this iterable will report {@link Spliterator#SIZED} and {@link - * Spliterator#SUBSIZED} and their estimated size will be {@link #getAvailableWithoutFetching()}. - * - * @param source The source to wrap. - * @param elementMapper The element mapper. - * @param sized Whether this iterable has a known size or not. - */ - public PagingIterableWrapper( - @NonNull PagingIterable source, - @NonNull Function elementMapper, - boolean sized) { - this.source = source; - this.sized = sized; - Iterator sourceIterator = source.iterator(); - this.iterator = - new AbstractIterator() { - @Override - protected TargetT computeNext() { - return sourceIterator.hasNext() - ? elementMapper.apply(sourceIterator.next()) - : endOfData(); - } - }; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return source.getColumnDefinitions(); - } - - @NonNull - @Override - public List getExecutionInfos() { - return source.getExecutionInfos(); - } - - @Override - public boolean isFullyFetched() { - return source.isFullyFetched(); - } - - @Override - public int getAvailableWithoutFetching() { - return source.getAvailableWithoutFetching(); - } - - @Override - public boolean wasApplied() { - return source.wasApplied(); - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - @NonNull - @Override - public Spliterator spliterator() { - PagingIterableSpliterator.Builder builder = PagingIterableSpliterator.builder(this); - if (sized) { - builder.withEstimatedSize(getAvailableWithoutFetching()); - } - return builder.build(); - } - - @NonNull - @Override - public PagingIterable map( - Function elementMapper) { - return new PagingIterableWrapper<>(this, elementMapper, sized); 
- } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java deleted file mode 100644 index bf73f7bbb16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.ProtocolVersion; - -/** - * A marker interface for features of the native protocol that are only supported by specific - * {@linkplain ProtocolVersion versions}. - * - *

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code ProtocolFeature}s are {@link DefaultProtocolFeature} instances. - * - * @see ProtocolVersionRegistry#supports(ProtocolVersion, ProtocolFeature) - * @see DefaultProtocolFeature - */ -public interface ProtocolFeature {} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java deleted file mode 100644 index eff1d099905..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import java.util.Collection; -import java.util.Optional; - -/** Defines which native protocol versions are supported by a driver instance. */ -public interface ProtocolVersionRegistry { - - /** - * Look up a version by its {@link ProtocolVersion#name() name}. This is used when a version was - * forced in the configuration. - * - * @throws IllegalArgumentException if there is no known version with this name. - * @see DefaultDriverOption#PROTOCOL_VERSION - */ - ProtocolVersion fromName(String name); - - /** - * The highest, non-beta version supported by the driver. This is used as the starting point for - * the negotiation process for the initial connection (if the version wasn't forced). - */ - ProtocolVersion highestNonBeta(); - - /** - * Downgrade to a lower version if the current version is not supported by the server. This is - * used during the negotiation process for the initial connection (if the version wasn't forced). - * - * @return empty if there is no version to downgrade to. - */ - Optional downgrade(ProtocolVersion version); - - /** - * Computes the highest common version supported by the given nodes. This is called after the - * initial {@link TopologyMonitor#refreshNodeList()} node refresh} (provided that the version was - * not forced), to ensure that we proceed with a version that will work with all the nodes. - * - * @throws UnsupportedProtocolVersionException if no such version exists (the nodes support - * non-intersecting ranges), or if there was an error during the computation. This will cause - * the driver initialization to fail. 
- */ - ProtocolVersion highestCommon(Collection nodes); - - /** Whether a given version supports a given feature. */ - boolean supports(ProtocolVersion version, ProtocolFeature feature); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java deleted file mode 100644 index 88e6cdb3bb2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.Enumeration; -import java.util.Hashtable; -import javax.naming.Context; -import javax.naming.NamingEnumeration; -import javax.naming.NamingException; -import javax.naming.directory.Attribute; -import javax.naming.directory.Attributes; -import javax.naming.directory.DirContext; -import javax.naming.directory.InitialDirContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * {@link AddressTranslator} implementation for a multi-region EC2 deployment where clients are - * also deployed in EC2. - * - *

Its distinctive feature is that it translates addresses according to the location of the - * Cassandra host: - * - *

    - *
  • addresses in different EC2 regions (than the client) are unchanged; - *
  • addresses in the same EC2 region are translated to private IPs. - *
- * - * This optimizes network costs, because Amazon charges more for communication over public IPs. - * - *

Implementation note: this class performs a reverse DNS lookup of the origin address, to find - * the domain name of the target instance. Then it performs a forward DNS lookup of the domain name; - * the EC2 DNS does the private/public switch automatically based on location. - */ -public class Ec2MultiRegionAddressTranslator implements AddressTranslator { - - private static final Logger LOG = LoggerFactory.getLogger(Ec2MultiRegionAddressTranslator.class); - - private final DirContext ctx; - private final String logPrefix; - - public Ec2MultiRegionAddressTranslator( - @SuppressWarnings("unused") @NonNull DriverContext context) { - this.logPrefix = context.getSessionName(); - @SuppressWarnings("JdkObsolete") - Hashtable env = new Hashtable<>(); - env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory"); - try { - ctx = new InitialDirContext(env); - } catch (NamingException e) { - throw new RuntimeException("Could not create translator", e); - } - } - - @VisibleForTesting - Ec2MultiRegionAddressTranslator(@NonNull DirContext ctx) { - this.logPrefix = "test"; - this.ctx = ctx; - } - - @NonNull - @Override - public InetSocketAddress translate(@NonNull InetSocketAddress socketAddress) { - InetAddress address = socketAddress.getAddress(); - try { - // InetAddress#getHostName() is supposed to perform a reverse DNS lookup, but for some reason - // it doesn't work within the same EC2 region (it returns the IP address itself). 
- // We use an alternate implementation: - String domainName = lookupPtrRecord(reverse(address)); - if (domainName == null) { - LOG.warn("[{}] Found no domain name for {}, returning it as-is", logPrefix, address); - return socketAddress; - } - - InetAddress translatedAddress = InetAddress.getByName(domainName); - LOG.debug("[{}] Resolved {} to {}", logPrefix, address, translatedAddress); - return new InetSocketAddress(translatedAddress, socketAddress.getPort()); - } catch (Exception e) { - Loggers.warnWithException( - LOG, "[{}] Error resolving {}, returning it as-is", logPrefix, address, e); - return socketAddress; - } - } - - private String lookupPtrRecord(String reversedDomain) throws Exception { - Attributes attrs = ctx.getAttributes(reversedDomain, new String[] {"PTR"}); - for (NamingEnumeration ae = attrs.getAll(); ae.hasMoreElements(); ) { - Attribute attr = (Attribute) ae.next(); - Enumeration vals = attr.getAll(); - if (vals.hasMoreElements()) { - return vals.nextElement().toString(); - } - } - return null; - } - - @Override - public void close() { - try { - ctx.close(); - } catch (NamingException e) { - Loggers.warnWithException(LOG, "Error closing translator", e); - } - } - - // Builds the "reversed" domain name in the ARPA domain to perform the reverse lookup - @VisibleForTesting - static String reverse(InetAddress address) { - byte[] bytes = address.getAddress(); - if (bytes.length == 4) return reverseIpv4(bytes); - else return reverseIpv6(bytes); - } - - private static String reverseIpv4(byte[] bytes) { - StringBuilder builder = new StringBuilder(); - for (int i = bytes.length - 1; i >= 0; i--) { - builder.append(bytes[i] & 0xFF).append('.'); - } - builder.append("in-addr.arpa"); - return builder.toString(); - } - - private static String reverseIpv6(byte[] bytes) { - StringBuilder builder = new StringBuilder(); - for (int i = bytes.length - 1; i >= 0; i--) { - byte b = bytes[i]; - int lowNibble = b & 0x0F; - int highNibble = b >> 4 & 0x0F; - builder - 
.append(Integer.toHexString(lowNibble)) - .append('.') - .append(Integer.toHexString(highNibble)) - .append('.'); - } - builder.append("ip6.arpa"); - return builder.toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslator.java deleted file mode 100644 index 5cc6c2518fb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslator.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.context.DriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This translator always returns same hostname, no matter what IP address a node has but still - * using its native transport port. - * - *

The translator can be used for scenarios when all nodes are behind some kind of proxy, and it - * is not tailored for one concrete use case. One can use this, for example, for AWS PrivateLink as - * all nodes would be exposed to consumer - behind one hostname pointing to AWS Endpoint. - */ -public class FixedHostNameAddressTranslator implements AddressTranslator { - - private static final Logger LOG = LoggerFactory.getLogger(FixedHostNameAddressTranslator.class); - - private final String advertisedHostname; - private final String logPrefix; - - public FixedHostNameAddressTranslator(@NonNull DriverContext context) { - logPrefix = context.getSessionName(); - advertisedHostname = - context.getConfig().getDefaultProfile().getString(ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME); - } - - @NonNull - @Override - public InetSocketAddress translate(@NonNull InetSocketAddress address) { - final int port = address.getPort(); - LOG.debug("[{}] Resolved {}:{} to {}:{}", logPrefix, address, port, advertisedHostname, port); - return new InetSocketAddress(advertisedHostname, port); - } - - @Override - public void close() {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java deleted file mode 100644 index 0922821be8c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.context.DriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import net.jcip.annotations.ThreadSafe; - -/** - * An address translator that always returns the same address unchanged. - * - *

To activate this translator, modify the {@code advanced.address-translator} section in the - * driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.address-translator {
- *     class = PassThroughAddressTranslator
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class PassThroughAddressTranslator implements AddressTranslator { - - public PassThroughAddressTranslator(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } - - @NonNull - @Override - public InetSocketAddress translate(@NonNull InetSocketAddress address) { - return address; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Subnet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Subnet.java deleted file mode 100644 index 7c25e94e2f9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Subnet.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Splitter; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Arrays; -import java.util.List; - -class Subnet { - private final byte[] subnet; - private final byte[] networkMask; - private final byte[] upper; - private final byte[] lower; - - private Subnet(byte[] subnet, byte[] networkMask) { - this.subnet = subnet; - this.networkMask = networkMask; - - byte[] upper = new byte[subnet.length]; - byte[] lower = new byte[subnet.length]; - for (int i = 0; i < subnet.length; i++) { - upper[i] = (byte) (subnet[i] | ~networkMask[i]); - lower[i] = (byte) (subnet[i] & networkMask[i]); - } - this.upper = upper; - this.lower = lower; - } - - static Subnet parse(String subnetCIDR) throws UnknownHostException { - List parts = Splitter.on("/").splitToList(subnetCIDR); - if (parts.size() != 2) { - throw new IllegalArgumentException("Invalid subnet: " + subnetCIDR); - } - - boolean isIPv6 = parts.get(0).contains(":"); - byte[] subnet = InetAddress.getByName(parts.get(0)).getAddress(); - if (isIPv4(subnet) && isIPv6) { - subnet = toIPv6(subnet); - } - int prefixLength = Integer.parseInt(parts.get(1)); - validatePrefixLength(subnet, prefixLength); - - byte[] networkMask = toNetworkMask(subnet, prefixLength); - validateSubnetIsPrefixBlock(subnet, networkMask, subnetCIDR); - return new Subnet(subnet, networkMask); - } - - private static byte[] toNetworkMask(byte[] subnet, int prefixLength) { - int fullBytes = prefixLength / 8; - int remainingBits = prefixLength % 8; - byte[] mask = new byte[subnet.length]; - Arrays.fill(mask, 0, fullBytes, (byte) 0xFF); - if (remainingBits > 0) { - mask[fullBytes] = (byte) (0xFF << (8 - remainingBits)); - } - return mask; - } - - private static void validatePrefixLength(byte[] subnet, int prefixLength) { - 
int max_prefix_length = subnet.length * 8; - if (prefixLength < 0 || max_prefix_length < prefixLength) { - throw new IllegalArgumentException( - String.format( - "Prefix length %s must be within [0; %s]", prefixLength, max_prefix_length)); - } - } - - private static void validateSubnetIsPrefixBlock( - byte[] subnet, byte[] networkMask, String subnetCIDR) { - byte[] prefixBlock = toPrefixBlock(subnet, networkMask); - if (!Arrays.equals(subnet, prefixBlock)) { - throw new IllegalArgumentException( - String.format("Subnet %s must be represented as a network prefix block", subnetCIDR)); - } - } - - private static byte[] toPrefixBlock(byte[] subnet, byte[] networkMask) { - byte[] prefixBlock = new byte[subnet.length]; - for (int i = 0; i < subnet.length; i++) { - prefixBlock[i] = (byte) (subnet[i] & networkMask[i]); - } - return prefixBlock; - } - - @VisibleForTesting - byte[] getSubnet() { - return Arrays.copyOf(subnet, subnet.length); - } - - @VisibleForTesting - byte[] getNetworkMask() { - return Arrays.copyOf(networkMask, networkMask.length); - } - - byte[] getUpper() { - return Arrays.copyOf(upper, upper.length); - } - - byte[] getLower() { - return Arrays.copyOf(lower, lower.length); - } - - boolean isIPv4() { - return isIPv4(subnet); - } - - boolean isIPv6() { - return isIPv6(subnet); - } - - boolean contains(byte[] ip) { - if (isIPv4() && !isIPv4(ip)) { - return false; - } - if (isIPv6() && isIPv4(ip)) { - ip = toIPv6(ip); - } - if (subnet.length != ip.length) { - throw new IllegalArgumentException( - "IP version is unknown: " + Arrays.toString(toZeroBasedByteArray(ip))); - } - for (int i = 0; i < subnet.length; i++) { - if (subnet[i] != (byte) (ip[i] & networkMask[i])) { - return false; - } - } - return true; - } - - private static boolean isIPv4(byte[] ip) { - return ip.length == 4; - } - - private static boolean isIPv6(byte[] ip) { - return ip.length == 16; - } - - private static byte[] toIPv6(byte[] ipv4) { - byte[] ipv6 = new byte[16]; - ipv6[10] = (byte) 
0xFF; - ipv6[11] = (byte) 0xFF; - System.arraycopy(ipv4, 0, ipv6, 12, 4); - return ipv6; - } - - @Override - public String toString() { - return Arrays.toString(toZeroBasedByteArray(subnet)); - } - - private static int[] toZeroBasedByteArray(byte[] bytes) { - int[] res = new int[bytes.length]; - for (int i = 0; i < bytes.length; i++) { - res[i] = bytes[i] & 0xFF; - } - return res; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddress.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddress.java deleted file mode 100644 index 105e776a507..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddress.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import java.net.InetSocketAddress; -import java.net.UnknownHostException; - -class SubnetAddress { - private final Subnet subnet; - private final InetSocketAddress address; - - SubnetAddress(String subnetCIDR, InetSocketAddress address) { - try { - this.subnet = Subnet.parse(subnetCIDR); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } - this.address = address; - } - - InetSocketAddress getAddress() { - return this.address; - } - - boolean isOverlapping(SubnetAddress other) { - Subnet thisSubnet = this.subnet; - Subnet otherSubnet = other.subnet; - return thisSubnet.contains(otherSubnet.getLower()) - || thisSubnet.contains(otherSubnet.getUpper()) - || otherSubnet.contains(thisSubnet.getLower()) - || otherSubnet.contains(thisSubnet.getUpper()); - } - - boolean contains(InetSocketAddress address) { - return subnet.contains(address.getAddress().getAddress()); - } - - boolean isIPv4() { - return subnet.isIPv4(); - } - - boolean isIPv6() { - return subnet.isIPv6(); - } - - @Override - public String toString() { - return "SubnetAddress[subnet=" + subnet + ", address=" + address + "]"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslator.java deleted file mode 100644 index 85f29e3fadd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslator.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.AddressUtils; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This translator returns the proxy address of the private subnet containing the Cassandra node IP, - * or default address if no matching subnets, or passes through the original node address if no - * default configured. - * - *

The translator can be used for scenarios when all nodes are behind some kind of proxy, and - * that proxy is different for nodes located in different subnets (eg. when Cassandra is deployed in - * multiple datacenters/regions). One can use this, for example, for Cassandra on Kubernetes with - * different Cassandra datacenters deployed to different Kubernetes clusters. - */ -public class SubnetAddressTranslator implements AddressTranslator { - private static final Logger LOG = LoggerFactory.getLogger(SubnetAddressTranslator.class); - - private final List subnetAddresses; - - @SuppressWarnings("OptionalUsedAsFieldOrParameterType") - private final Optional defaultAddress; - - private final String logPrefix; - - public SubnetAddressTranslator(@NonNull DriverContext context) { - logPrefix = context.getSessionName(); - boolean resolveAddresses = - context - .getConfig() - .getDefaultProfile() - .getBoolean(ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES, false); - this.subnetAddresses = - context.getConfig().getDefaultProfile().getStringMap(ADDRESS_TRANSLATOR_SUBNET_ADDRESSES) - .entrySet().stream() - .map( - e -> { - // Quoted and/or containing forward slashes map keys in reference.conf are read to - // strings with additional quotes, eg. 
100.64.0.0/15 -> '100.64.0."0/15"' or - // "100.64.0.0/15" -> '"100.64.0.0/15"' - String subnetCIDR = e.getKey().replaceAll("\"", ""); - String address = e.getValue(); - return new SubnetAddress(subnetCIDR, parseAddress(address, resolveAddresses)); - }) - .collect(Collectors.toList()); - this.defaultAddress = - Optional.ofNullable( - context - .getConfig() - .getDefaultProfile() - .getString(ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, null)) - .map(address -> parseAddress(address, resolveAddresses)); - - validateSubnetsAreOfSameProtocol(this.subnetAddresses); - validateSubnetsAreNotOverlapping(this.subnetAddresses); - } - - private static void validateSubnetsAreOfSameProtocol(List subnets) { - for (int i = 0; i < subnets.size() - 1; i++) { - for (int j = i + 1; j < subnets.size(); j++) { - SubnetAddress subnet1 = subnets.get(i); - SubnetAddress subnet2 = subnets.get(j); - if (subnet1.isIPv4() != subnet2.isIPv4() && subnet1.isIPv6() != subnet2.isIPv6()) { - throw new IllegalArgumentException( - String.format( - "Configured subnets are of the different protocols: %s, %s", subnet1, subnet2)); - } - } - } - } - - private static void validateSubnetsAreNotOverlapping(List subnets) { - for (int i = 0; i < subnets.size() - 1; i++) { - for (int j = i + 1; j < subnets.size(); j++) { - SubnetAddress subnet1 = subnets.get(i); - SubnetAddress subnet2 = subnets.get(j); - if (subnet1.isOverlapping(subnet2)) { - throw new IllegalArgumentException( - String.format("Configured subnets are overlapping: %s, %s", subnet1, subnet2)); - } - } - } - } - - @NonNull - @Override - public InetSocketAddress translate(@NonNull InetSocketAddress address) { - InetSocketAddress translatedAddress = null; - for (SubnetAddress subnetAddress : subnetAddresses) { - if (subnetAddress.contains(address)) { - translatedAddress = subnetAddress.getAddress(); - } - } - if (translatedAddress == null && defaultAddress.isPresent()) { - translatedAddress = defaultAddress.get(); - } - if (translatedAddress == null) { - 
translatedAddress = address; - } - LOG.debug("[{}] Translated {} to {}", logPrefix, address, translatedAddress); - return translatedAddress; - } - - @Override - public void close() {} - - @Nullable - private InetSocketAddress parseAddress(String address, boolean resolve) { - try { - InetSocketAddress parsedAddress = AddressUtils.extract(address, resolve).iterator().next(); - LOG.debug("[{}] Parsed {} to {}", logPrefix, address, parsedAddress); - return parsedAddress; - } catch (RuntimeException e) { - throw new IllegalArgumentException( - String.format("Invalid address %s (%s)", address, e.getMessage()), e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java deleted file mode 100644 index 5078428c21a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.ScheduledFuture; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Handles the lifecycle of an admin request (such as a node refresh or schema refresh query). 
*/ -@ThreadSafe -public class AdminRequestHandler implements ResponseCallback { - private static final Logger LOG = LoggerFactory.getLogger(AdminRequestHandler.class); - - public static AdminRequestHandler call( - DriverChannel channel, Query query, Duration timeout, String logPrefix) { - return new AdminRequestHandler<>( - channel, - true, - query, - Frame.NO_PAYLOAD, - timeout, - logPrefix, - "call '" + query.query + "'", - com.datastax.oss.protocol.internal.response.result.Void.class); - } - - public static AdminRequestHandler query( - DriverChannel channel, - String query, - Map parameters, - Duration timeout, - int pageSize, - String logPrefix) { - Query message = - new Query( - query, - buildQueryOptions(pageSize, serialize(parameters, channel.protocolVersion()), null)); - String debugString = "query '" + message.query + "'"; - if (!parameters.isEmpty()) { - debugString += " with parameters " + parameters; - } - return new AdminRequestHandler<>( - channel, true, message, Frame.NO_PAYLOAD, timeout, logPrefix, debugString, Rows.class); - } - - public static AdminRequestHandler query( - DriverChannel channel, String query, Duration timeout, int pageSize, String logPrefix) { - return query(channel, query, Collections.emptyMap(), timeout, pageSize, logPrefix); - } - - private final DriverChannel channel; - private final boolean shouldPreAcquireId; - private final Message message; - private final Map customPayload; - private final Duration timeout; - private final String logPrefix; - private final String debugString; - private final Class expectedResponseType; - protected final CompletableFuture result = new CompletableFuture<>(); - - // This is only ever accessed on the channel's event loop, so it doesn't need to be volatile - private ScheduledFuture timeoutFuture; - - protected AdminRequestHandler( - DriverChannel channel, - boolean shouldPreAcquireId, - Message message, - Map customPayload, - Duration timeout, - String logPrefix, - String debugString, - Class 
expectedResponseType) { - this.channel = channel; - this.shouldPreAcquireId = shouldPreAcquireId; - this.message = message; - this.customPayload = customPayload; - this.timeout = timeout; - this.logPrefix = logPrefix; - this.debugString = debugString; - this.expectedResponseType = expectedResponseType; - } - - public CompletionStage start() { - LOG.debug("[{}] Executing {}", logPrefix, this); - if (shouldPreAcquireId && !channel.preAcquireId()) { - setFinalError( - new BusyConnectionException( - String.format( - "%s has reached its maximum number of simultaneous requests", channel))); - } else { - channel.write(message, false, customPayload, this).addListener(this::onWriteComplete); - } - return result; - } - - private void onWriteComplete(Future future) { - if (future.isSuccess()) { - LOG.debug("[{}] Successfully wrote {}, waiting for response", logPrefix, this); - if (timeout.toNanos() > 0) { - timeoutFuture = - channel - .eventLoop() - .schedule(this::fireTimeout, timeout.toNanos(), TimeUnit.NANOSECONDS); - timeoutFuture.addListener(UncaughtExceptions::log); - } - } else { - setFinalError(future.cause()); - } - } - - private void fireTimeout() { - setFinalError( - new DriverTimeoutException(String.format("%s timed out after %s", debugString, timeout))); - if (!channel.closeFuture().isDone()) { - channel.cancel(this); - } - } - - @Override - public void onFailure(Throwable error) { - if (timeoutFuture != null) { - timeoutFuture.cancel(true); - } - setFinalError(error); - } - - @Override - public void onResponse(Frame responseFrame) { - if (timeoutFuture != null) { - timeoutFuture.cancel(true); - } - Message message = responseFrame.message; - LOG.debug("[{}] Got response {}", logPrefix, responseFrame.message); - if (!expectedResponseType.isInstance(message)) { - // Note that this also covers error responses, no need to get too fancy here - setFinalError(new UnexpectedResponseException(debugString, message)); - } else if (expectedResponseType == Rows.class) { - 
Rows rows = (Rows) message; - ByteBuffer pagingState = rows.getMetadata().pagingState; - AdminRequestHandler nextHandler = (pagingState == null) ? null : this.copy(pagingState); - // The public factory methods guarantee that expectedResponseType and ResultT always match: - @SuppressWarnings("unchecked") - ResultT result = (ResultT) new AdminResult(rows, nextHandler, channel.protocolVersion()); - setFinalResult(result); - } else if (expectedResponseType == Prepared.class) { - Prepared prepared = (Prepared) message; - @SuppressWarnings("unchecked") - ResultT result = (ResultT) ByteBuffer.wrap(prepared.preparedQueryId); - setFinalResult(result); - } else if (expectedResponseType - == com.datastax.oss.protocol.internal.response.result.Void.class) { - setFinalResult(null); - } else { - setFinalError(new AssertionError("Unhandled response type" + expectedResponseType)); - } - } - - protected boolean setFinalResult(ResultT result) { - return this.result.complete(result); - } - - protected boolean setFinalError(Throwable error) { - return result.completeExceptionally(error); - } - - private AdminRequestHandler copy(ByteBuffer pagingState) { - assert message instanceof Query; - Query current = (Query) this.message; - QueryOptions currentOptions = current.options; - QueryOptions newOptions = - buildQueryOptions(currentOptions.pageSize, currentOptions.namedValues, pagingState); - return new AdminRequestHandler<>( - channel, - // This is called for next page queries, so we always need to reacquire an id: - true, - new Query(current.query, newOptions), - customPayload, - timeout, - logPrefix, - debugString, - expectedResponseType); - } - - private static QueryOptions buildQueryOptions( - int pageSize, Map serialize, ByteBuffer pagingState) { - return new QueryOptions( - ProtocolConstants.ConsistencyLevel.ONE, - Collections.emptyList(), - serialize, - false, - pageSize, - pagingState, - ProtocolConstants.ConsistencyLevel.SERIAL, - Statement.NO_DEFAULT_TIMESTAMP, - null, - 
Statement.NO_NOW_IN_SECONDS); - } - - private static Map serialize( - Map parameters, ProtocolVersion protocolVersion) { - Map result = Maps.newHashMapWithExpectedSize(parameters.size()); - for (Map.Entry entry : parameters.entrySet()) { - result.put(entry.getKey(), serialize(entry.getValue(), protocolVersion)); - } - return result; - } - - private static ByteBuffer serialize(Object parameter, ProtocolVersion protocolVersion) { - if (parameter instanceof String) { - return TypeCodecs.TEXT.encode((String) parameter, protocolVersion); - } else if (parameter instanceof InetAddress) { - return TypeCodecs.INET.encode((InetAddress) parameter, protocolVersion); - } else if (parameter instanceof List && ((List) parameter).get(0) instanceof String) { - @SuppressWarnings("unchecked") - List l = (List) parameter; - return AdminRow.LIST_OF_TEXT.encode(l, protocolVersion); - } else if (parameter instanceof Integer) { - return TypeCodecs.INT.encode((Integer) parameter, protocolVersion); - } else { - throw new IllegalArgumentException( - "Unsupported variable type for admin query: " + parameter.getClass()); - } - } - - @Override - public String toString() { - return debugString; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java deleted file mode 100644 index 686cc05c6b0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.AbstractIterator; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.Rows; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe // wraps a mutable queue -public class AdminResult implements Iterable { - - private final Queue> data; - private final Map columnSpecs; - private final AdminRequestHandler nextHandler; - private final ProtocolVersion protocolVersion; - - public AdminResult( - Rows rows, AdminRequestHandler nextHandler, ProtocolVersion protocolVersion) { - this.data = rows.getData(); - - ImmutableMap.Builder columnSpecsBuilder = ImmutableMap.builder(); - for (ColumnSpec spec : rows.getMetadata().columnSpecs) { - columnSpecsBuilder.put(spec.name, spec); - } - // Admin queries are simple selects only, so there are no duplicate names (if that ever - // changes, build() will fail and we'll have to do things differently) - this.columnSpecs = columnSpecsBuilder.build(); - - this.nextHandler = nextHandler; - 
this.protocolVersion = protocolVersion; - } - - /** This consumes the result's data and can be called only once. */ - @NonNull - @Override - public Iterator iterator() { - return new AbstractIterator() { - @Override - protected AdminRow computeNext() { - List rowData = data.poll(); - return (rowData == null) - ? endOfData() - : new AdminRow(columnSpecs, rowData, protocolVersion); - } - }; - } - - public boolean hasNextPage() { - return nextHandler != null; - } - - public CompletionStage nextPage() { - return (nextHandler == null) - ? CompletableFutures.failedFuture( - new AssertionError("No next page, use hasNextPage() before you call this method")) - : nextHandler.start(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java deleted file mode 100644 index 6e32ea845fb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class AdminRow { - - @VisibleForTesting - static final TypeCodec> LIST_OF_TEXT = TypeCodecs.listOf(TypeCodecs.TEXT); - - private static final TypeCodec> SET_OF_TEXT = TypeCodecs.setOf(TypeCodecs.TEXT); - private static final TypeCodec> MAP_OF_STRING_TO_STRING = - TypeCodecs.mapOf(TypeCodecs.TEXT, TypeCodecs.TEXT); - - private final Map columnSpecs; - private final List data; - private final ProtocolVersion protocolVersion; - - public AdminRow( - Map columnSpecs, List data, ProtocolVersion protocolVersion) { - this.columnSpecs = columnSpecs; - this.data = data; - this.protocolVersion = protocolVersion; - } - - @Nullable - public Boolean getBoolean(String columnName) { - return get(columnName, TypeCodecs.BOOLEAN); - } - - @Nullable - public Integer getInteger(String columnName) { - return get(columnName, TypeCodecs.INT); - } - - public boolean isString(String columnName) { - return columnSpecs.get(columnName).type.id == ProtocolConstants.DataType.VARCHAR; - } - - @Nullable - public String getString(String columnName) { - return get(columnName, TypeCodecs.TEXT); - } - - @Nullable - public UUID getUuid(String columnName) { - return get(columnName, TypeCodecs.UUID); - } - - @Nullable - public ByteBuffer getByteBuffer(String columnName) { - return 
get(columnName, TypeCodecs.BLOB); - } - - @Nullable - public InetAddress getInetAddress(String columnName) { - return get(columnName, TypeCodecs.INET); - } - - @Nullable - public List getListOfString(String columnName) { - return get(columnName, LIST_OF_TEXT); - } - - @Nullable - public Set getSetOfString(String columnName) { - return get(columnName, SET_OF_TEXT); - } - - @Nullable - public Map getMapOfStringToString(String columnName) { - return get(columnName, MAP_OF_STRING_TO_STRING); - } - - public boolean isNull(String columnName) { - if (!contains(columnName)) { - return true; - } else { - int index = columnSpecs.get(columnName).index; - return data.get(index) == null; - } - } - - public boolean contains(String columnName) { - return columnSpecs.containsKey(columnName); - } - - @Nullable - public T get(String columnName, TypeCodec codec) { - // Minimal checks here: this is for internal use, so the caller should know what they're - // doing - if (!contains(columnName)) { - return null; - } else { - int index = columnSpecs.get(columnName).index; - return codec.decode(data.get(index), protocolVersion); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java deleted file mode 100644 index 40ab21b759a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ThrottledAdminRequestHandler extends AdminRequestHandler - implements Throttled { - - /** - * @param shouldPreAcquireId whether to call {@link DriverChannel#preAcquireId()} before sending - * the request. 
This must be false if you obtained the connection from a pool ({@link - * ChannelPool#next()}, or {@link DefaultSession#getChannel(Node, String)}). It must be - * true if you are using a standalone channel (e.g. in {@link ControlConnection} or one of - * its auxiliary components). - */ - public static ThrottledAdminRequestHandler query( - DriverChannel channel, - boolean shouldPreAcquireId, - Message message, - Map customPayload, - Duration timeout, - RequestThrottler throttler, - SessionMetricUpdater metricUpdater, - String logPrefix, - String debugString) { - return new ThrottledAdminRequestHandler<>( - channel, - shouldPreAcquireId, - message, - customPayload, - timeout, - throttler, - metricUpdater, - logPrefix, - debugString, - Rows.class); - } - - /** - * @param shouldPreAcquireId whether to call {@link DriverChannel#preAcquireId()} before sending - * the request. See {@link #query(DriverChannel, boolean, Message, Map, Duration, - * RequestThrottler, SessionMetricUpdater, String, String)} for more explanations. 
- */ - public static ThrottledAdminRequestHandler prepare( - DriverChannel channel, - boolean shouldPreAcquireId, - Message message, - Map customPayload, - Duration timeout, - RequestThrottler throttler, - SessionMetricUpdater metricUpdater, - String logPrefix) { - return new ThrottledAdminRequestHandler<>( - channel, - shouldPreAcquireId, - message, - customPayload, - timeout, - throttler, - metricUpdater, - logPrefix, - message.toString(), - Prepared.class); - } - - private final long startTimeNanos; - private final RequestThrottler throttler; - private final SessionMetricUpdater metricUpdater; - - protected ThrottledAdminRequestHandler( - DriverChannel channel, - boolean preAcquireId, - Message message, - Map customPayload, - Duration timeout, - RequestThrottler throttler, - SessionMetricUpdater metricUpdater, - String logPrefix, - String debugString, - Class expectedResponseType) { - super( - channel, - preAcquireId, - message, - customPayload, - timeout, - logPrefix, - debugString, - expectedResponseType); - this.startTimeNanos = System.nanoTime(); - this.throttler = throttler; - this.metricUpdater = metricUpdater; - } - - @Override - public CompletionStage start() { - // Don't write request yet, wait for green light from throttler - throttler.register(this); - return result; - } - - @Override - public void onThrottleReady(boolean wasDelayed) { - if (wasDelayed) { - metricUpdater.updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - null, - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - super.start(); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - metricUpdater.incrementCounter(DefaultSessionMetric.THROTTLING_ERRORS, null); - setFinalError(error); - } - - @Override - protected boolean setFinalResult(ResultT result) { - boolean wasSet = super.setFinalResult(result); - if (wasSet) { - throttler.signalSuccess(this); - } - return wasSet; - } - - @Override - protected boolean 
setFinalError(Throwable error) { - boolean wasSet = super.setFinalError(error); - if (wasSet) { - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(this); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(this, error); - } - } - return wasSet; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java deleted file mode 100644 index c842b655411..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.protocol.internal.Message; - -public class UnexpectedResponseException extends Exception { - - public final Message message; - - public UnexpectedResponseException(String requestName, Message message) { - super(String.format("%s got unexpected response %s", requestName, message)); - this.message = message; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java deleted file mode 100644 index 55ab14c8981..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Infrastructure to execute internal requests in the driver, for example control connection - * queries, or automatic statement preparation. - * - *

This is a stripped-down version of the public API, with the bare minimum for our needs: - * - *

    - *
  • async mode only. - *
  • execution on a given channel, without retries. - *
  • {@code QUERY} and {@code PREPARE} messages only. - *
  • paging is possible, but only on the same channel. If the channel gets closed between pages, - * the query fails. - *
  • values can only be bound by name, and it is assumed that the target type can always be - * inferred unambiguously (i.e. the only integer type is {@code int}, etc). - *
  • limited result API: getters by internal name only, no custom codecs. - *
  • codecs are only implemented for the types we actually need for admin queries. - *
- */ -package com.datastax.oss.driver.internal.core.adminrequest; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java b/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java deleted file mode 100644 index f2dfdf14171..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.auth; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.internal.core.auth.AuthUtils; -import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -/** - * A simple authentication provider that supports SASL authentication using the PLAIN mechanism for - * version 3 (or above) of the CQL native protocol. - * - *

To activate this provider, add an {@code advanced.auth-provider} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.auth-provider {
- *     class = com.datastax.driver.api.core.auth.PlainTextAuthProvider
- *     username = cassandra
- *     password = cassandra
- *
- *     // If connecting to DataStax Enterprise, this additional option allows proxy authentication
- *     // (login as another user or role)
- *     authorization-id = userOrRole
- *   }
- * }
- * 
- * - * The authentication provider cannot be changed at runtime; however, the credentials can be changed - * at runtime: the new ones will be used for new connection attempts once the configuration gets - * {@linkplain com.datastax.oss.driver.api.core.config.DriverConfigLoader#reload() reloaded}. - * - *

See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class PlainTextAuthProvider extends PlainTextAuthProviderBase { - - private final DriverExecutionProfile config; - - public PlainTextAuthProvider(DriverContext context) { - super(context.getSessionName()); - this.config = context.getConfig().getDefaultProfile(); - } - - @NonNull - @Override - protected Credentials getCredentials( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { - // It's not valid to use the PlainTextAuthProvider without a username or password, error out - // early here - AuthUtils.validateConfigPresent( - config, - PlainTextAuthProvider.class.getName(), - endPoint, - DefaultDriverOption.AUTH_PROVIDER_USER_NAME, - DefaultDriverOption.AUTH_PROVIDER_PASSWORD); - - String authorizationId = config.getString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, ""); - assert authorizationId != null; // per the default above - return new Credentials( - config.getString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME).toCharArray(), - config.getString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD).toCharArray(), - authorizationId.toCharArray()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java deleted file mode 100644 index 970ea061ec7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metadata.Node; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** Events relating to driver channels. */ -@Immutable -public class ChannelEvent { - public enum Type { - OPENED, - CLOSED, - RECONNECTION_STARTED, - RECONNECTION_STOPPED, - CONTROL_CONNECTION_FAILED - } - - public static ChannelEvent channelOpened(Node node) { - return new ChannelEvent(Type.OPENED, node); - } - - public static ChannelEvent channelClosed(Node node) { - return new ChannelEvent(Type.CLOSED, node); - } - - public static ChannelEvent reconnectionStarted(Node node) { - return new ChannelEvent(Type.RECONNECTION_STARTED, node); - } - - public static ChannelEvent reconnectionStopped(Node node) { - return new ChannelEvent(Type.RECONNECTION_STOPPED, node); - } - - /** The control connection tried to use this node, but failed to open a channel. 
*/ - public static ChannelEvent controlConnectionFailed(Node node) { - return new ChannelEvent(Type.CONTROL_CONNECTION_FAILED, node); - } - - public final Type type; - public final Node node; - - public ChannelEvent(Type type, Node node) { - this.type = type; - this.node = node; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ChannelEvent) { - ChannelEvent that = (ChannelEvent) other; - return this.type == that.type && Objects.equals(this.node, that.node); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(type, node); - } - - @Override - public String toString() { - return "ChannelEvent(" + type + ", " + node + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java deleted file mode 100644 index 66a5c4edc0e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java +++ /dev/null @@ -1,394 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.config.typesafe.TypesafeDriverConfig; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.protocol.FrameDecoder; -import com.datastax.oss.driver.internal.core.protocol.FrameEncoder; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOption; -import io.netty.channel.ChannelPipeline; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import 
java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Builds {@link DriverChannel} objects for an instance of the driver. */ -@ThreadSafe -public class ChannelFactory { - - private static final Logger LOG = LoggerFactory.getLogger(ChannelFactory.class); - - /** - * A value for {@link #productType} that indicates that we are connected to DataStax Cloud. This - * value matches the one defined at DSE DB server side at {@code ProductType.java}. - */ - private static final String DATASTAX_CLOUD_PRODUCT_TYPE = "DATASTAX_APOLLO"; - - private static final AtomicBoolean LOGGED_ORPHAN_WARNING = new AtomicBoolean(); - - /** - * A value for {@link #productType} that indicates that the server does not report any product - * type. - */ - private static final String UNKNOWN_PRODUCT_TYPE = "UNKNOWN"; - - // The names of the handlers on the pipeline: - public static final String SSL_HANDLER_NAME = "ssl"; - public static final String INBOUND_TRAFFIC_METER_NAME = "inboundTrafficMeter"; - public static final String OUTBOUND_TRAFFIC_METER_NAME = "outboundTrafficMeter"; - public static final String FRAME_TO_BYTES_ENCODER_NAME = "frameToBytesEncoder"; - public static final String FRAME_TO_SEGMENT_ENCODER_NAME = "frameToSegmentEncoder"; - public static final String SEGMENT_TO_BYTES_ENCODER_NAME = "segmentToBytesEncoder"; - public static final String BYTES_TO_FRAME_DECODER_NAME = "bytesToFrameDecoder"; - public static final String BYTES_TO_SEGMENT_DECODER_NAME = "bytesToSegmentDecoder"; - public static final String SEGMENT_TO_FRAME_DECODER_NAME = "segmentToFrameDecoder"; - public static final String HEARTBEAT_HANDLER_NAME = "heartbeat"; - public static final String INFLIGHT_HANDLER_NAME = "inflight"; - public static final String INIT_HANDLER_NAME = "init"; - - private final String logPrefix; - 
protected final InternalDriverContext context; - - /** either set from the configuration, or null and will be negotiated */ - @VisibleForTesting volatile ProtocolVersion protocolVersion; - - private volatile String clusterName; - - /** - * The value of the {@code PRODUCT_TYPE} option reported by the first channel we opened, in - * response to a {@code SUPPORTED} request. - * - *

If the server does not return that option, the value will be {@link #UNKNOWN_PRODUCT_TYPE}. - */ - @VisibleForTesting volatile String productType; - - public ChannelFactory(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - this.context = context; - - DriverExecutionProfile defaultConfig = context.getConfig().getDefaultProfile(); - if (defaultConfig.isDefined(DefaultDriverOption.PROTOCOL_VERSION)) { - String versionName = defaultConfig.getString(DefaultDriverOption.PROTOCOL_VERSION); - this.protocolVersion = context.getProtocolVersionRegistry().fromName(versionName); - } // else it will be negotiated with the first opened connection - } - - public ProtocolVersion getProtocolVersion() { - ProtocolVersion result = this.protocolVersion; - Preconditions.checkState( - result != null, "Protocol version not known yet, this should only be called after init"); - return result; - } - - /** - * WARNING: this is only used at the very beginning of the init process (when we just refreshed - * the list of nodes for the first time, and found out that one of them requires a lower version - * than was negotiated with the first contact point); it's safe at this time because we are in a - * controlled state (only the control connection is open, it's not executing queries and we're - * going to reconnect immediately after). Calling this method at any other time will likely wreak - * havoc. 
- */ - public void setProtocolVersion(ProtocolVersion newVersion) { - this.protocolVersion = newVersion; - } - - public String getClusterName() { - return clusterName; - } - - public CompletionStage connect(Node node, DriverChannelOptions options) { - NodeMetricUpdater nodeMetricUpdater; - if (node instanceof DefaultNode) { - nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); - } else { - nodeMetricUpdater = NoopNodeMetricUpdater.INSTANCE; - } - return connect(node.getEndPoint(), options, nodeMetricUpdater); - } - - @VisibleForTesting - CompletionStage connect( - EndPoint endPoint, DriverChannelOptions options, NodeMetricUpdater nodeMetricUpdater) { - CompletableFuture resultFuture = new CompletableFuture<>(); - - ProtocolVersion currentVersion; - boolean isNegotiating; - List attemptedVersions = new CopyOnWriteArrayList<>(); - if (this.protocolVersion != null) { - currentVersion = protocolVersion; - isNegotiating = false; - } else { - currentVersion = context.getProtocolVersionRegistry().highestNonBeta(); - isNegotiating = true; - } - - connect( - endPoint, - options, - nodeMetricUpdater, - currentVersion, - isNegotiating, - attemptedVersions, - resultFuture); - return resultFuture; - } - - private void connect( - EndPoint endPoint, - DriverChannelOptions options, - NodeMetricUpdater nodeMetricUpdater, - ProtocolVersion currentVersion, - boolean isNegotiating, - List attemptedVersions, - CompletableFuture resultFuture) { - - NettyOptions nettyOptions = context.getNettyOptions(); - - Bootstrap bootstrap = - new Bootstrap() - .group(nettyOptions.ioEventLoopGroup()) - .channel(nettyOptions.channelClass()) - .option(ChannelOption.ALLOCATOR, nettyOptions.allocator()) - .handler( - initializer(endPoint, currentVersion, options, nodeMetricUpdater, resultFuture)); - - nettyOptions.afterBootstrapInitialized(bootstrap); - - ChannelFuture connectFuture = bootstrap.connect(endPoint.resolve()); - - connectFuture.addListener( - cf -> { - if (connectFuture.isSuccess()) 
{ - Channel channel = connectFuture.channel(); - DriverChannel driverChannel = - new DriverChannel(endPoint, channel, context.getWriteCoalescer(), currentVersion); - // If this is the first successful connection, remember the protocol version and - // cluster name for future connections. - if (isNegotiating) { - ChannelFactory.this.protocolVersion = currentVersion; - } - if (ChannelFactory.this.clusterName == null) { - ChannelFactory.this.clusterName = driverChannel.getClusterName(); - } - Map> supportedOptions = driverChannel.getOptions(); - if (ChannelFactory.this.productType == null && supportedOptions != null) { - List productTypes = supportedOptions.get("PRODUCT_TYPE"); - String productType = - productTypes != null && !productTypes.isEmpty() - ? productTypes.get(0) - : UNKNOWN_PRODUCT_TYPE; - ChannelFactory.this.productType = productType; - DriverConfig driverConfig = context.getConfig(); - if (driverConfig instanceof TypesafeDriverConfig - && productType.equals(DATASTAX_CLOUD_PRODUCT_TYPE)) { - ((TypesafeDriverConfig) driverConfig) - .overrideDefaults( - ImmutableMap.of( - DefaultDriverOption.REQUEST_CONSISTENCY, - ConsistencyLevel.LOCAL_QUORUM.name())); - } - } - resultFuture.complete(driverChannel); - } else { - Throwable error = connectFuture.cause(); - if (error instanceof UnsupportedProtocolVersionException && isNegotiating) { - attemptedVersions.add(currentVersion); - Optional downgraded = - context.getProtocolVersionRegistry().downgrade(currentVersion); - if (downgraded.isPresent()) { - LOG.debug( - "[{}] Failed to connect with protocol {}, retrying with {}", - logPrefix, - currentVersion, - downgraded.get()); - connect( - endPoint, - options, - nodeMetricUpdater, - downgraded.get(), - true, - attemptedVersions, - resultFuture); - } else { - resultFuture.completeExceptionally( - UnsupportedProtocolVersionException.forNegotiation( - endPoint, attemptedVersions)); - } - } else { - // Note: might be completed already if the failure happened in 
initializer(), this is - // fine - resultFuture.completeExceptionally(error); - } - } - }); - } - - @VisibleForTesting - ChannelInitializer initializer( - EndPoint endPoint, - ProtocolVersion protocolVersion, - DriverChannelOptions options, - NodeMetricUpdater nodeMetricUpdater, - CompletableFuture resultFuture) { - return new ChannelFactoryInitializer( - endPoint, protocolVersion, options, nodeMetricUpdater, resultFuture); - }; - - class ChannelFactoryInitializer extends ChannelInitializer { - - private final EndPoint endPoint; - private final ProtocolVersion protocolVersion; - private final DriverChannelOptions options; - private final NodeMetricUpdater nodeMetricUpdater; - private final CompletableFuture resultFuture; - - ChannelFactoryInitializer( - EndPoint endPoint, - ProtocolVersion protocolVersion, - DriverChannelOptions options, - NodeMetricUpdater nodeMetricUpdater, - CompletableFuture resultFuture) { - - this.endPoint = endPoint; - this.protocolVersion = protocolVersion; - this.options = options; - this.nodeMetricUpdater = nodeMetricUpdater; - this.resultFuture = resultFuture; - } - - @Override - protected void initChannel(Channel channel) { - try { - DriverExecutionProfile defaultConfig = context.getConfig().getDefaultProfile(); - - long setKeyspaceTimeoutMillis = - defaultConfig - .getDuration(DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT) - .toMillis(); - int maxFrameLength = - (int) defaultConfig.getBytes(DefaultDriverOption.PROTOCOL_MAX_FRAME_LENGTH); - int maxRequestsPerConnection = - defaultConfig.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS); - int maxOrphanRequests = - defaultConfig.getInt(DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS); - if (maxOrphanRequests >= maxRequestsPerConnection) { - if (LOGGED_ORPHAN_WARNING.compareAndSet(false, true)) { - LOG.warn( - "[{}] Invalid value for {}: {}. It must be lower than {}. 
" - + "Defaulting to {} (1/4 of max-requests) instead.", - logPrefix, - DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS.getPath(), - maxOrphanRequests, - DefaultDriverOption.CONNECTION_MAX_REQUESTS.getPath(), - maxRequestsPerConnection / 4); - } - maxOrphanRequests = maxRequestsPerConnection / 4; - } - - InFlightHandler inFlightHandler = - new InFlightHandler( - protocolVersion, - new StreamIdGenerator(maxRequestsPerConnection), - maxOrphanRequests, - setKeyspaceTimeoutMillis, - channel.newPromise(), - options.eventCallback, - options.ownerLogPrefix); - HeartbeatHandler heartbeatHandler = new HeartbeatHandler(defaultConfig); - ProtocolInitHandler initHandler = - new ProtocolInitHandler( - context, - protocolVersion, - clusterName, - endPoint, - options, - heartbeatHandler, - productType == null); - - ChannelPipeline pipeline = channel.pipeline(); - context - .getSslHandlerFactory() - .map(f -> f.newSslHandler(channel, endPoint)) - .map(h -> pipeline.addLast(SSL_HANDLER_NAME, h)); - - // Only add meter handlers on the pipeline if metrics are enabled. 
- SessionMetricUpdater sessionMetricUpdater = context.getMetricsFactory().getSessionUpdater(); - if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.BYTES_RECEIVED, null) - || sessionMetricUpdater.isEnabled(DefaultSessionMetric.BYTES_RECEIVED, null)) { - pipeline.addLast( - INBOUND_TRAFFIC_METER_NAME, - new InboundTrafficMeter(nodeMetricUpdater, sessionMetricUpdater)); - } - - if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.BYTES_SENT, null) - || sessionMetricUpdater.isEnabled(DefaultSessionMetric.BYTES_SENT, null)) { - pipeline.addLast( - OUTBOUND_TRAFFIC_METER_NAME, - new OutboundTrafficMeter(nodeMetricUpdater, sessionMetricUpdater)); - } - - pipeline - .addLast( - FRAME_TO_BYTES_ENCODER_NAME, - new FrameEncoder(context.getFrameCodec(), maxFrameLength)) - .addLast( - BYTES_TO_FRAME_DECODER_NAME, - new FrameDecoder(context.getFrameCodec(), maxFrameLength)) - // Note: HeartbeatHandler is inserted here once init completes - .addLast(INFLIGHT_HANDLER_NAME, inFlightHandler) - .addLast(INIT_HANDLER_NAME, initHandler); - - context.getNettyOptions().afterChannelInitialized(channel); - } catch (Throwable t) { - // If the init handler throws an exception, Netty swallows it and closes the channel. We - // want to propagate it instead, so fail the outer future (the result of connect()). - resultFuture.completeExceptionally(t); - throw t; - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java deleted file mode 100644 index 3ba3d70eb8d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.internal.core.util.ProtocolUtils; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.response.Error; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandlerContext; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.NotThreadSafe; - -/** Common infrastructure to send a native protocol request from a channel handler. 
*/ -@NotThreadSafe // must be confined to the channel's event loop -abstract class ChannelHandlerRequest implements ResponseCallback { - - final Channel channel; - final ChannelHandlerContext ctx; - final InFlightHandler inFlightHandler; - private final long timeoutMillis; - - private ScheduledFuture timeoutFuture; - - ChannelHandlerRequest(ChannelHandlerContext ctx, long timeoutMillis) { - this.ctx = ctx; - this.channel = ctx.channel(); - this.inFlightHandler = ctx.pipeline().get(InFlightHandler.class); - assert inFlightHandler != null; - this.timeoutMillis = timeoutMillis; - } - - abstract String describe(); - - abstract Message getRequest(); - - abstract void onResponse(Message response); - - /** either message or cause can be null */ - abstract void fail(String message, Throwable cause); - - void fail(Throwable cause) { - fail(null, cause); - } - - void send() { - assert channel.eventLoop().inEventLoop(); - if (!inFlightHandler.preAcquireId()) { - fail( - new BusyConnectionException( - String.format( - "%s has reached its maximum number of simultaneous requests", channel))); - } else { - DriverChannel.RequestMessage message = - new DriverChannel.RequestMessage(getRequest(), false, Frame.NO_PAYLOAD, this); - ChannelFuture writeFuture = channel.writeAndFlush(message); - writeFuture.addListener(this::writeListener); - } - } - - private void writeListener(Future writeFuture) { - if (writeFuture.isSuccess()) { - timeoutFuture = - channel.eventLoop().schedule(this::onTimeout, timeoutMillis, TimeUnit.MILLISECONDS); - } else { - String message = - String.format("%s: failed to send request (%s)", describe(), writeFuture.cause()); - fail(message, writeFuture.cause()); - } - } - - @Override - public final void onResponse(Frame responseFrame) { - timeoutFuture.cancel(true); - onResponse(responseFrame.message); - } - - @Override - public final void onFailure(Throwable error) { - // timeoutFuture may not have been assigned if write failed. 
- if (timeoutFuture != null) { - timeoutFuture.cancel(true); - } - String message = String.format("%s: unexpected failure (%s)", describe(), error); - fail(message, error); - } - - private void onTimeout() { - fail(new DriverTimeoutException(describe() + ": timed out after " + timeoutMillis + " ms")); - if (!channel.closeFuture().isDone()) { - // Cancel the response callback - channel.writeAndFlush(this).addListener(UncaughtExceptions::log); - } - } - - void failOnUnexpected(Message response) { - if (response instanceof Error) { - Error error = (Error) response; - fail( - new IllegalArgumentException( - String.format( - "%s: server replied with unexpected error code [%s]: %s", - describe(), ProtocolUtils.errorCodeString(error.code), error.message))); - } else { - fail( - new IllegalArgumentException( - String.format( - "%s: server replied with unexpected response type (opcode=%s)", - describe(), ProtocolUtils.opcodeString(response.opcode)))); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java deleted file mode 100644 index 8e47db3fb1b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; - -/** - * Indicates that we've attempted to connect to a node with a cluster name that doesn't match that - * of the other nodes known to the driver. - * - *

The driver runs the following query on each newly established connection: - * - *

- *     select cluster_name from system.local
- * 
- * - * The first connection sets the cluster name for this driver instance, all subsequent connections - * must match it or they will get rejected. This is intended to filter out errors in the discovery - * process (for example, stale entries in {@code system.peers}). - * - *

This error is never returned directly to the client. If we detect a mismatch, it will always - * be after the driver has connected successfully; the error will be logged and the offending node - * forced down. - */ -public class ClusterNameMismatchException extends RuntimeException { - - private static final long serialVersionUID = 0; - - public final EndPoint endPoint; - public final String expectedClusterName; - public final String actualClusterName; - - public ClusterNameMismatchException( - EndPoint endPoint, String actualClusterName, String expectedClusterName) { - super( - String.format( - "Node %s reports cluster name '%s' that doesn't match our cluster name '%s'. " - + "It will be forced down.", - endPoint, actualClusterName, expectedClusterName)); - this.endPoint = endPoint; - this.expectedClusterName = expectedClusterName; - this.actualClusterName = actualClusterName; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java deleted file mode 100644 index 789981b4832..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.internal.core.util.concurrent.PromiseCombiner; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import java.net.SocketAddress; -import net.jcip.annotations.NotThreadSafe; - -/** - * A handler that delays the promise returned by {@code bootstrap.connect()}, in order to run a - * custom initialization process before making the channel available to clients. - * - *

This handler is not shareable. It must be installed by the channel initializer, as the last - * channel in the pipeline. - * - *

It will be notified via {@link #onRealConnect(ChannelHandlerContext)} when the real underlying - * connection is established. It can then start sending messages on the connection, while external - * clients are still waiting on their promise. Once the custom initialization is finished, the - * clients' promise can be completed with {@link #setConnectSuccess()} or {@link - * #setConnectFailure(Throwable)}. - */ -@NotThreadSafe -public abstract class ConnectInitHandler extends ChannelDuplexHandler { - // the completion of the custom initialization process - private ChannelPromise initPromise; - private ChannelHandlerContext ctx; - - @Override - public void connect( - ChannelHandlerContext ctx, - SocketAddress remoteAddress, - SocketAddress localAddress, - ChannelPromise callerPromise) - throws Exception { - this.ctx = ctx; - initPromise = ctx.channel().newPromise(); - - // the completion of the real underlying connection: - ChannelPromise realConnectPromise = ctx.channel().newPromise(); - super.connect(ctx, remoteAddress, localAddress, realConnectPromise); - realConnectPromise.addListener(future -> onRealConnect(ctx)); - - // Make the caller's promise wait on the other two: - PromiseCombiner.combine(callerPromise, realConnectPromise, initPromise); - } - - protected abstract void onRealConnect(ChannelHandlerContext ctx); - - protected boolean setConnectSuccess() { - boolean result = initPromise.trySuccess(); - if (result) { - ctx.pipeline().remove(this); - } - return result; - } - - protected void setConnectFailure(Throwable cause) { - if (initPromise.tryFailure(cause)) { - ctx.channel().close(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java deleted file mode 100644 index 232fa83be44..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java +++ /dev/null 
@@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; -import io.netty.channel.EventLoop; -import java.util.HashSet; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; - -/** - * Default write coalescing strategy. - * - *

It maintains a queue per event loop, with the writes targeting the channels that run on this - * loop. As soon as a write gets enqueued, it triggers a task that will flush the queue (other - * writes may get enqueued before or while the task runs). - * - *

Note that Netty provides a similar mechanism out of the box ({@link - * io.netty.handler.flush.FlushConsolidationHandler}), but in our experience our approach allows - * more performance gains, because it allows consolidating not only the flushes, but also the write - * tasks themselves (a single consolidated write task is scheduled on the event loop, instead of - * multiple individual tasks, so there is less context switching). - */ -@ThreadSafe -public class DefaultWriteCoalescer implements WriteCoalescer { - private final long rescheduleIntervalNanos; - private final ConcurrentMap flushers = new ConcurrentHashMap<>(); - - public DefaultWriteCoalescer(DriverContext context) { - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - rescheduleIntervalNanos = config.getDuration(DefaultDriverOption.COALESCER_INTERVAL).toNanos(); - } - - @Override - public ChannelFuture writeAndFlush(Channel channel, Object message) { - ChannelPromise writePromise = channel.newPromise(); - Write write = new Write(channel, message, writePromise); - enqueue(write, channel.eventLoop()); - return writePromise; - } - - private void enqueue(Write write, EventLoop eventLoop) { - Flusher flusher = flushers.computeIfAbsent(eventLoop, Flusher::new); - flusher.enqueue(write); - } - - private class Flusher { - private final EventLoop eventLoop; - - // These variables are accessed both from client threads and the event loop - private final Queue writes = new ConcurrentLinkedQueue<>(); - private final AtomicBoolean running = new AtomicBoolean(); - - // This variable is accessed only from runOnEventLoop, it doesn't need to be thread-safe - private final Set channels = new HashSet<>(); - - private Flusher(EventLoop eventLoop) { - this.eventLoop = eventLoop; - } - - private void enqueue(Write write) { - boolean added = writes.offer(write); - assert added; // always true (see MpscLinkedAtomicQueue implementation) - if (running.compareAndSet(false, true)) { - 
eventLoop.execute(this::runOnEventLoop); - } - } - - private void runOnEventLoop() { - assert eventLoop.inEventLoop(); - - Write write; - while ((write = writes.poll()) != null) { - Channel channel = write.channel; - channels.add(channel); - channel.write(write.message, write.writePromise); - } - - for (Channel channel : channels) { - channel.flush(); - } - channels.clear(); - - // Prepare to stop - running.set(false); - - // enqueue() can be called concurrently with this method. There is a race condition if it: - // - added an element in the queue after we were done draining it - // - but observed running==true before we flipped it, and therefore didn't schedule another - // run - - // If nothing was added in the queue, there were no concurrent calls, we can stop safely now - if (writes.isEmpty()) { - return; - } - - // Otherwise, check if one of those calls scheduled a run. If so, they flipped the bit back - // on. If not, we need to do it ourselves. - boolean shouldRestartMyself = running.compareAndSet(false, true); - - if (shouldRestartMyself && !eventLoop.isShuttingDown()) { - eventLoop.schedule(this::runOnEventLoop, rescheduleIntervalNanos, TimeUnit.NANOSECONDS); - } - } - } - - private static class Write { - private final Channel channel; - private final Object message; - private final ChannelPromise writePromise; - - private Write(Channel channel, Object message, ChannelPromise writePromise) { - this.channel = channel; - this.message = message; - this.writePromise = writePromise; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java deleted file mode 100644 index e40aa6f3097..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.protocol.internal.Message; -import io.netty.channel.Channel; -import io.netty.channel.ChannelConfig; -import io.netty.channel.ChannelFuture; -import io.netty.channel.EventLoop; -import io.netty.util.AttributeKey; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.Promise; -import java.net.SocketAddress; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; - -/** - * A thin 
wrapper around a Netty {@link Channel}, to send requests to a Cassandra node and receive - * responses. - */ -@ThreadSafe -public class DriverChannel { - - static final AttributeKey CLUSTER_NAME_KEY = AttributeKey.valueOf("cluster_name"); - static final AttributeKey>> OPTIONS_KEY = - AttributeKey.valueOf("options"); - - @SuppressWarnings("RedundantStringConstructorCall") - static final Object GRACEFUL_CLOSE_MESSAGE = new String("GRACEFUL_CLOSE_MESSAGE"); - - @SuppressWarnings("RedundantStringConstructorCall") - static final Object FORCEFUL_CLOSE_MESSAGE = new String("FORCEFUL_CLOSE_MESSAGE"); - - private final EndPoint endPoint; - private final Channel channel; - private final InFlightHandler inFlightHandler; - private final WriteCoalescer writeCoalescer; - private final ProtocolVersion protocolVersion; - private final AtomicBoolean closing = new AtomicBoolean(); - private final AtomicBoolean forceClosing = new AtomicBoolean(); - - DriverChannel( - EndPoint endPoint, - Channel channel, - WriteCoalescer writeCoalescer, - ProtocolVersion protocolVersion) { - this.endPoint = endPoint; - this.channel = channel; - this.inFlightHandler = channel.pipeline().get(InFlightHandler.class); - this.writeCoalescer = writeCoalescer; - this.protocolVersion = protocolVersion; - } - - /** - * @return a future that succeeds when the request frame was successfully written on the channel. - * Beyond that, the caller will be notified through the {@code responseCallback}. - */ - public Future write( - Message request, - boolean tracing, - Map customPayload, - ResponseCallback responseCallback) { - if (closing.get()) { - return channel.newFailedFuture(new IllegalStateException("Driver channel is closing")); - } - RequestMessage message = new RequestMessage(request, tracing, customPayload, responseCallback); - return writeCoalescer.writeAndFlush(channel, message); - } - - /** - * Cancels a callback, indicating that the client that wrote it is no longer interested in the - * answer. - * - *

Note that this does not cancel the request server-side (but might in the future if Cassandra - * supports it). - */ - public void cancel(ResponseCallback responseCallback) { - // To avoid creating an extra message, we adopt the convention that writing the callback - // directly means cancellation - writeCoalescer.writeAndFlush(channel, responseCallback).addListener(UncaughtExceptions::log); - } - - /** - * Switches the underlying Cassandra connection to a new keyspace (as if a {@code USE ...} - * statement was issued). - * - *

The future will complete once the change is effective. Only one change may run at a given - * time, concurrent attempts will fail. - * - *

Changing the keyspace is inherently thread-unsafe: if other queries are running at the same - * time, the keyspace they will use is unpredictable. - */ - public Future setKeyspace(CqlIdentifier newKeyspace) { - Promise promise = channel.eventLoop().newPromise(); - channel.pipeline().fireUserEventTriggered(new SetKeyspaceEvent(newKeyspace, promise)); - return promise; - } - - /** - * @return the name of the Cassandra cluster as returned by {@code system.local.cluster_name} on - * this connection. - */ - public String getClusterName() { - return channel.attr(CLUSTER_NAME_KEY).get(); - } - - public Map> getOptions() { - return channel.attr(OPTIONS_KEY).get(); - } - - /** - * @return the number of available stream ids on the channel; more precisely, this is the number - * of {@link #preAcquireId()} calls for which the id has not been released yet. This is used - * to weigh channels in pools that have a size bigger than 1, in the load balancing policy, - * and for monitoring purposes. - */ - public int getAvailableIds() { - return inFlightHandler.getAvailableIds(); - } - - /** - * Indicates the intention to send a request using this channel. - * - *

There must be exactly one invocation of this method before each call to {@link - * #write(Message, boolean, Map, ResponseCallback)}. If this method returns true, the client - * must proceed with the write. If it returns false, it must not proceed. - * - *

This method is used together with {@link #getAvailableIds()} to track how many requests are - * currently executing on the channel, and avoid submitting a request that would result in a - * {@link BusyConnectionException}. The two methods follow atomic semantics: {@link - * #getAvailableIds()} returns the exact count of clients that have called {@link #preAcquireId()} - * and not yet released their stream id at this point in time. - * - *

Most of the time, the driver code calls this method automatically: - * - *

    - *
  • if you obtained the channel from a pool ({@link ChannelPool#next()} or {@link - * DefaultSession#getChannel(Node, String)}), do not call this method: it has already - * been done as part of selecting the channel. - *
  • if you use {@link ChannelHandlerRequest} or {@link AdminRequestHandler} for internal - * queries, do not call this method, those classes already do it. - *
  • however, if you use {@link ThrottledAdminRequestHandler}, you must specify a {@code - * shouldPreAcquireId} argument to indicate whether to call this method or not. This is - * because those requests are sometimes used with a channel that comes from a pool - * (requiring {@code shouldPreAcquireId = false}), or sometimes with a standalone channel - * like in the control connection (requiring {@code shouldPreAcquireId = true}). - *
- */ - public boolean preAcquireId() { - return inFlightHandler.preAcquireId(); - } - - /** - * @return the number of requests currently executing on this channel (including {@link - * #getOrphanedIds() orphaned ids}). - */ - public int getInFlight() { - return inFlightHandler.getInFlight(); - } - - /** - * @return the number of stream ids for requests that have either timed out or been cancelled, but - * for which we can't release the stream id because a request might still come from the - * server. - */ - public int getOrphanedIds() { - return inFlightHandler.getOrphanIds(); - } - - public EventLoop eventLoop() { - return channel.eventLoop(); - } - - public ProtocolVersion protocolVersion() { - return protocolVersion; - } - - /** The endpoint that was used to establish the connection. */ - public EndPoint getEndPoint() { - return endPoint; - } - - public SocketAddress localAddress() { - return channel.localAddress(); - } - - /** @return The {@link ChannelConfig configuration} of this channel. */ - public ChannelConfig config() { - return channel.config(); - } - - /** - * Initiates a graceful shutdown: no new requests will be accepted, but all pending requests will - * be allowed to complete before the underlying channel is closed. - */ - public Future close() { - if (closing.compareAndSet(false, true) && channel.isOpen()) { - // go through the coalescer: this guarantees that we won't reject writes that were submitted - // before, but had not been coalesced yet. - writeCoalescer - .writeAndFlush(channel, GRACEFUL_CLOSE_MESSAGE) - .addListener(UncaughtExceptions::log); - } - return channel.closeFuture(); - } - - /** - * Initiates a forced shutdown: any pending request will be aborted and the underlying channel - * will be closed. 
- */ - public Future forceClose() { - this.close(); - if (forceClosing.compareAndSet(false, true) && channel.isOpen()) { - writeCoalescer - .writeAndFlush(channel, FORCEFUL_CLOSE_MESSAGE) - .addListener(UncaughtExceptions::log); - } - return channel.closeFuture(); - } - - /** - * Returns a future that will complete when a graceful close has started, but not yet completed. - * - *

In other words, the channel has stopped accepting new requests, but is still waiting for - * pending requests to finish. Once the last response has been received, the channel will really - * close and {@link #closeFuture()} will be completed. - * - *

If there were no pending requests when the graceful shutdown was initiated, or if {@link - * #forceClose()} is called first, this future will never complete. - */ - public ChannelFuture closeStartedFuture() { - return this.inFlightHandler.closeStartedFuture; - } - - /** - * Does not close the channel, but returns a future that will complete when it is completely - * closed. - */ - public ChannelFuture closeFuture() { - return channel.closeFuture(); - } - - @Override - public String toString() { - return channel.toString(); - } - - // This is essentially a stripped-down Frame. We can't materialize the frame before writing, - // because we need the stream id, which is assigned from within the event loop. - static class RequestMessage { - final Message request; - final boolean tracing; - final Map customPayload; - final ResponseCallback responseCallback; - - RequestMessage( - Message message, - boolean tracing, - Map customPayload, - ResponseCallback responseCallback) { - this.request = message; - this.tracing = tracing; - this.customPayload = customPayload; - this.responseCallback = responseCallback; - } - } - - static class SetKeyspaceEvent { - final CqlIdentifier keyspaceName; - final Promise promise; - - public SetKeyspaceEvent(CqlIdentifier keyspaceName, Promise promise) { - this.keyspaceName = keyspaceName; - this.promise = promise; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java deleted file mode 100644 index 208cf52ac22..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Collections; -import java.util.List; -import net.jcip.annotations.Immutable; - -/** Options for the creation of a driver channel. */ -@Immutable -public class DriverChannelOptions { - - /** No keyspace, no events, don't report available stream ids. */ - public static DriverChannelOptions DEFAULT = builder().build(); - - public static Builder builder() { - return new Builder(); - } - - public final CqlIdentifier keyspace; - - /** - * What kind of protocol events to listen for. 
- * - * @see com.datastax.oss.protocol.internal.ProtocolConstants.EventType - */ - public final List eventTypes; - - public final EventCallback eventCallback; - - public final String ownerLogPrefix; - - private DriverChannelOptions( - CqlIdentifier keyspace, - List eventTypes, - EventCallback eventCallback, - String ownerLogPrefix) { - this.keyspace = keyspace; - this.eventTypes = eventTypes; - this.eventCallback = eventCallback; - this.ownerLogPrefix = ownerLogPrefix; - } - - public static class Builder { - private CqlIdentifier keyspace = null; - private List eventTypes = Collections.emptyList(); - private EventCallback eventCallback = null; - private String ownerLogPrefix = null; - - public Builder withKeyspace(CqlIdentifier keyspace) { - this.keyspace = keyspace; - return this; - } - - public Builder withEvents(List eventTypes, EventCallback eventCallback) { - Preconditions.checkArgument(eventTypes != null && !eventTypes.isEmpty()); - Preconditions.checkNotNull(eventCallback); - this.eventTypes = eventTypes; - this.eventCallback = eventCallback; - return this; - } - - public Builder withOwnerLogPrefix(String ownerLogPrefix) { - this.ownerLogPrefix = ownerLogPrefix; - return this; - } - - public DriverChannelOptions build() { - return new DriverChannelOptions(keyspace, eventTypes, eventCallback, ownerLogPrefix); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java deleted file mode 100644 index 0ac71233fdd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.protocol.internal.Message; - -public interface EventCallback { - /** Invoked when a protocol event is received. */ - void onEvent(Message event); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java deleted file mode 100644 index 3dac60f5216..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.response.Supported; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.timeout.IdleState; -import io.netty.handler.timeout.IdleStateEvent; -import io.netty.handler.timeout.IdleStateHandler; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@NotThreadSafe -class HeartbeatHandler extends IdleStateHandler { - - private static final Logger LOG = LoggerFactory.getLogger(HeartbeatHandler.class); - - private final DriverExecutionProfile config; - - private HeartbeatRequest request; - - HeartbeatHandler(DriverExecutionProfile config) { - super((int) config.getDuration(DefaultDriverOption.HEARTBEAT_INTERVAL).getSeconds(), 0, 0); - this.config = config; - } - - @Override - protected void channelIdle(ChannelHandlerContext ctx, IdleStateEvent evt) throws Exception { - if (evt.state() == IdleState.READER_IDLE) { - if (this.request != null) { - LOG.warn( - "Not sending heartbeat because a previous one is still in progress. 
" - + "Check that {} is not lower than {}.", - DefaultDriverOption.HEARTBEAT_INTERVAL.getPath(), - DefaultDriverOption.HEARTBEAT_TIMEOUT.getPath()); - } else { - LOG.debug( - "Connection was inactive for {} seconds, sending heartbeat", - config.getDuration(DefaultDriverOption.HEARTBEAT_INTERVAL).getSeconds()); - long timeoutMillis = config.getDuration(DefaultDriverOption.HEARTBEAT_TIMEOUT).toMillis(); - this.request = new HeartbeatRequest(ctx, timeoutMillis); - this.request.send(); - } - } - } - - private class HeartbeatRequest extends ChannelHandlerRequest { - - HeartbeatRequest(ChannelHandlerContext ctx, long timeoutMillis) { - super(ctx, timeoutMillis); - } - - @Override - String describe() { - return "Heartbeat request"; - } - - @Override - Message getRequest() { - return Options.INSTANCE; - } - - @Override - void onResponse(Message response) { - if (response instanceof Supported) { - LOG.debug("{} Heartbeat query succeeded", ctx.channel()); - HeartbeatHandler.this.request = null; - } else { - failOnUnexpected(response); - } - } - - @Override - void fail(String message, Throwable cause) { - if (cause instanceof HeartbeatException) { - // Ignore: this happens when the heartbeat query times out and the inflight handler aborts - // all queries (including the heartbeat query itself) - return; - } - - HeartbeatHandler.this.request = null; - if (message != null) { - LOG.debug("{} Heartbeat query failed: {}", ctx.channel(), message, cause); - } else { - LOG.debug("{} Heartbeat query failed", ctx.channel(), cause); - } - - // Notify InFlightHandler. 
- ctx.fireExceptionCaught( - new HeartbeatException(ctx.channel().remoteAddress(), message, cause)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java deleted file mode 100644 index 90b02f358cd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java +++ /dev/null @@ -1,457 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel.RequestMessage; -import com.datastax.oss.driver.internal.core.channel.DriverChannel.SetKeyspaceEvent; -import com.datastax.oss.driver.internal.core.protocol.FrameDecodingException; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.collect.BiMap; -import com.datastax.oss.driver.shaded.guava.common.collect.HashBiMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.util.concurrent.Promise; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Manages requests that are currently executing on a channel. 
*/ -@NotThreadSafe -public class InFlightHandler extends ChannelDuplexHandler { - private static final Logger LOG = LoggerFactory.getLogger(InFlightHandler.class); - - private final ProtocolVersion protocolVersion; - private final StreamIdGenerator streamIds; - final ChannelPromise closeStartedFuture; - private final String ownerLogPrefix; - private final BiMap inFlight; - private final Map orphaned; - private volatile int orphanedSize; // thread-safe view for metrics - private final long setKeyspaceTimeoutMillis; - private final EventCallback eventCallback; - private final int maxOrphanStreamIds; - private boolean closingGracefully; - private SetKeyspaceRequest setKeyspaceRequest; - private String logPrefix; - - InFlightHandler( - ProtocolVersion protocolVersion, - StreamIdGenerator streamIds, - int maxOrphanStreamIds, - long setKeyspaceTimeoutMillis, - ChannelPromise closeStartedFuture, - EventCallback eventCallback, - String ownerLogPrefix) { - this.protocolVersion = protocolVersion; - this.streamIds = streamIds; - this.maxOrphanStreamIds = maxOrphanStreamIds; - this.closeStartedFuture = closeStartedFuture; - this.ownerLogPrefix = ownerLogPrefix; - this.logPrefix = ownerLogPrefix + "|connecting..."; - this.inFlight = HashBiMap.create(streamIds.getMaxAvailableIds()); - this.orphaned = new HashMap<>(maxOrphanStreamIds); - this.setKeyspaceTimeoutMillis = setKeyspaceTimeoutMillis; - this.eventCallback = eventCallback; - } - - @Override - public void channelActive(ChannelHandlerContext ctx) throws Exception { - super.channelActive(ctx); - String channelId = ctx.channel().toString(); - this.logPrefix = ownerLogPrefix + "|" + channelId.substring(1, channelId.length() - 1); - } - - @Override - public void write(ChannelHandlerContext ctx, Object in, ChannelPromise promise) throws Exception { - if (in == DriverChannel.GRACEFUL_CLOSE_MESSAGE) { - LOG.debug("[{}] Received graceful close request", logPrefix); - startGracefulShutdown(ctx); - } else if (in == 
DriverChannel.FORCEFUL_CLOSE_MESSAGE) { - LOG.debug("[{}] Received forceful close request, aborting pending queries", logPrefix); - abortAllInFlight(new ClosedConnectionException("Channel was force-closed")); - ctx.channel().close(); - } else if (in instanceof HeartbeatException) { - abortAllInFlight( - new ClosedConnectionException("Heartbeat query failed", ((HeartbeatException) in))); - ctx.close(); - } else if (in instanceof RequestMessage) { - write(ctx, (RequestMessage) in, promise); - } else if (in instanceof ResponseCallback) { - cancel(ctx, (ResponseCallback) in, promise); - } else { - promise.setFailure( - new IllegalArgumentException("Unsupported message type " + in.getClass().getName())); - } - } - - private void write(ChannelHandlerContext ctx, RequestMessage message, ChannelPromise promise) { - if (closingGracefully) { - promise.setFailure(new IllegalStateException("Channel is closing")); - streamIds.cancelPreAcquire(); - return; - } - int streamId = streamIds.acquire(); - if (streamId < 0) { - // Should not happen with the preAcquire mechanism, but handle gracefully - promise.setFailure( - new BusyConnectionException( - String.format( - "Couldn't acquire a stream id from InFlightHandler on %s", ctx.channel()))); - streamIds.cancelPreAcquire(); - return; - } - - if (inFlight.containsKey(streamId)) { - promise.setFailure( - new IllegalStateException("Found pending callback for stream id " + streamId)); - streamIds.cancelPreAcquire(); - return; - } - - LOG.trace("[{}] Writing {} on stream id {}", logPrefix, message.responseCallback, streamId); - Frame frame = - Frame.forRequest( - protocolVersion.getCode(), - streamId, - message.tracing, - message.customPayload, - message.request); - - inFlight.put(streamId, message.responseCallback); - ChannelFuture writeFuture = ctx.write(frame, promise); - writeFuture.addListener( - future -> { - if (future.isSuccess()) { - message.responseCallback.onStreamIdAssigned(streamId); - } else { - release(streamId, ctx); - } 
- }); - } - - private void cancel( - ChannelHandlerContext ctx, ResponseCallback responseCallback, ChannelPromise promise) { - Integer streamId = inFlight.inverse().remove(responseCallback); - if (streamId == null) { - LOG.trace( - "[{}] Received cancellation for unknown or already cancelled callback {}, skipping", - logPrefix, - responseCallback); - } else { - LOG.trace( - "[{}] Cancelled callback {} for stream id {}", logPrefix, responseCallback, streamId); - if (closingGracefully && inFlight.isEmpty()) { - LOG.debug("[{}] Last pending query was cancelled, closing channel", logPrefix); - ctx.channel().close(); - } else { - // We can't release the stream id, because a response might still come back from the server. - // Keep track of those "orphaned" ids, to release them later if we get a response and the - // callback says it's the last one. - orphaned.put(streamId, responseCallback); - if (orphaned.size() > maxOrphanStreamIds) { - LOG.debug( - "[{}] Orphan stream ids exceeded the configured threshold ({}), closing gracefully", - logPrefix, - maxOrphanStreamIds); - startGracefulShutdown(ctx); - } else { - orphanedSize = orphaned.size(); - } - } - } - promise.setSuccess(); - } - - private void startGracefulShutdown(ChannelHandlerContext ctx) { - if (inFlight.isEmpty()) { - LOG.debug("[{}] No pending queries, completing graceful shutdown now", logPrefix); - ctx.channel().close(); - } else { - // Remove heartbeat handler from pipeline if present. 
- ChannelHandler heartbeatHandler = ctx.pipeline().get(ChannelFactory.HEARTBEAT_HANDLER_NAME); - if (heartbeatHandler != null) { - ctx.pipeline().remove(heartbeatHandler); - } - LOG.debug("[{}] There are pending queries, delaying graceful shutdown", logPrefix); - closingGracefully = true; - closeStartedFuture.trySuccess(); - } - } - - @Override - @SuppressWarnings("NonAtomicVolatileUpdate") - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - Frame responseFrame = (Frame) msg; - int streamId = responseFrame.streamId; - - if (streamId < 0) { - Message event = responseFrame.message; - if (eventCallback == null) { - LOG.debug("[{}] Received event {} but no callback was registered", logPrefix, event); - } else { - LOG.debug("[{}] Received event {}, notifying callback", logPrefix, event); - try { - eventCallback.onEvent(event); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, "[{}] Unexpected error while invoking event handler", logPrefix, t); - } - } - } else { - boolean wasInFlight = true; - ResponseCallback callback = inFlight.get(streamId); - if (callback == null) { - wasInFlight = false; - callback = orphaned.get(streamId); - if (callback == null) { - LOG.trace("[{}] Got response on unknown stream id {}, skipping", logPrefix, streamId); - return; - } - } - try { - if (callback.isLastResponse(responseFrame)) { - LOG.debug( - "[{}] Got last response on {} stream id {}, completing and releasing", - logPrefix, - wasInFlight ? "in-flight" : "orphaned", - streamId); - release(streamId, ctx); - } else { - LOG.trace( - "[{}] Got non-last response on {} stream id {}, still holding", - logPrefix, - wasInFlight ? 
"in-flight" : "orphaned", - streamId); - } - if (wasInFlight) { - callback.onResponse(responseFrame); - } - } catch (Throwable t) { - if (wasInFlight) { - fail( - callback, - new IllegalArgumentException("Unexpected error while invoking response handler", t)); - } else { - // Assume the callback is already completed, so it's better to log - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while invoking response handler on stream id {}", - logPrefix, - t, - streamId); - } - } - } - } - - /** Called if an exception was thrown while processing an inbound event (i.e. a response). */ - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable exception) throws Exception { - if (exception instanceof FrameDecodingException) { - int streamId = ((FrameDecodingException) exception).streamId; - LOG.debug("[{}] Error while decoding response on stream id {}", logPrefix, streamId); - if (streamId >= 0) { - // We know which request matches the failing response, fail that one only - ResponseCallback responseCallback = inFlight.get(streamId); - if (responseCallback != null) { - fail(responseCallback, exception.getCause()); - } - release(streamId, ctx); - } else { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while decoding incoming event frame", - logPrefix, - exception.getCause()); - } - } else { - // Otherwise fail all pending requests - abortAllInFlight( - (exception instanceof HeartbeatException) - ? 
(HeartbeatException) exception - : new ClosedConnectionException("Unexpected error on channel", exception)); - ctx.close(); - } - } - - @Override - public void userEventTriggered(ChannelHandlerContext ctx, Object event) throws Exception { - if (event instanceof SetKeyspaceEvent) { - SetKeyspaceEvent setKeyspaceEvent = (SetKeyspaceEvent) event; - if (this.setKeyspaceRequest != null) { - setKeyspaceEvent.promise.setFailure( - new IllegalStateException( - "Can't call setKeyspace while a keyspace switch is already in progress")); - } else { - LOG.debug( - "[{}] Switching to keyspace {}", logPrefix, setKeyspaceEvent.keyspaceName.asInternal()); - this.setKeyspaceRequest = new SetKeyspaceRequest(ctx, setKeyspaceEvent); - this.setKeyspaceRequest.send(); - } - } else { - super.userEventTriggered(ctx, event); - } - } - - @Override - public void channelInactive(ChannelHandlerContext ctx) throws Exception { - // If the channel was closed normally (normal or forced shutdown), inFlight is already empty by - // the time we get here. So if it's not, it means the channel closed unexpectedly (e.g. the - // connection was dropped). 
- abortAllInFlight(new ClosedConnectionException("Lost connection to remote peer")); - super.channelInactive(ctx); - } - - private void release(int streamId, ChannelHandlerContext ctx) { - LOG.trace("[{}] Releasing stream id {}", logPrefix, streamId); - if (inFlight.remove(streamId) != null) { - // If we're in the middle of an orderly close and this was the last request, actually close - // the channel now - if (closingGracefully && inFlight.isEmpty()) { - LOG.debug("[{}] Done handling the last pending query, closing channel", logPrefix); - ctx.channel().close(); - } - } else if (orphaned.remove(streamId) != null) { - orphanedSize = orphaned.size(); - } - // Note: it's possible that the callback is in neither map, if we get here after a call to - // abortAllInFlight that already cleared the map (see JAVA-2000) - streamIds.release(streamId); - } - - private void abortAllInFlight(DriverException cause) { - abortAllInFlight(cause, null); - } - - /** - * @param ignore the ResponseCallback that called this method, if applicable (avoids a recursive - * loop) - */ - private void abortAllInFlight(DriverException cause, ResponseCallback ignore) { - if (!inFlight.isEmpty()) { - - // Create a local copy and clear the map immediately. This prevents - // ConcurrentModificationException if aborting one of the handlers recurses back into this - // method. - Set responseCallbacks = ImmutableSet.copyOf(inFlight.values()); - inFlight.clear(); - - for (ResponseCallback responseCallback : responseCallbacks) { - if (responseCallback != ignore) { - fail(responseCallback, cause); - } - } - // It's not necessary to release the stream ids, since we always call this method right before - // closing the channel - } - } - - private void fail(ResponseCallback callback, Throwable failure) { - try { - callback.onFailure(failure); - } catch (Throwable throwable) { - // Protect against unexpected errors. 
We don't have anywhere to report the error (since - // onFailure failed), so log as a last resort. - LOG.error("[{}] Unexpected error while failing {}", logPrefix, callback, throwable); - } - } - - int getAvailableIds() { - return streamIds.getAvailableIds(); - } - - boolean preAcquireId() { - return streamIds.preAcquire(); - } - - int getInFlight() { - return streamIds.getMaxAvailableIds() - streamIds.getAvailableIds(); - } - - int getOrphanIds() { - return orphanedSize; - } - - private class SetKeyspaceRequest extends ChannelHandlerRequest { - - private final CqlIdentifier keyspaceName; - private final Promise promise; - - SetKeyspaceRequest(ChannelHandlerContext ctx, SetKeyspaceEvent setKeyspaceEvent) { - super(ctx, setKeyspaceTimeoutMillis); - this.keyspaceName = setKeyspaceEvent.keyspaceName; - this.promise = setKeyspaceEvent.promise; - } - - @Override - String describe() { - return "[" + logPrefix + "] Set keyspace request (USE " + keyspaceName.asCql(true) + ")"; - } - - @Override - Message getRequest() { - return new Query("USE " + keyspaceName.asCql(false)); - } - - @Override - void onResponse(Message response) { - if (response instanceof SetKeyspace) { - if (promise.trySuccess(null)) { - InFlightHandler.this.setKeyspaceRequest = null; - } - } else { - failOnUnexpected(response); - } - } - - @Override - void fail(String message, Throwable cause) { - ClosedConnectionException setKeyspaceException = - new ClosedConnectionException(message, cause); - if (promise.tryFailure(setKeyspaceException)) { - InFlightHandler.this.setKeyspaceRequest = null; - // setKeyspace queries are not triggered directly by the user, but only as a response to a - // successful "USE... query", so the keyspace name should generally be valid. If the - // keyspace switch fails, this could be due to a schema disagreement or a more serious - // error. Rescheduling the switch is impractical, we can't do much better than closing the - // channel and letting it reconnect. 
- Loggers.warnWithException( - LOG, "[{}] Unexpected error while switching keyspace", logPrefix, setKeyspaceException); - abortAllInFlight(setKeyspaceException, this); - ctx.channel().close(); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java deleted file mode 100644 index 518f398a808..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; - -public class InboundTrafficMeter extends ChannelInboundHandlerAdapter { - - private final NodeMetricUpdater nodeMetricUpdater; - private final SessionMetricUpdater sessionMetricUpdater; - - InboundTrafficMeter( - NodeMetricUpdater nodeMetricUpdater, SessionMetricUpdater sessionMetricUpdater) { - this.nodeMetricUpdater = nodeMetricUpdater; - this.sessionMetricUpdater = sessionMetricUpdater; - } - - @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - if (msg instanceof ByteBuf) { - int bytes = ((ByteBuf) msg).readableBytes(); - nodeMetricUpdater.markMeter(DefaultNodeMetric.BYTES_RECEIVED, null, bytes); - sessionMetricUpdater.markMeter(DefaultSessionMetric.BYTES_RECEIVED, null, bytes); - } - super.channelRead(ctx, msg); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java deleted file mode 100644 index 768eb047b9d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelOutboundHandlerAdapter; -import io.netty.channel.ChannelPromise; - -public class OutboundTrafficMeter extends ChannelOutboundHandlerAdapter { - - private final NodeMetricUpdater nodeMetricUpdater; - private final SessionMetricUpdater sessionMetricUpdater; - - OutboundTrafficMeter( - NodeMetricUpdater nodeMetricUpdater, SessionMetricUpdater sessionMetricUpdater) { - this.nodeMetricUpdater = nodeMetricUpdater; - this.sessionMetricUpdater = sessionMetricUpdater; - } - - @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { - if (msg instanceof ByteBuf) { - int bytes = ((ByteBuf) msg).readableBytes(); - nodeMetricUpdater.markMeter(DefaultNodeMetric.BYTES_SENT, null, bytes); - sessionMetricUpdater.markMeter(DefaultSessionMetric.BYTES_SENT, null, bytes); - } - super.write(ctx, msg, promise); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java deleted file mode 100644 index 4e3f7d61f66..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import net.jcip.annotations.ThreadSafe; - -/** No-op implementation of the write coalescer: each write is flushed immediately. 
*/ -@ThreadSafe -public class PassThroughWriteCoalescer implements WriteCoalescer { - - public PassThroughWriteCoalescer(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } - - @Override - public ChannelFuture writeAndFlush(Channel channel, Object message) { - return channel.writeAndFlush(message); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java deleted file mode 100644 index 8a426f7b368..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.auth.Authenticator; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ConnectionInitException; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.protocol.BytesToSegmentDecoder; -import com.datastax.oss.driver.internal.core.protocol.FrameToSegmentEncoder; -import com.datastax.oss.driver.internal.core.protocol.SegmentToBytesEncoder; -import com.datastax.oss.driver.internal.core.protocol.SegmentToFrameDecoder; -import com.datastax.oss.driver.internal.core.util.ProtocolUtils; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.ProtocolConstants.ErrorCode; -import com.datastax.oss.protocol.internal.request.AuthResponse; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.Register; -import com.datastax.oss.protocol.internal.request.Startup; -import com.datastax.oss.protocol.internal.response.AuthChallenge; -import 
com.datastax.oss.protocol.internal.response.AuthSuccess; -import com.datastax.oss.protocol.internal.response.Authenticate; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Ready; -import com.datastax.oss.protocol.internal.response.Supported; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPipeline; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles the sequence of internal requests that we send on a channel before it's ready to accept - * user requests. - */ -@NotThreadSafe -class ProtocolInitHandler extends ConnectInitHandler { - private static final Logger LOG = LoggerFactory.getLogger(ProtocolInitHandler.class); - private static final Query CLUSTER_NAME_QUERY = - new Query("SELECT cluster_name FROM system.local"); - - private final InternalDriverContext context; - private final long timeoutMillis; - private final ProtocolVersion initialProtocolVersion; - private final DriverChannelOptions options; - // might be null if this is the first channel to this cluster - private final String expectedClusterName; - private final EndPoint endPoint; - private final HeartbeatHandler heartbeatHandler; - private String logPrefix; - private ChannelHandlerContext ctx; - private final boolean querySupportedOptions; - - /** - * @param querySupportedOptions whether to send OPTIONS as the first message, to request which - * protocol options the channel supports. If this is true, the options will be stored as a - * channel attribute, and exposed via {@link DriverChannel#getOptions()}. 
- */ - ProtocolInitHandler( - InternalDriverContext context, - ProtocolVersion protocolVersion, - String expectedClusterName, - EndPoint endPoint, - DriverChannelOptions options, - HeartbeatHandler heartbeatHandler, - boolean querySupportedOptions) { - - this.context = context; - this.endPoint = endPoint; - - DriverExecutionProfile defaultConfig = context.getConfig().getDefaultProfile(); - - this.timeoutMillis = - defaultConfig.getDuration(DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT).toMillis(); - this.initialProtocolVersion = protocolVersion; - this.expectedClusterName = expectedClusterName; - this.options = options; - this.heartbeatHandler = heartbeatHandler; - this.querySupportedOptions = querySupportedOptions; - this.logPrefix = options.ownerLogPrefix + "|connecting..."; - } - - @Override - public void channelActive(ChannelHandlerContext ctx) throws Exception { - super.channelActive(ctx); - String channelId = ctx.channel().toString(); - this.logPrefix = options.ownerLogPrefix + "|" + channelId.substring(1, channelId.length() - 1); - } - - @Override - protected void onRealConnect(ChannelHandlerContext ctx) { - LOG.debug("[{}] Starting channel initialization", logPrefix); - this.ctx = ctx; - new InitRequest(ctx).send(); - } - - @Override - protected boolean setConnectSuccess() { - boolean result = super.setConnectSuccess(); - if (result) { - // add heartbeat to pipeline now that protocol is initialized. - ctx.pipeline() - .addBefore( - ChannelFactory.INFLIGHT_HANDLER_NAME, - ChannelFactory.HEARTBEAT_HANDLER_NAME, - heartbeatHandler); - } - return result; - } - - private enum Step { - OPTIONS, - STARTUP, - GET_CLUSTER_NAME, - SET_KEYSPACE, - AUTH_RESPONSE, - REGISTER, - } - - private class InitRequest extends ChannelHandlerRequest { - // This class is a finite-state automaton, that sends a different query depending on the step - // in the initialization sequence. 
- private Step step; - private int stepNumber = 0; - private Message request; - private Authenticator authenticator; - private ByteBuffer authResponseToken; - - InitRequest(ChannelHandlerContext ctx) { - super(ctx, timeoutMillis); - this.step = querySupportedOptions ? Step.OPTIONS : Step.STARTUP; - } - - @Override - String describe() { - return String.format( - "[%s] Protocol initialization request, step %d (%s)", logPrefix, stepNumber, request); - } - - @Override - Message getRequest() { - switch (step) { - case OPTIONS: - return request = Options.INSTANCE; - case STARTUP: - return request = new Startup(context.getStartupOptions()); - case GET_CLUSTER_NAME: - return request = CLUSTER_NAME_QUERY; - case SET_KEYSPACE: - return request = new Query("USE " + options.keyspace.asCql(false)); - case AUTH_RESPONSE: - return request = new AuthResponse(authResponseToken); - case REGISTER: - return request = new Register(options.eventTypes); - default: - throw new AssertionError("unhandled step: " + step); - } - } - - @Override - void send() { - stepNumber++; - super.send(); - } - - @Override - void onResponse(Message response) { - LOG.debug( - "[{}] step {} received response opcode={}", - logPrefix, - step, - ProtocolUtils.opcodeString(response.opcode)); - try { - if (step == Step.OPTIONS && response instanceof Supported) { - channel.attr(DriverChannel.OPTIONS_KEY).set(((Supported) response).options); - step = Step.STARTUP; - send(); - } else if (step == Step.STARTUP && response instanceof Ready) { - maybeSwitchToModernFraming(); - context.getAuthProvider().ifPresent(provider -> provider.onMissingChallenge(endPoint)); - step = Step.GET_CLUSTER_NAME; - send(); - } else if (step == Step.STARTUP && response instanceof Authenticate) { - maybeSwitchToModernFraming(); - Authenticate authenticate = (Authenticate) response; - authenticator = buildAuthenticator(endPoint, authenticate.authenticator); - authenticator - .initialResponse() - .whenCompleteAsync( - (token, error) -> { - if 
(error != null) { - fail( - new AuthenticationException( - endPoint, - String.format( - "Authenticator.initialResponse(): stage completed exceptionally (%s)", - error), - error)); - } else { - step = Step.AUTH_RESPONSE; - authResponseToken = token; - send(); - } - }, - channel.eventLoop()) - .exceptionally(UncaughtExceptions::log); - } else if (step == Step.AUTH_RESPONSE && response instanceof AuthChallenge) { - ByteBuffer challenge = ((AuthChallenge) response).token; - authenticator - .evaluateChallenge(challenge) - .whenCompleteAsync( - (token, error) -> { - if (error != null) { - fail( - new AuthenticationException( - endPoint, - String.format( - "Authenticator.evaluateChallenge(): stage completed exceptionally (%s)", - error), - error)); - } else { - step = Step.AUTH_RESPONSE; - authResponseToken = token; - send(); - } - }, - channel.eventLoop()) - .exceptionally(UncaughtExceptions::log); - } else if (step == Step.AUTH_RESPONSE && response instanceof AuthSuccess) { - ByteBuffer token = ((AuthSuccess) response).token; - authenticator - .onAuthenticationSuccess(token) - .whenCompleteAsync( - (ignored, error) -> { - if (error != null) { - fail( - new AuthenticationException( - endPoint, - String.format( - "Authenticator.onAuthenticationSuccess(): stage completed exceptionally (%s)", - error), - error)); - } else { - step = Step.GET_CLUSTER_NAME; - send(); - } - }, - channel.eventLoop()) - .exceptionally(UncaughtExceptions::log); - } else if (step == Step.AUTH_RESPONSE - && response instanceof Error - && ((Error) response).code == ProtocolConstants.ErrorCode.AUTH_ERROR) { - fail( - new AuthenticationException( - endPoint, - String.format( - "server replied with '%s' to AuthResponse request", - ((Error) response).message))); - } else if (step == Step.GET_CLUSTER_NAME && response instanceof Rows) { - Rows rows = (Rows) response; - List row = Objects.requireNonNull(rows.getData().poll()); - String actualClusterName = getString(row, 0); - if (expectedClusterName != 
null && !expectedClusterName.equals(actualClusterName)) { - fail( - new ClusterNameMismatchException(endPoint, actualClusterName, expectedClusterName)); - } else { - if (expectedClusterName == null) { - // Store the actual name so that it can be retrieved from the factory - channel.attr(DriverChannel.CLUSTER_NAME_KEY).set(actualClusterName); - } - if (options.keyspace != null) { - step = Step.SET_KEYSPACE; - send(); - } else if (!options.eventTypes.isEmpty()) { - step = Step.REGISTER; - send(); - } else { - setConnectSuccess(); - } - } - } else if (step == Step.SET_KEYSPACE && response instanceof SetKeyspace) { - if (!options.eventTypes.isEmpty()) { - step = Step.REGISTER; - send(); - } else { - setConnectSuccess(); - } - } else if (step == Step.REGISTER && response instanceof Ready) { - setConnectSuccess(); - } else if (response instanceof Error) { - Error error = (Error) response; - // Testing for a specific string is a tad fragile but Cassandra doesn't give us a more - // precise error code. - // C* 2.1 reports a server error instead of protocol error, see CASSANDRA-9451. 
- boolean firstRequest = - (step == Step.OPTIONS && querySupportedOptions) || step == Step.STARTUP; - boolean serverOrProtocolError = - error.code == ErrorCode.PROTOCOL_ERROR || error.code == ErrorCode.SERVER_ERROR; - boolean badProtocolVersionMessage = - error.message.contains("Invalid or unsupported protocol version") - // JAVA-2925: server is behind driver and considers the proposed version as beta - || error.message.contains("Beta version of the protocol used"); - if (firstRequest && serverOrProtocolError && badProtocolVersionMessage) { - fail( - UnsupportedProtocolVersionException.forSingleAttempt( - endPoint, initialProtocolVersion)); - } else if (step == Step.SET_KEYSPACE - && error.code == ProtocolConstants.ErrorCode.INVALID) { - fail(new InvalidKeyspaceException(error.message)); - } else { - failOnUnexpected(error); - } - } else { - failOnUnexpected(response); - } - } catch (AuthenticationException e) { - fail(e); - } catch (Throwable t) { - fail(String.format("%s: unexpected exception (%s)", describe(), t), t); - } - } - - @Override - void fail(String message, Throwable cause) { - Throwable finalException = - (message == null) ? cause : new ConnectionInitException(message, cause); - setConnectFailure(finalException); - } - - private Authenticator buildAuthenticator(EndPoint endPoint, String authenticator) { - return context - .getAuthProvider() - .map(p -> p.newAuthenticator(endPoint, authenticator)) - .orElseThrow( - () -> - new AuthenticationException( - endPoint, - String.format( - "Node %s requires authentication (%s), but no authenticator configured", - endPoint, authenticator))); - } - - @Override - public String toString() { - return "init query " + step; - } - } - - /** - * Rearranges the pipeline to deal with the new framing structure in protocol v5 and above. The - * first messages still use the legacy format, we only do this after a successful response to the - * first STARTUP message. 
- */ - private void maybeSwitchToModernFraming() { - if (context - .getProtocolVersionRegistry() - .supports(initialProtocolVersion, DefaultProtocolFeature.MODERN_FRAMING)) { - - ChannelPipeline pipeline = ctx.pipeline(); - - // We basically add one conversion step in the middle: frames <-> *segments* <-> bytes - // Outbound: - pipeline.replace( - ChannelFactory.FRAME_TO_BYTES_ENCODER_NAME, - ChannelFactory.FRAME_TO_SEGMENT_ENCODER_NAME, - new FrameToSegmentEncoder( - context.getPrimitiveCodec(), context.getFrameCodec(), logPrefix)); - pipeline.addBefore( - ChannelFactory.FRAME_TO_SEGMENT_ENCODER_NAME, - ChannelFactory.SEGMENT_TO_BYTES_ENCODER_NAME, - new SegmentToBytesEncoder(context.getSegmentCodec())); - - // Inbound: - pipeline.replace( - ChannelFactory.BYTES_TO_FRAME_DECODER_NAME, - ChannelFactory.BYTES_TO_SEGMENT_DECODER_NAME, - new BytesToSegmentDecoder(context.getSegmentCodec())); - pipeline.addAfter( - ChannelFactory.BYTES_TO_SEGMENT_DECODER_NAME, - ChannelFactory.SEGMENT_TO_FRAME_DECODER_NAME, - new SegmentToFrameDecoder(context.getFrameCodec(), logPrefix)); - } - } - - private String getString(List row, int i) { - return TypeCodecs.TEXT.decode(row.get(i), DefaultProtocolVersion.DEFAULT); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java deleted file mode 100644 index 5a0e9e5eb86..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.protocol.internal.Frame; - -/** - * The outcome of a request sent to a Cassandra node. - * - *

This comes into play after the request has been successfully written to the channel. - * - *

Due to internal implementation constraints, different instances of this type must not be equal - * to each other (they are stored in a {@code BiMap} in {@link InFlightHandler}); reference equality - * should be appropriate in all cases. - */ -public interface ResponseCallback { - - /** - * Invoked when the server replies (note that the response frame might contain an error message). - */ - void onResponse(Frame responseFrame); - - /** - * Invoked if we couldn't get the response. - * - *

This can be triggered in two cases: - * - *

    - *
  • the connection was closed (for example, because of a heartbeat failure) before the - * response was received; - *
  • the response was received but there was an error while decoding it. - *
- */ - void onFailure(Throwable error); - - /** - * Reports the stream id used for the request on the current connection. - * - *

This is called every time the request is written successfully to a connection (and therefore - * might multiple times in case of retries). It is guaranteed to be invoked before any response to - * the request on that connection is processed. - * - *

The default implementation does nothing. This only needs to be overridden for specialized - * requests that hold the stream id across multiple responses. - * - * @see #isLastResponse(Frame) - */ - default void onStreamIdAssigned(int streamId) { - // nothing to do - } - - /** - * Whether the given frame is the last response to this request. - * - *

This is invoked for each response received by this callback; if it returns {@code true}, the - * driver assumes that the server is no longer using this stream id, and that it can be safely - * reused to send another request. - * - *

The default implementation always returns {@code true}: regular CQL requests only have one - * response, and we can reuse the stream id as soon as we've received it. This only needs to be - * overridden for specialized requests that hold the stream id across multiple responses. - */ - default boolean isLastResponse(Frame responseFrame) { - return true; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java deleted file mode 100644 index 3384bc57c94..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import java.util.BitSet; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.NotThreadSafe; - -/** - * Manages the set of identifiers used to distinguish multiplexed requests on a channel. - * - *

{@link #preAcquire()} / {@link #getAvailableIds()} follow atomic semantics. See {@link - * DriverChannel#preAcquireId()} for more explanations. - * - *

Other methods are not synchronized, they are only called by {@link InFlightHandler} on the I/O - * thread. - */ -@NotThreadSafe -class StreamIdGenerator { - - private final int maxAvailableIds; - // unset = available, set = borrowed (note that this is the opposite of the 3.x implementation) - private final BitSet ids; - private final AtomicInteger availableIds; - - StreamIdGenerator(int maxAvailableIds) { - this.maxAvailableIds = maxAvailableIds; - this.ids = new BitSet(this.maxAvailableIds); - this.availableIds = new AtomicInteger(this.maxAvailableIds); - } - - boolean preAcquire() { - while (true) { - int current = availableIds.get(); - assert current >= 0; - if (current == 0) { - return false; - } else if (availableIds.compareAndSet(current, current - 1)) { - return true; - } - } - } - - void cancelPreAcquire() { - int available = availableIds.incrementAndGet(); - assert available <= maxAvailableIds; - } - - int acquire() { - assert availableIds.get() < maxAvailableIds; - int id = ids.nextClearBit(0); - if (id >= maxAvailableIds) { - return -1; - } - ids.set(id); - return id; - } - - void release(int id) { - if (!ids.get(id)) { - throw new IllegalStateException("Tried to release id that hadn't been borrowed: " + id); - } - ids.clear(id); - int available = availableIds.incrementAndGet(); - assert available <= maxAvailableIds; - } - - int getAvailableIds() { - return availableIds.get(); - } - - int getMaxAvailableIds() { - return maxAvailableIds; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java deleted file mode 100644 index 03391c57809..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; - -/** - * Optimizes the write operations on Netty channels. - * - *

Flush operations are generally speaking expensive as these may trigger a syscall on the - * transport level. Thus it is in most cases (where write latency can be traded with throughput) a - * good idea to try to minimize flush operations as much as possible. This component allows writes - * to be accumulated and flushed together for better performance. - */ -public interface WriteCoalescer { - /** - * Writes and flushes the message to the channel, possibly at a later time, but the order of - * messages must be preserved. - */ - ChannelFuture writeAndFlush(Channel channel, Object message); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java deleted file mode 100644 index d8514bdb88c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** Handling of a single connection to a Cassandra node. 
*/ -package com.datastax.oss.driver.internal.core.channel; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java deleted file mode 100644 index d2898d39925..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config; - -/** An event triggered when the configuration was changed. */ -public enum ConfigChangeEvent { - // Implementation note: to find where this event is consumed, look for references to the class - // itself, not INSTANCE (EventBus.register takes a class not an object). 
- INSTANCE -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DerivedExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/DerivedExecutionProfile.java deleted file mode 100644 index 39c37d78c10..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DerivedExecutionProfile.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.List; -import java.util.Map; -import java.util.SortedSet; -import java.util.function.BiFunction; - -public class DerivedExecutionProfile implements DriverExecutionProfile { - - private static final Object NO_VALUE = new Object(); - - public static DerivedExecutionProfile with( - DriverExecutionProfile baseProfile, DriverOption option, Object value) { - if (baseProfile instanceof DerivedExecutionProfile) { - // Don't nest derived profiles, use same base and add to overrides - DerivedExecutionProfile previousDerived = (DerivedExecutionProfile) baseProfile; - ImmutableMap.Builder newOverrides = ImmutableMap.builder(); - for (Map.Entry override : previousDerived.overrides.entrySet()) { - if (!override.getKey().equals(option)) { - newOverrides.put(override.getKey(), override.getValue()); - } - } - newOverrides.put(option, value); - return new DerivedExecutionProfile(previousDerived.baseProfile, newOverrides.build()); - } else { - return new DerivedExecutionProfile(baseProfile, ImmutableMap.of(option, value)); - } - } - - public static DerivedExecutionProfile without( - DriverExecutionProfile baseProfile, DriverOption option) { - return with(baseProfile, option, NO_VALUE); - } - - private final DriverExecutionProfile baseProfile; - private final Map overrides; - - public DerivedExecutionProfile( - DriverExecutionProfile baseProfile, Map overrides) { - this.baseProfile = baseProfile; - this.overrides = overrides; - } - - @NonNull - @Override - public String getName() { - return baseProfile.getName(); - } 
- - @Override - public boolean isDefined(@NonNull DriverOption option) { - if (overrides.containsKey(option)) { - return overrides.get(option) != NO_VALUE; - } else { - return baseProfile.isDefined(option); - } - } - - @Override - public boolean getBoolean(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBoolean); - } - - @NonNull - @Override - public List getBooleanList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBooleanList); - } - - @Override - public int getInt(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getInt); - } - - @NonNull - @Override - public List getIntList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getIntList); - } - - @Override - public long getLong(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getLong); - } - - @NonNull - @Override - public List getLongList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getLongList); - } - - @Override - public double getDouble(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDouble); - } - - @NonNull - @Override - public List getDoubleList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDoubleList); - } - - @NonNull - @Override - public String getString(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getString); - } - - @NonNull - @Override - public List getStringList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getStringList); - } - - @NonNull - @Override - public Map getStringMap(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getStringMap); - } - - @Override - public long getBytes(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBytes); - } - - @NonNull - @Override - public List getBytesList(DriverOption option) { - return get(option, 
DriverExecutionProfile::getBytesList); - } - - @NonNull - @Override - public Duration getDuration(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDuration); - } - - @NonNull - @Override - public List getDurationList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDurationList); - } - - @NonNull - @SuppressWarnings("unchecked") - private ValueT get( - @NonNull DriverOption option, - BiFunction getter) { - Object value = overrides.get(option); - if (value == null) { - value = getter.apply(baseProfile, option); - } - if (value == null || value == NO_VALUE) { - throw new IllegalArgumentException("Missing configuration option " + option.getPath()); - } - return (ValueT) value; - } - - @NonNull - @Override - public SortedSet> entrySet() { - ImmutableSortedSet.Builder> builder = - ImmutableSortedSet.orderedBy(Map.Entry.comparingByKey()); - // builder.add() has no effect if the element already exists, so process the overrides first - // since they have higher precedence - for (Map.Entry entry : overrides.entrySet()) { - if (entry.getValue() != NO_VALUE) { - builder.add(new AbstractMap.SimpleEntry<>(entry.getKey().getPath(), entry.getValue())); - } - } - builder.addAll(baseProfile.entrySet()); - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java deleted file mode 100644 index 5775fcbe507..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config; - -import com.datastax.oss.driver.api.core.config.DriverOption; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.List; -import java.util.Map; - -/** @see com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoaderBuilder */ -@Deprecated -public interface DriverOptionConfigBuilder { - - @NonNull - @CheckReturnValue - default SelfT withBoolean(@NonNull DriverOption option, boolean value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withBooleanList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withInt(@NonNull DriverOption option, int value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withIntList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withLong(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withLongList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - 
default SelfT withDouble(@NonNull DriverOption option, double value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withDoubleList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withString(@NonNull DriverOption option, @NonNull String value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withStringList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @SuppressWarnings("unchecked") - @NonNull - @CheckReturnValue - default SelfT withStringMap(@NonNull DriverOption option, @NonNull Map value) { - SelfT v = (SelfT) this; - for (String key : value.keySet()) { - v = (SelfT) v.with(option.getPath() + "." + key, value.get(key)); - } - return v; - } - - /** - * Specifies a size in bytes. This is separate from {@link #withLong(DriverOption, long)}, in case - * implementations want to allow users to provide sizes in a more human-readable way, for example - * "256 MB". 
- */ - @NonNull - @CheckReturnValue - default SelfT withBytes(@NonNull DriverOption option, @NonNull String value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withBytes(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withBytesList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withDuration(@NonNull DriverOption option, @NonNull Duration value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withDurationList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withClass(@NonNull DriverOption option, @NonNull Class value) { - return with(option, value.getName()); - } - - /** Unsets an option. */ - @NonNull - @CheckReturnValue - default SelfT without(@NonNull DriverOption option) { - return with(option, null); - } - - @NonNull - @CheckReturnValue - default SelfT with(@NonNull DriverOption option, @Nullable Object value) { - return with(option.getPath(), value); - } - - /** - * Provides a simple path to value mapping, all default methods invoke this method directly. It is - * not recommended that it is used directly other than by these defaults. - */ - @NonNull - @CheckReturnValue - SelfT with(@NonNull String path, @Nullable Object value); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfig.java deleted file mode 100644 index 1a1076e9d78..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfig.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.cloud; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import java.util.List; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CloudConfig { - - private final InetSocketAddress proxyAddress; - private final List endPoints; - private final String localDatacenter; - private final SslEngineFactory sslEngineFactory; - - CloudConfig( - @NonNull InetSocketAddress proxyAddress, - @NonNull List endPoints, - @NonNull String localDatacenter, - @NonNull SslEngineFactory sslEngineFactory) { - this.proxyAddress = proxyAddress; - this.endPoints = ImmutableList.copyOf(endPoints); - this.localDatacenter = localDatacenter; - this.sslEngineFactory = sslEngineFactory; - } - - @NonNull - public InetSocketAddress getProxyAddress() { - return proxyAddress; - } - - @NonNull - public List getEndPoints() { - return endPoints; - } - - @NonNull - public String getLocalDatacenter() { - return localDatacenter; - } - - @NonNull - public SslEngineFactory getSslEngineFactory() { - return 
sslEngineFactory; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactory.java deleted file mode 100644 index 817b3263d25..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactory.java +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.cloud; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.metadata.SniEndPoint; -import com.datastax.oss.driver.internal.core.ssl.SniSslEngineFactory; -import com.datastax.oss.driver.shaded.guava.common.io.ByteStreams; -import com.datastax.oss.driver.shaded.guava.common.net.HostAndPort; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.BufferedReader; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.MalformedURLException; -import java.net.URL; -import java.net.UnknownHostException; -import java.nio.charset.StandardCharsets; -import java.security.GeneralSecurityException; -import java.security.KeyStore; -import java.security.SecureRandom; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; -import java.util.zip.ZipEntry; -import java.util.zip.ZipInputStream; -import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManagerFactory; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class CloudConfigFactory { - private static final Logger LOG = LoggerFactory.getLogger(CloudConfigFactory.class); - /** - * Creates a {@link CloudConfig} with information fetched from the specified Cloud configuration - * URL. - * - *

The target URL must point to a valid secure connect bundle archive in ZIP format. - * - * @param cloudConfigUrl the URL to fetch the Cloud configuration from; cannot be null. - * @throws IOException If the Cloud configuration cannot be read. - * @throws GeneralSecurityException If the Cloud SSL context cannot be created. - */ - @NonNull - public CloudConfig createCloudConfig(@NonNull URL cloudConfigUrl) - throws IOException, GeneralSecurityException { - Objects.requireNonNull(cloudConfigUrl, "cloudConfigUrl cannot be null"); - return createCloudConfig(cloudConfigUrl.openStream()); - } - - /** - * Creates a {@link CloudConfig} with information fetched from the specified {@link InputStream}. - * - *

The stream must contain a valid secure connect bundle archive in ZIP format. Note that the - * stream will be closed after a call to that method and cannot be used anymore. - * - * @param cloudConfig the stream to read the Cloud configuration from; cannot be null. - * @throws IOException If the Cloud configuration cannot be read. - * @throws GeneralSecurityException If the Cloud SSL context cannot be created. - */ - @NonNull - public CloudConfig createCloudConfig(@NonNull InputStream cloudConfig) - throws IOException, GeneralSecurityException { - Objects.requireNonNull(cloudConfig, "cloudConfig cannot be null"); - JsonNode configJson = null; - ByteArrayOutputStream keyStoreOutputStream = null; - ByteArrayOutputStream trustStoreOutputStream = null; - ObjectMapper mapper = new ObjectMapper().configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false); - try (ZipInputStream zipInputStream = new ZipInputStream(cloudConfig)) { - ZipEntry entry; - while ((entry = zipInputStream.getNextEntry()) != null) { - String fileName = entry.getName(); - switch (fileName) { - case "config.json": - configJson = mapper.readTree(zipInputStream); - break; - case "identity.jks": - keyStoreOutputStream = new ByteArrayOutputStream(); - ByteStreams.copy(zipInputStream, keyStoreOutputStream); - break; - case "trustStore.jks": - trustStoreOutputStream = new ByteArrayOutputStream(); - ByteStreams.copy(zipInputStream, trustStoreOutputStream); - break; - } - } - } - if (configJson == null) { - throw new IllegalStateException("Invalid bundle: missing file config.json"); - } - if (keyStoreOutputStream == null) { - throw new IllegalStateException("Invalid bundle: missing file identity.jks"); - } - if (trustStoreOutputStream == null) { - throw new IllegalStateException("Invalid bundle: missing file trustStore.jks"); - } - char[] keyStorePassword = getKeyStorePassword(configJson); - char[] trustStorePassword = getTrustStorePassword(configJson); - ByteArrayInputStream keyStoreInputStream = - new 
ByteArrayInputStream(keyStoreOutputStream.toByteArray()); - ByteArrayInputStream trustStoreInputStream = - new ByteArrayInputStream(trustStoreOutputStream.toByteArray()); - SSLContext sslContext = - createSslContext( - keyStoreInputStream, keyStorePassword, trustStoreInputStream, trustStorePassword); - URL metadataServiceUrl = getMetadataServiceUrl(configJson); - JsonNode proxyMetadataJson; - try (BufferedReader proxyMetadata = fetchProxyMetadata(metadataServiceUrl, sslContext)) { - proxyMetadataJson = mapper.readTree(proxyMetadata); - } - InetSocketAddress sniProxyAddress = getSniProxyAddress(proxyMetadataJson); - List endPoints = getEndPoints(proxyMetadataJson, sniProxyAddress); - String localDatacenter = getLocalDatacenter(proxyMetadataJson); - SniSslEngineFactory sslEngineFactory = new SniSslEngineFactory(sslContext); - validateIfBundleContainsUsernamePassword(configJson); - return new CloudConfig(sniProxyAddress, endPoints, localDatacenter, sslEngineFactory); - } - - @NonNull - protected char[] getKeyStorePassword(JsonNode configFile) { - if (configFile.has("keyStorePassword")) { - return configFile.get("keyStorePassword").asText().toCharArray(); - } else { - throw new IllegalStateException("Invalid config.json: missing field keyStorePassword"); - } - } - - @NonNull - protected char[] getTrustStorePassword(JsonNode configFile) { - if (configFile.has("trustStorePassword")) { - return configFile.get("trustStorePassword").asText().toCharArray(); - } else { - throw new IllegalStateException("Invalid config.json: missing field trustStorePassword"); - } - } - - @NonNull - protected URL getMetadataServiceUrl(JsonNode configFile) throws MalformedURLException { - if (configFile.has("host")) { - String metadataServiceHost = configFile.get("host").asText(); - if (configFile.has("port")) { - int metadataServicePort = configFile.get("port").asInt(); - return new URL("https", metadataServiceHost, metadataServicePort, "/metadata"); - } else { - throw new 
IllegalStateException("Invalid config.json: missing field port"); - } - } else { - throw new IllegalStateException("Invalid config.json: missing field host"); - } - } - - protected void validateIfBundleContainsUsernamePassword(JsonNode configFile) { - if (configFile.has("username") || configFile.has("password")) { - LOG.info( - "The bundle contains config.json with username and/or password. Providing it in the bundle is deprecated and ignored."); - } - } - - @NonNull - protected SSLContext createSslContext( - @NonNull ByteArrayInputStream keyStoreInputStream, - @NonNull char[] keyStorePassword, - @NonNull ByteArrayInputStream trustStoreInputStream, - @NonNull char[] trustStorePassword) - throws IOException, GeneralSecurityException { - KeyManagerFactory kmf = createKeyManagerFactory(keyStoreInputStream, keyStorePassword); - TrustManagerFactory tmf = createTrustManagerFactory(trustStoreInputStream, trustStorePassword); - SSLContext sslContext = SSLContext.getInstance("SSL"); - sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); - return sslContext; - } - - @NonNull - protected KeyManagerFactory createKeyManagerFactory( - @NonNull InputStream keyStoreInputStream, @NonNull char[] keyStorePassword) - throws IOException, GeneralSecurityException { - KeyStore ks = KeyStore.getInstance("JKS"); - ks.load(keyStoreInputStream, keyStorePassword); - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, keyStorePassword); - Arrays.fill(keyStorePassword, (char) 0); - return kmf; - } - - @NonNull - protected TrustManagerFactory createTrustManagerFactory( - @NonNull InputStream trustStoreInputStream, @NonNull char[] trustStorePassword) - throws IOException, GeneralSecurityException { - KeyStore ts = KeyStore.getInstance("JKS"); - ts.load(trustStoreInputStream, trustStorePassword); - TrustManagerFactory tmf = - TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - 
tmf.init(ts); - Arrays.fill(trustStorePassword, (char) 0); - return tmf; - } - - @NonNull - protected BufferedReader fetchProxyMetadata( - @NonNull URL metadataServiceUrl, @NonNull SSLContext sslContext) throws IOException { - try { - HttpsURLConnection connection = (HttpsURLConnection) metadataServiceUrl.openConnection(); - connection.setSSLSocketFactory(sslContext.getSocketFactory()); - connection.setRequestMethod("GET"); - return new BufferedReader( - new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8)); - } catch (ConnectException e) { - throw new IllegalStateException( - "Unable to connect to cloud metadata service. Please make sure your cluster is not parked or terminated", - e); - } catch (UnknownHostException e) { - throw new IllegalStateException( - "Unable to resolve host for cloud metadata service. Please make sure your cluster is not terminated", - e); - } - } - - @NonNull - protected String getLocalDatacenter(@NonNull JsonNode proxyMetadata) { - JsonNode contactInfo = getContactInfo(proxyMetadata); - if (contactInfo.has("local_dc")) { - return contactInfo.get("local_dc").asText(); - } else { - throw new IllegalStateException("Invalid proxy metadata: missing field local_dc"); - } - } - - @NonNull - protected InetSocketAddress getSniProxyAddress(@NonNull JsonNode proxyMetadata) { - JsonNode contactInfo = getContactInfo(proxyMetadata); - if (contactInfo.has("sni_proxy_address")) { - HostAndPort sniProxyHostAndPort = - HostAndPort.fromString(contactInfo.get("sni_proxy_address").asText()); - if (!sniProxyHostAndPort.hasPort()) { - throw new IllegalStateException( - "Invalid proxy metadata: missing port from field sni_proxy_address"); - } - return InetSocketAddress.createUnresolved( - sniProxyHostAndPort.getHost(), sniProxyHostAndPort.getPort()); - } else { - throw new IllegalStateException("Invalid proxy metadata: missing field sni_proxy_address"); - } - } - - @NonNull - protected List getEndPoints( - @NonNull JsonNode proxyMetadata, 
@NonNull InetSocketAddress sniProxyAddress) { - JsonNode contactInfo = getContactInfo(proxyMetadata); - if (contactInfo.has("contact_points")) { - List endPoints = new ArrayList<>(); - JsonNode hostIdsJson = contactInfo.get("contact_points"); - for (int i = 0; i < hostIdsJson.size(); i++) { - endPoints.add(new SniEndPoint(sniProxyAddress, hostIdsJson.get(i).asText())); - } - return endPoints; - } else { - throw new IllegalStateException("Invalid proxy metadata: missing field contact_points"); - } - } - - @NonNull - protected JsonNode getContactInfo(@NonNull JsonNode proxyMetadata) { - if (proxyMetadata.has("contact_info")) { - return proxyMetadata.get("contact_info"); - } else { - throw new IllegalStateException("Invalid proxy metadata: missing field contact_info"); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfig.java deleted file mode 100644 index 9a74d00df4f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfig.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.composite; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ConcurrentHashMap; - -public class CompositeDriverConfig implements DriverConfig { - - private final DriverConfig primaryConfig; - private final DriverConfig fallbackConfig; - private final Map profiles = new ConcurrentHashMap<>(); - - public CompositeDriverConfig( - @NonNull DriverConfig primaryConfig, @NonNull DriverConfig fallbackConfig) { - this.primaryConfig = Objects.requireNonNull(primaryConfig); - this.fallbackConfig = Objects.requireNonNull(fallbackConfig); - } - - @NonNull - @Override - public DriverExecutionProfile getProfile(@NonNull String profileName) { - return profiles.compute( - profileName, - (k, v) -> - (v == null) - ? 
new CompositeDriverExecutionProfile(primaryConfig, fallbackConfig, profileName) - : v.refresh()); - } - - @NonNull - @Override - public Map getProfiles() { - // The map is updated lazily, if we want all the profiles we need to fetch them explicitly - for (String name : - Sets.union(primaryConfig.getProfiles().keySet(), fallbackConfig.getProfiles().keySet())) { - getProfile(name); - } - return Collections.unmodifiableMap(profiles); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigLoader.java deleted file mode 100644 index 23baf458c85..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigLoader.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.composite; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -public class CompositeDriverConfigLoader implements DriverConfigLoader { - - private final DriverConfigLoader primaryConfigLoader; - private final DriverConfigLoader fallbackConfigLoader; - - public CompositeDriverConfigLoader( - @NonNull DriverConfigLoader primaryConfigLoader, - @NonNull DriverConfigLoader fallbackConfigLoader) { - this.primaryConfigLoader = Objects.requireNonNull(primaryConfigLoader); - this.fallbackConfigLoader = Objects.requireNonNull(fallbackConfigLoader); - } - - @NonNull - @Override - public DriverConfig getInitialConfig() { - DriverConfig primaryConfig = primaryConfigLoader.getInitialConfig(); - DriverConfig fallbackConfig = fallbackConfigLoader.getInitialConfig(); - return new CompositeDriverConfig(primaryConfig, fallbackConfig); - } - - @Override - public void onDriverInit(@NonNull DriverContext context) { - fallbackConfigLoader.onDriverInit(context); - primaryConfigLoader.onDriverInit(context); - } - - @NonNull - @Override - public CompletionStage reload() { - if (!primaryConfigLoader.supportsReloading() && !fallbackConfigLoader.supportsReloading()) { - return CompletableFutures.failedFuture( - new UnsupportedOperationException( - "Reloading is not supported (this is a composite config, " - + "and neither the primary nor the fallback are reloadable)")); - } else if (!primaryConfigLoader.supportsReloading()) { - return fallbackConfigLoader.reload(); - } else if (!fallbackConfigLoader.supportsReloading()) { - return 
primaryConfigLoader.reload(); - } else { - CompletionStage primaryFuture = primaryConfigLoader.reload(); - CompletionStage fallbackFuture = fallbackConfigLoader.reload(); - CompletableFuture compositeFuture = new CompletableFuture<>(); - primaryFuture.whenComplete( - (primaryChanged, primaryError) -> - fallbackFuture.whenComplete( - (fallbackChanged, fallbackError) -> { - if (primaryError == null && fallbackError == null) { - compositeFuture.complete(primaryChanged || fallbackChanged); - } else if (fallbackError == null) { - compositeFuture.completeExceptionally(primaryError); - } else if (primaryError == null) { - compositeFuture.completeExceptionally(fallbackError); - } else { - primaryError.addSuppressed(fallbackError); - compositeFuture.completeExceptionally(primaryError); - } - })); - return compositeFuture; - } - } - - @Override - public boolean supportsReloading() { - return primaryConfigLoader.supportsReloading() || fallbackConfigLoader.supportsReloading(); - } - - @Override - public void close() { - primaryConfigLoader.close(); - fallbackConfigLoader.close(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverExecutionProfile.java deleted file mode 100644 index 147d9e0bdb4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverExecutionProfile.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.composite; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.function.BiFunction; - -public class CompositeDriverExecutionProfile implements DriverExecutionProfile { - - private final DriverConfig primaryConfig; - private final DriverConfig fallbackConfig; - private final String profileName; - - @Nullable private volatile DriverExecutionProfile primaryProfile; - @Nullable private volatile DriverExecutionProfile fallbackProfile; - - public CompositeDriverExecutionProfile( - @NonNull DriverConfig primaryConfig, - @NonNull DriverConfig fallbackConfig, - @NonNull String profileName) { - this.primaryConfig = Objects.requireNonNull(primaryConfig); - this.fallbackConfig = Objects.requireNonNull(fallbackConfig); - this.profileName = Objects.requireNonNull(profileName); - refreshInternal(); - } - - /** - * Fetches the underlying profiles again from the two backing configs. 
This is because some config - * implementations support adding/removing profiles at runtime. - * - *

For efficiency reasons this is only done when the user fetches the profile again from the - * main config, not every time an option is fetched from the profile. - */ - public CompositeDriverExecutionProfile refresh() { - return refreshInternal(); - } - - // This method only exists to avoid calling its public, overridable variant from the constructor - private CompositeDriverExecutionProfile refreshInternal() { - // There's no `hasProfile()` in the public API because it didn't make sense until now. So - // unfortunately we have to catch the exception. - try { - primaryProfile = primaryConfig.getProfile(profileName); - } catch (IllegalArgumentException e) { - primaryProfile = null; - } - try { - fallbackProfile = fallbackConfig.getProfile(profileName); - } catch (IllegalArgumentException e) { - fallbackProfile = null; - } - - Preconditions.checkArgument( - primaryProfile != null || fallbackProfile != null, - "Unknown profile '%s'. Check your configuration.", - profileName); - return this; - } - - @NonNull - @Override - public String getName() { - return profileName; - } - - @Override - public boolean isDefined(@NonNull DriverOption option) { - DriverExecutionProfile primaryProfile = this.primaryProfile; - if (primaryProfile != null && primaryProfile.isDefined(option)) { - return true; - } else { - DriverExecutionProfile fallbackProfile = this.fallbackProfile; - return fallbackProfile != null && fallbackProfile.isDefined(option); - } - } - - @Override - public boolean getBoolean(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBoolean); - } - - @NonNull - @Override - public List getBooleanList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBooleanList); - } - - @Override - public int getInt(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getInt); - } - - @NonNull - @Override - public List getIntList(@NonNull DriverOption option) { - return get(option, 
DriverExecutionProfile::getIntList); - } - - @Override - public long getLong(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getLong); - } - - @NonNull - @Override - public List getLongList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getLongList); - } - - @Override - public double getDouble(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDouble); - } - - @NonNull - @Override - public List getDoubleList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDoubleList); - } - - @NonNull - @Override - public String getString(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getString); - } - - @NonNull - @Override - public List getStringList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getStringList); - } - - @NonNull - @Override - public Map getStringMap(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getStringMap); - } - - @Override - public long getBytes(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBytes); - } - - @NonNull - @Override - public List getBytesList(DriverOption option) { - return get(option, DriverExecutionProfile::getBytesList); - } - - @NonNull - @Override - public Duration getDuration(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDuration); - } - - @NonNull - @Override - public List getDurationList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDurationList); - } - - private ValueT get( - @NonNull DriverOption option, - BiFunction getter) { - DriverExecutionProfile primaryProfile = this.primaryProfile; - if (primaryProfile != null && primaryProfile.isDefined(option)) { - return getter.apply(primaryProfile, option); - } else { - DriverExecutionProfile fallbackProfile = this.fallbackProfile; - if (fallbackProfile != null && 
fallbackProfile.isDefined(option)) { - return getter.apply(fallbackProfile, option); - } else { - throw new IllegalArgumentException("Unknown option: " + option); - } - } - } - - @NonNull - @Override - public SortedSet> entrySet() { - DriverExecutionProfile primaryProfile = this.primaryProfile; - DriverExecutionProfile fallbackProfile = this.fallbackProfile; - if (primaryProfile != null && fallbackProfile != null) { - SortedSet> result = new TreeSet<>(Map.Entry.comparingByKey()); - result.addAll(fallbackProfile.entrySet()); - result.addAll(primaryProfile.entrySet()); - return ImmutableSortedSet.copyOf(Map.Entry.comparingByKey(), result); - } else if (primaryProfile != null) { - return primaryProfile.entrySet(); - } else { - assert fallbackProfile != null; - return fallbackProfile.entrySet(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfig.java deleted file mode 100644 index 74adbf120ca..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfig.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.map; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -/** @see MapBasedDriverConfigLoader */ -public class MapBasedDriverConfig implements DriverConfig { - - private final Map> optionsMap; - private final Map profiles = new ConcurrentHashMap<>(); - - public MapBasedDriverConfig(Map> optionsMap) { - this.optionsMap = optionsMap; - if (!optionsMap.containsKey(DriverExecutionProfile.DEFAULT_NAME)) { - throw new IllegalArgumentException( - "The options map must contain a profile named " + DriverExecutionProfile.DEFAULT_NAME); - } - createMissingProfiles(); - } - - @NonNull - @Override - public DriverExecutionProfile getProfile(@NonNull String profileName) { - return profiles.computeIfAbsent(profileName, this::newProfile); - } - - @NonNull - @Override - public Map getProfiles() { - // Refresh in case profiles were added to the backing map - createMissingProfiles(); - return Collections.unmodifiableMap(profiles); - } - - private void createMissingProfiles() { - for (Map.Entry> entry : optionsMap.entrySet()) { - String profileName = entry.getKey(); - if (!profiles.containsKey(profileName)) { - profiles.put(profileName, newProfile(profileName)); - } - } - } - - private MapBasedDriverExecutionProfile newProfile(String profileName) { - return new MapBasedDriverExecutionProfile(optionsMap, profileName); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoader.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoader.java deleted file mode 100644 index 14f959e5dc0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoader.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.map; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Consumer; - -public class MapBasedDriverConfigLoader implements DriverConfigLoader, Consumer { - - @NonNull private final OptionsMap source; - @NonNull private final Map> rawMap; - private volatile EventBus eventBus; - - public MapBasedDriverConfigLoader( - @NonNull OptionsMap source, @NonNull Map> rawMap) { - this.source = source; - this.rawMap = rawMap; - } - - @NonNull - @Override - public DriverConfig getInitialConfig() { - return new MapBasedDriverConfig(rawMap); - } - - @Override - public void onDriverInit(@NonNull DriverContext context) { - eventBus = ((InternalDriverContext) context).getEventBus(); - source.addChangeListener(this); - } - - @Override - public void accept(OptionsMap map) { - assert eventBus != null; // listener is registered after setting this field - eventBus.fire(ConfigChangeEvent.INSTANCE); - } - - @NonNull - @Override - public CompletionStage reload() { - return CompletableFuture.completedFuture(true); - } - - @Override - public boolean supportsReloading() { - return true; - } - - @Override - public void close() { - source.removeChangeListener(this); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverExecutionProfile.java deleted file mode 100644 index 4234befd94b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverExecutionProfile.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.map; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.SortedSet; - -/** @see MapBasedDriverConfigLoader */ -public class MapBasedDriverExecutionProfile implements DriverExecutionProfile { - - private final String profileName; - // The backing map for the current profile - private final Map profile; - // The backing map for the default profile (if the current one is not the default) - private final Map defaultProfile; - - public MapBasedDriverExecutionProfile( - Map> optionsMap, String profileName) { - this( - profileName, - optionsMap.get(profileName), - profileName.equals(DriverExecutionProfile.DEFAULT_NAME) - ? Collections.emptyMap() - : optionsMap.get(DriverExecutionProfile.DEFAULT_NAME)); - Preconditions.checkArgument( - optionsMap.containsKey(profileName), - "Unknown profile '%s'. 
Check your configuration.", - profileName); - } - - public MapBasedDriverExecutionProfile( - String profileName, - Map profile, - Map defaultProfile) { - this.profileName = profileName; - this.profile = profile; - this.defaultProfile = defaultProfile; - } - - @NonNull - @Override - public String getName() { - return profileName; - } - - @Override - public boolean isDefined(@NonNull DriverOption option) { - return profile.containsKey(option) || defaultProfile.containsKey(option); - } - - // Driver options don't encode the type, everything relies on the user putting the right types in - // the backing map, so no point in trying to type-check. - @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) - @NonNull - private T get(@NonNull DriverOption option) { - Object value = profile.getOrDefault(option, defaultProfile.get(option)); - if (value == null) { - throw new IllegalArgumentException("Missing configuration option " + option.getPath()); - } - return (T) value; - } - - @Override - public boolean getBoolean(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getBooleanList(@NonNull DriverOption option) { - return get(option); - } - - @Override - public int getInt(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getIntList(@NonNull DriverOption option) { - return get(option); - } - - @Override - public long getLong(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getLongList(@NonNull DriverOption option) { - return get(option); - } - - @Override - public double getDouble(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getDoubleList(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public String getString(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getStringList(@NonNull DriverOption 
option) { - return get(option); - } - - @NonNull - @Override - public Map getStringMap(@NonNull DriverOption option) { - return get(option); - } - - @Override - public long getBytes(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getBytesList(DriverOption option) { - return get(option); - } - - @NonNull - @Override - public Duration getDuration(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getDurationList(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public SortedSet> entrySet() { - ImmutableSortedSet.Builder> builder = - ImmutableSortedSet.orderedBy(Map.Entry.comparingByKey()); - for (Map backingMap : - // builder.add() ignores duplicates, so process higher precedence backing maps first - ImmutableList.of(profile, defaultProfile)) { - for (Map.Entry entry : backingMap.entrySet()) { - builder.add(new AbstractMap.SimpleEntry<>(entry.getKey().getPath(), entry.getValue())); - } - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java deleted file mode 100644 index f1bfbea8249..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java +++ /dev/null @@ -1,374 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import com.typesafe.config.ConfigParseOptions; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ScheduledFuture; -import java.io.File; -import java.net.URL; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import net.jcip.annotations.ThreadSafe; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default loader; it is based on Typesafe Config and optionally reloads at a configurable - * interval. - */ -@ThreadSafe -public class DefaultDriverConfigLoader implements DriverConfigLoader { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultDriverConfigLoader.class); - - public static final String DEFAULT_ROOT_PATH = "datastax-java-driver"; - - public static final Supplier DEFAULT_CONFIG_SUPPLIER = - () -> { - ConfigFactory.invalidateCaches(); - // The thread's context class loader will be used for application classpath resources, - // while the driver class loader will be used for reference classpath resources. - return ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.defaultApplication()) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve() - .getConfig(DEFAULT_ROOT_PATH); - }; - - @NonNull - public static DefaultDriverConfigLoader fromClasspath( - @NonNull String resourceBaseName, @NonNull ClassLoader appClassLoader) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback( - ConfigFactory.parseResourcesAnySyntax( - resourceBaseName, - ConfigParseOptions.defaults().setClassLoader(appClassLoader))) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve(); - return config.getConfig(DEFAULT_ROOT_PATH); - }); - } - - @NonNull - public static DriverConfigLoader fromFile(@NonNull File file) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.parseFileAnySyntax(file)) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve(); - return config.getConfig(DEFAULT_ROOT_PATH); - }); - } - - @NonNull - public static DriverConfigLoader 
fromUrl(@NonNull URL url) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.parseURL(url)) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve(); - return config.getConfig(DEFAULT_ROOT_PATH); - }); - } - - @NonNull - public static DefaultDriverConfigLoader fromString(@NonNull String contents) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.parseString(contents)) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve(); - return config.getConfig(DEFAULT_ROOT_PATH); - }, - false); - } - - private final Supplier configSupplier; - private final TypesafeDriverConfig driverConfig; - private final boolean supportsReloading; - - private volatile SingleThreaded singleThreaded; - - /** - * Builds a new instance with the default Typesafe config loading rules (documented in {@link - * SessionBuilder#withConfigLoader(DriverConfigLoader)}) and the core driver options. This - * constructor enables config reloading (that is, {@link #supportsReloading} will return true). - * - *

Application-specific classpath resources will be located using the {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. This might not be - * suitable for OSGi deployments, which should use {@link #DefaultDriverConfigLoader(ClassLoader)} - * instead. - */ - public DefaultDriverConfigLoader() { - this(DEFAULT_CONFIG_SUPPLIER); - } - - /** - * Builds a new instance with the default Typesafe config loading rules (documented in {@link - * SessionBuilder#withConfigLoader(DriverConfigLoader)}) and the core driver options, except that - * application-specific classpath resources will be located using the provided {@link ClassLoader} - * instead of {@linkplain Thread#getContextClassLoader() the current thread's context class - * loader}. This constructor enables config reloading (that is, {@link #supportsReloading} will - * return true). - */ - public DefaultDriverConfigLoader(@NonNull ClassLoader appClassLoader) { - this( - () -> { - ConfigFactory.invalidateCaches(); - return ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.defaultApplication(appClassLoader)) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve() - .getConfig(DEFAULT_ROOT_PATH); - }); - } - - /** - * Builds an instance with custom arguments, if you want to load the configuration from somewhere - * else. This constructor enables config reloading (that is, {@link #supportsReloading} will - * return true). - * - * @param configSupplier A supplier for the Typesafe {@link Config}; it will be invoked once when - * this object is instantiated, and at each reload attempt, if reloading is enabled. - */ - public DefaultDriverConfigLoader(@NonNull Supplier configSupplier) { - this(configSupplier, true); - } - - /** - * Builds an instance with custom arguments, if you want to load the configuration from somewhere - * else and/or modify config reload behavior. 
- * - * @param configSupplier A supplier for the Typesafe {@link Config}; it will be invoked once when - * this object is instantiated, and at each reload attempt, if reloading is enabled. - * @param supportsReloading Whether config reloading should be enabled or not. - */ - public DefaultDriverConfigLoader( - @NonNull Supplier configSupplier, boolean supportsReloading) { - this.configSupplier = configSupplier; - this.driverConfig = new TypesafeDriverConfig(configSupplier.get()); - this.supportsReloading = supportsReloading; - } - - @NonNull - @Override - public DriverConfig getInitialConfig() { - return driverConfig; - } - - @Override - public void onDriverInit(@NonNull DriverContext driverContext) { - this.singleThreaded = new SingleThreaded((InternalDriverContext) driverContext); - } - - @NonNull - @Override - public final CompletionStage reload() { - if (supportsReloading) { - CompletableFuture result = new CompletableFuture<>(); - RunOrSchedule.on(singleThreaded.adminExecutor, () -> singleThreaded.reload(result)); - return result; - } else { - return CompletableFutures.failedFuture( - new UnsupportedOperationException( - "This instance of DefaultDriverConfigLoader does not support reloading")); - } - } - - @Override - public final boolean supportsReloading() { - return supportsReloading; - } - - /** For internal use only, this leaks a Typesafe config type. */ - @NonNull - public Supplier getConfigSupplier() { - return configSupplier; - } - - @Override - public void close() { - SingleThreaded singleThreaded = this.singleThreaded; - if (singleThreaded != null && !singleThreaded.adminExecutor.terminationFuture().isDone()) { - try { - RunOrSchedule.on(singleThreaded.adminExecutor, singleThreaded::close); - } catch (RejectedExecutionException e) { - // Checking the future is racy, there is still a tiny window that could get us here. 
- // We can safely ignore this error because, if the execution is rejected, the periodic - // reload task, if any, has been already cancelled. - } - } - } - - /** - * Constructs a builder that may be used to provide additional configuration beyond those defined - * in your configuration files programmatically. For example: - * - *

{@code
-   * CqlSession session = CqlSession.builder()
-   *   .withConfigLoader(DefaultDriverConfigLoader.builder()
-   *     .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofMillis(500))
-   *     .build())
-   *   .build();
-   * }
- * - *

In the general case, use of this is not recommended, but it may be useful in situations - * where configuration must be defined at runtime or is derived from some other configuration - * source. - * - * @deprecated this feature is now available in the public API. Use {@link - * DriverConfigLoader#programmaticBuilder()} instead. - */ - @Deprecated - @NonNull - public static DefaultDriverConfigLoaderBuilder builder() { - return new DefaultDriverConfigLoaderBuilder(); - } - - private class SingleThreaded { - private final String logPrefix; - private final EventExecutor adminExecutor; - private final EventBus eventBus; - private final DriverExecutionProfile config; - - private Duration reloadInterval; - private ScheduledFuture periodicTaskHandle; - private boolean closeWasCalled; - - private SingleThreaded(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.eventBus = context.getEventBus(); - this.config = context.getConfig().getDefaultProfile(); - this.reloadInterval = - context - .getConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.CONFIG_RELOAD_INTERVAL); - - RunOrSchedule.on(adminExecutor, this::schedulePeriodicReload); - } - - private void schedulePeriodicReload() { - assert adminExecutor.inEventLoop(); - // Cancel any previously running task - if (periodicTaskHandle != null) { - periodicTaskHandle.cancel(false); - } - if (reloadInterval.isZero()) { - LOG.debug("[{}] Reload interval is 0, disabling periodic reloading", logPrefix); - } else { - LOG.debug("[{}] Scheduling periodic reloading with interval {}", logPrefix, reloadInterval); - periodicTaskHandle = - adminExecutor.scheduleAtFixedRate( - this::reloadInBackground, - reloadInterval.toNanos(), - reloadInterval.toNanos(), - TimeUnit.NANOSECONDS); - } - } - - /** - * @param reloadedFuture a future to complete when the reload is complete (might be null if the - * caller is not 
interested in being notified) - */ - private void reload(CompletableFuture reloadedFuture) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - if (reloadedFuture != null) { - reloadedFuture.completeExceptionally(new IllegalStateException("session is closing")); - } - return; - } - try { - boolean changed = driverConfig.reload(configSupplier.get()); - if (changed) { - LOG.info("[{}] Detected a configuration change", logPrefix); - eventBus.fire(ConfigChangeEvent.INSTANCE); - Duration newReloadInterval = - config.getDuration(DefaultDriverOption.CONFIG_RELOAD_INTERVAL); - if (!newReloadInterval.equals(reloadInterval)) { - reloadInterval = newReloadInterval; - schedulePeriodicReload(); - } - } else { - LOG.debug("[{}] Reloaded configuration but it hasn't changed", logPrefix); - } - if (reloadedFuture != null) { - reloadedFuture.complete(changed); - } - } catch (Error | RuntimeException e) { - if (reloadedFuture != null) { - reloadedFuture.completeExceptionally(e); - } else { - Loggers.warnWithException( - LOG, "[{}] Unexpected exception during scheduled reload", logPrefix, e); - } - } - } - - private void reloadInBackground() { - reload(null); - } - - private void close() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - if (periodicTaskHandle != null) { - periodicTaskHandle.cancel(false); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java deleted file mode 100644 index 3096fd85ffb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import com.typesafe.config.ConfigValueFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * @deprecated this feature is now available in the public API. Use {@link - * DriverConfigLoader#programmaticBuilder()} instead. - */ -@NotThreadSafe -@Deprecated -public class DefaultDriverConfigLoaderBuilder - implements com.datastax.oss.driver.internal.core.config.DriverOptionConfigBuilder< - DefaultDriverConfigLoaderBuilder> { - - private NullAllowingImmutableMap.Builder values = - NullAllowingImmutableMap.builder(); - - /** - * @return a new {@link ProfileBuilder} to provide programmatic configuration at a profile level. - * @see #withProfile(String, Profile) - */ - @NonNull - public static ProfileBuilder profileBuilder() { - return new ProfileBuilder(); - } - - /** Adds configuration for a profile constructed using {@link #profileBuilder()} by name. 
*/ - @NonNull - public DefaultDriverConfigLoaderBuilder withProfile( - @NonNull String profileName, @NonNull Profile profile) { - String prefix = "profiles." + profileName + "."; - for (Map.Entry entry : profile.values.entrySet()) { - this.with(prefix + entry.getKey(), entry.getValue()); - } - return this; - } - - /** - * @return constructed {@link DriverConfigLoader} using the configuration passed into this - * builder. - */ - @NonNull - public DriverConfigLoader build() { - // fallback on the default config supplier (config file) - return new DefaultDriverConfigLoader( - () -> buildConfig().withFallback(DefaultDriverConfigLoader.DEFAULT_CONFIG_SUPPLIER.get())); - } - - /** @return A {@link Config} containing only the options provided */ - protected Config buildConfig() { - Config config = ConfigFactory.empty(); - for (Map.Entry entry : values.build().entrySet()) { - config = config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(entry.getValue())); - } - return config; - } - - @NonNull - @Override - public DefaultDriverConfigLoaderBuilder with(@NonNull String path, @Nullable Object value) { - values.put(path, value); - return this; - } - - /** A builder for specifying options at a profile level using {@code withXXX} methods. */ - @Deprecated - public static final class ProfileBuilder - implements com.datastax.oss.driver.internal.core.config.DriverOptionConfigBuilder< - ProfileBuilder> { - - final NullAllowingImmutableMap.Builder values = - NullAllowingImmutableMap.builder(); - - private ProfileBuilder() {} - - @NonNull - @Override - public ProfileBuilder with(@NonNull String path, @Nullable Object value) { - values.put(path, value); - return this; - } - - @NonNull - public Profile build() { - return new Profile(values.build()); - } - } - - /** - * A single-purpose holder of profile options as a map to be consumed by {@link - * DefaultDriverConfigLoaderBuilder}. 
- */ - public static final class Profile { - final Map values; - - private Profile(Map values) { - this.values = values; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java deleted file mode 100644 index 2a7f6379362..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import com.typesafe.config.ConfigValueFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.function.Supplier; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class DefaultProgrammaticDriverConfigLoaderBuilder - implements ProgrammaticDriverConfigLoaderBuilder { - - public static final Supplier DEFAULT_FALLBACK_SUPPLIER = - () -> - ConfigFactory.defaultApplication() - // Do not remove root path here, it must be done after merging configs - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())); - - private final Map values = new HashMap<>(); - - private final Supplier fallbackSupplier; - private final String rootPath; - - private String currentProfileName = DriverExecutionProfile.DEFAULT_NAME; - - /** - * Creates an instance of {@link DefaultProgrammaticDriverConfigLoaderBuilder} with default - * settings. - * - *

Fallback configuration for options that haven't been specified programmatically will be - * obtained from standard classpath resources. Application-specific classpath resources will be - * located using the {@linkplain Thread#getContextClassLoader() the current thread's context class - * loader}. This might not be suitable for OSGi deployments, which should use {@link - * #DefaultProgrammaticDriverConfigLoaderBuilder(ClassLoader)} instead. - */ - public DefaultProgrammaticDriverConfigLoaderBuilder() { - this(DEFAULT_FALLBACK_SUPPLIER, DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - } - - /** - * Creates an instance of {@link DefaultProgrammaticDriverConfigLoaderBuilder} with default - * settings but a custom class loader. - * - *

Fallback configuration for options that haven't been specified programmatically will be - * obtained from standard classpath resources. Application-specific classpath resources will be - * located using the provided {@link ClassLoader} instead of {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. - */ - public DefaultProgrammaticDriverConfigLoaderBuilder(@NonNull ClassLoader appClassLoader) { - this( - () -> - ConfigFactory.defaultApplication(appClassLoader) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())), - DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - } - - /** - * Creates an instance of {@link DefaultProgrammaticDriverConfigLoaderBuilder} using a custom - * fallback config supplier. - * - * @param fallbackSupplier the supplier that will provide fallback configuration for options that - * haven't been specified programmatically. - * @param rootPath the root path used in non-programmatic sources (fallback reference.conf and - * system properties). In most cases it should be {@link - * DefaultDriverConfigLoader#DEFAULT_ROOT_PATH}. Cannot be null but can be empty. - */ - public DefaultProgrammaticDriverConfigLoaderBuilder( - @NonNull Supplier fallbackSupplier, @NonNull String rootPath) { - this.fallbackSupplier = fallbackSupplier; - this.rootPath = rootPath; - } - - private ProgrammaticDriverConfigLoaderBuilder with( - @NonNull DriverOption option, @Nullable Object value) { - return with(option.getPath(), value); - } - - private ProgrammaticDriverConfigLoaderBuilder with(@NonNull String path, @Nullable Object value) { - if (!DriverExecutionProfile.DEFAULT_NAME.equals(currentProfileName)) { - path = "profiles." + currentProfileName + "." + path; - } - if (!rootPath.isEmpty()) { - path = rootPath + "." 
+ path; - } - values.put(path, value); - return this; - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder startProfile(@NonNull String profileName) { - currentProfileName = Objects.requireNonNull(profileName); - return this; - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder endProfile() { - currentProfileName = DriverExecutionProfile.DEFAULT_NAME; - return this; - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withBoolean( - @NonNull DriverOption option, boolean value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withBooleanList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withInt(@NonNull DriverOption option, int value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withIntList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withLong(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withLongList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withDouble( - @NonNull DriverOption option, double value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withDoubleList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withString( - @NonNull DriverOption option, @NonNull String value) { - return with(option, value); - } - - @NonNull - @Override - public 
ProgrammaticDriverConfigLoaderBuilder withStringList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withStringMap( - @NonNull DriverOption option, @NonNull Map value) { - for (String key : value.keySet()) { - this.with(option.getPath() + "." + key, value.get(key)); - } - return this; - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withBytes(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withBytesList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withDuration( - @NonNull DriverOption option, @NonNull Duration value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withDurationList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder without(@NonNull DriverOption option) { - return with(option, null); - } - - @NonNull - @Override - public DriverConfigLoader build() { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config programmaticConfig = buildConfig(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(programmaticConfig) - .withFallback(fallbackSupplier.get()) - .resolve(); - // Only remove rootPath after the merge between system properties - // and fallback configuration, since both are supposed to - // contain the same rootPath prefix. - return rootPath.isEmpty() ? 
config : config.getConfig(rootPath); - }); - } - - private Config buildConfig() { - Config config = ConfigFactory.empty(); - for (Map.Entry entry : values.entrySet()) { - config = config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(entry.getValue())); - } - return config; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java deleted file mode 100644 index e1d8c779f2c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static com.typesafe.config.ConfigValueType.OBJECT; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigObject; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigOriginFactory; -import com.typesafe.config.ConfigValue; -import com.typesafe.config.ConfigValueFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.URL; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class TypesafeDriverConfig implements DriverConfig { - - private static final Logger LOG = LoggerFactory.getLogger(TypesafeDriverConfig.class); - private static final ConfigOrigin DEFAULT_OVERRIDES_ORIGIN = - ConfigOriginFactory.newSimple("default was overridden programmatically"); - - private final ImmutableMap profiles; - // Only used to detect if reload saw any change - private volatile Config lastLoadedConfig; - - private final Map defaultOverrides = new ConcurrentHashMap<>(); - - private final TypesafeDriverExecutionProfile.Base defaultProfile; - - public TypesafeDriverConfig(Config config) { - this.lastLoadedConfig = config; - Map profileConfigs = extractProfiles(config); - - ImmutableMap.Builder builder = - ImmutableMap.builder(); - for (Map.Entry entry : profileConfigs.entrySet()) { - builder.put( - entry.getKey(), - new TypesafeDriverExecutionProfile.Base(entry.getKey(), entry.getValue())); - } - this.profiles = builder.build(); - this.defaultProfile = 
profiles.get(DriverExecutionProfile.DEFAULT_NAME); - } - - /** @return whether the configuration changed */ - public boolean reload(Config config) { - config = applyDefaultOverrides(config); - if (config.equals(lastLoadedConfig)) { - return false; - } else { - lastLoadedConfig = config; - try { - Map profileConfigs = extractProfiles(config); - for (Map.Entry entry : profileConfigs.entrySet()) { - String profileName = entry.getKey(); - TypesafeDriverExecutionProfile.Base profile = this.profiles.get(profileName); - if (profile == null) { - LOG.warn( - "Unknown profile '{}' while reloading configuration. " - + "Adding profiles at runtime is not supported.", - profileName); - } else { - profile.refresh(entry.getValue()); - } - } - return true; - } catch (Throwable t) { - Loggers.warnWithException(LOG, "Error reloading configuration, keeping previous one", t); - return false; - } - } - } - - /* - * Processes the raw configuration to extract profiles. For example: - * { - * foo = 1, bar = 2 - * profiles { - * custom1 { bar = 3 } - * } - * } - * Would produce: - * "default" => { foo = 1, bar = 2 } - * "custom1" => { foo = 1, bar = 3 } - */ - private Map extractProfiles(Config sourceConfig) { - ImmutableMap.Builder result = ImmutableMap.builder(); - - Config defaultProfileConfig = sourceConfig.withoutPath("profiles"); - result.put(DriverExecutionProfile.DEFAULT_NAME, defaultProfileConfig); - - // The rest of the method is a bit confusing because we navigate between Typesafe config's two - // APIs, see https://github.com/typesafehub/config#understanding-config-and-configobject - // In an attempt to clarify: - // xxxObject = `ConfigObject` API (config as a hierarchical structure) - // xxxConfig = `Config` API (config as a flat set of options with hierarchical paths) - ConfigObject rootObject = sourceConfig.root(); - if (rootObject.containsKey("profiles") && rootObject.get("profiles").valueType() == OBJECT) { - ConfigObject profilesObject = (ConfigObject) 
rootObject.get("profiles"); - for (String profileName : profilesObject.keySet()) { - if (profileName.equals(DriverExecutionProfile.DEFAULT_NAME)) { - throw new IllegalArgumentException( - String.format( - "Can't have %s as a profile name because it's used internally. Pick another name.", - profileName)); - } - ConfigValue profileObject = profilesObject.get(profileName); - if (profileObject.valueType() == OBJECT) { - Config profileConfig = ((ConfigObject) profileObject).toConfig(); - result.put(profileName, profileConfig.withFallback(defaultProfileConfig)); - } - } - } - return result.build(); - } - - @Override - public DriverExecutionProfile getDefaultProfile() { - return defaultProfile; - } - - @NonNull - @Override - public DriverExecutionProfile getProfile(@NonNull String profileName) { - if (profileName.equals(DriverExecutionProfile.DEFAULT_NAME)) { - return defaultProfile; - } - return Optional.ofNullable(profiles.get(profileName)) - .orElseThrow( - () -> - new IllegalArgumentException( - String.format("Unknown profile '%s'. Check your configuration.", profileName))); - } - - @NonNull - @Override - public Map getProfiles() { - return profiles; - } - - /** - * Replace the given options, only if the original values came from {@code - * reference.conf}: if the option was set explicitly in {@code application.conf}, then the - * override is ignored. - * - *

The overrides are also taken into account in profiles, and survive reloads. If this method - * is invoked multiple times, the last value for each option will be used. Note that it is - * currently not possible to use {@code null} as a value. - */ - public void overrideDefaults(@NonNull Map overrides) { - defaultOverrides.putAll(overrides); - reload(lastLoadedConfig); - } - - private Config applyDefaultOverrides(Config source) { - Config result = source; - for (Map.Entry entry : defaultOverrides.entrySet()) { - String path = entry.getKey().getPath(); - Object value = entry.getValue(); - if (isDefault(source, path)) { - LOG.debug("Replacing default value for {} by {}", path, value); - result = - result.withValue( - path, ConfigValueFactory.fromAnyRef(value).withOrigin(DEFAULT_OVERRIDES_ORIGIN)); - } else { - LOG.debug( - "Ignoring default override for {} because the user has overridden the value", path); - } - } - return result; - } - - // Whether the value in the given path comes from the reference.conf in the driver JAR. - private static boolean isDefault(Config config, String path) { - if (!config.hasPath(path)) { - return false; - } - ConfigOrigin origin = config.getValue(path).origin(); - if (origin.equals(DEFAULT_OVERRIDES_ORIGIN)) { - // Same default was overridden twice, should use the last value - return true; - } - URL url = origin.url(); - return url != null && url.toString().endsWith("reference.conf"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java deleted file mode 100644 index b7dd5abe42e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; -import com.datastax.oss.driver.shaded.guava.common.collect.MapMaker; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import com.typesafe.config.ConfigValue; -import com.typesafe.config.ConfigValueFactory; -import com.typesafe.config.ConfigValueType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.function.Function; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public abstract class TypesafeDriverExecutionProfile implements DriverExecutionProfile { - - /** The original profile in the driver's configuration that this profile was derived from. 
*/ - protected abstract Base getBaseProfile(); - - /** The extra options that were added with {@code withXxx} methods. */ - protected abstract Config getAddedOptions(); - - /** The actual options that will be used to answer {@code getXxx} calls. */ - protected abstract Config getEffectiveOptions(); - - protected final ConcurrentMap cache = new ConcurrentHashMap<>(); - - @Override - public boolean isDefined(@NonNull DriverOption option) { - return getEffectiveOptions().hasPath(option.getPath()); - } - - @Override - public boolean getBoolean(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getBoolean); - } - - // We override `with*` methods because they can be implemented a bit better with Typesafe config - @NonNull - @Override - public DriverExecutionProfile withBoolean(@NonNull DriverOption option, boolean value) { - return with(option, value); - } - - @NonNull - @Override - public List getBooleanList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getBooleanList); - } - - @NonNull - @Override - public DriverExecutionProfile withBooleanList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @Override - public int getInt(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getInt); - } - - @NonNull - @Override - public DriverExecutionProfile withInt(@NonNull DriverOption option, int value) { - return with(option, value); - } - - @NonNull - @Override - public List getIntList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getIntList); - } - - @NonNull - @Override - public DriverExecutionProfile withIntList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @Override - public long getLong(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getLong); - } - - @NonNull - @Override 
- public DriverExecutionProfile withLong(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @Override - public List getLongList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getLongList); - } - - @NonNull - @Override - public DriverExecutionProfile withLongList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @Override - public double getDouble(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getDouble); - } - - @NonNull - @Override - public DriverExecutionProfile withDouble(@NonNull DriverOption option, double value) { - return with(option, value); - } - - @NonNull - @Override - public List getDoubleList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getDoubleList); - } - - @NonNull - @Override - public DriverExecutionProfile withDoubleList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public String getString(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getString); - } - - @NonNull - @Override - public DriverExecutionProfile withString(@NonNull DriverOption option, @NonNull String value) { - return with(option, value); - } - - @NonNull - @Override - public List getStringList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getStringList); - } - - @NonNull - @Override - public DriverExecutionProfile withStringList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public Map getStringMap(@NonNull DriverOption option) { - Config subConfig = getCached(option.getPath(), getEffectiveOptions()::getConfig); - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Map.Entry entry : subConfig.entrySet()) { - if 
(entry.getValue().valueType().equals(ConfigValueType.STRING)) { - builder.put(entry.getKey(), (String) entry.getValue().unwrapped()); - } - } - return builder.build(); - } - - @NonNull - @Override - public DriverExecutionProfile withStringMap( - @NonNull DriverOption option, @NonNull Map map) { - Base base = getBaseProfile(); - // Add the new option to any already derived options - Config newAdded = getAddedOptions(); - for (String key : map.keySet()) { - newAdded = - newAdded.withValue( - option.getPath() + "." + key, ConfigValueFactory.fromAnyRef(map.get(key))); - } - Derived derived = new Derived(base, newAdded); - base.register(derived); - return derived; - } - - @Override - public long getBytes(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getBytes); - } - - @NonNull - @Override - public DriverExecutionProfile withBytes(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @Override - public List getBytesList(DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getBytesList); - } - - @NonNull - @Override - public DriverExecutionProfile withBytesList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public Duration getDuration(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getDuration); - } - - @NonNull - @Override - public DriverExecutionProfile withDuration( - @NonNull DriverOption option, @NonNull Duration value) { - return with(option, value); - } - - @NonNull - @Override - public List getDurationList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getDurationList); - } - - @NonNull - @Override - public DriverExecutionProfile withDurationList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public DriverExecutionProfile 
without(@NonNull DriverOption option) { - return with(option, null); - } - - @NonNull - @Override - public Object getComparisonKey(@NonNull DriverOption option) { - // This method has a default implementation in the interface, but here we can do it in one line: - return getEffectiveOptions().getConfig(option.getPath()); - } - - @NonNull - @Override - public SortedSet> entrySet() { - ImmutableSortedSet.Builder> builder = - ImmutableSortedSet.orderedBy(Map.Entry.comparingByKey()); - for (Map.Entry entry : getEffectiveOptions().entrySet()) { - builder.add(new AbstractMap.SimpleEntry<>(entry.getKey(), entry.getValue().unwrapped())); - } - return builder.build(); - } - - private T getCached(String path, Function compute) { - // compute's signature guarantees we get a T, and this is the only place where we mutate the - // entry - @SuppressWarnings("unchecked") - T t = (T) cache.computeIfAbsent(path, compute); - return t; - } - - private DriverExecutionProfile with(@NonNull DriverOption option, @Nullable Object value) { - Base base = getBaseProfile(); - // Add the new option to any already derived options - Config newAdded = - getAddedOptions().withValue(option.getPath(), ConfigValueFactory.fromAnyRef(value)); - Derived derived = new Derived(base, newAdded); - base.register(derived); - return derived; - } - - /** A profile that was loaded directly from the driver's configuration. 
*/ - @ThreadSafe - static class Base extends TypesafeDriverExecutionProfile { - - private final String name; - private volatile Config options; - private volatile Set derivedProfiles; - - Base(String name, Config options) { - this.name = name; - this.options = options; - } - - @NonNull - @Override - public String getName() { - return name; - } - - @Override - protected Base getBaseProfile() { - return this; - } - - @Override - protected Config getAddedOptions() { - return ConfigFactory.empty(); - } - - @Override - protected Config getEffectiveOptions() { - return options; - } - - void refresh(Config newOptions) { - this.options = newOptions; - this.cache.clear(); - if (derivedProfiles != null) { - for (Derived derivedProfile : derivedProfiles) { - derivedProfile.refresh(); - } - } - } - - void register(Derived derivedProfile) { - getDerivedProfiles().add(derivedProfile); - } - - // Lazy init - private Set getDerivedProfiles() { - Set result = derivedProfiles; - if (result == null) { - synchronized (this) { - result = derivedProfiles; - if (result == null) { - derivedProfiles = - result = Collections.newSetFromMap(new MapMaker().weakKeys().makeMap()); - } - } - } - return result; - } - } - - /** - * A profile that was copied from another profile programmatically using {@code withXxx} methods. 
- */ - @ThreadSafe - static class Derived extends TypesafeDriverExecutionProfile { - - private final Base baseProfile; - private final Config addedOptions; - private volatile Config effectiveOptions; - - Derived(Base baseProfile, Config addedOptions) { - this.baseProfile = baseProfile; - this.addedOptions = addedOptions; - refresh(); - } - - void refresh() { - this.effectiveOptions = addedOptions.withFallback(baseProfile.getEffectiveOptions()); - this.cache.clear(); - } - - @NonNull - @Override - public String getName() { - return baseProfile.getName(); - } - - @Override - protected Base getBaseProfile() { - return baseProfile; - } - - @Override - protected Config getAddedOptions() { - return addedOptions; - } - - @Override - protected Config getEffectiveOptions() { - return effectiveOptions; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java deleted file mode 100644 index 72e0ba5ae3d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Implementation of the driver configuration based on the Typesafe config library. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java deleted file mode 100644 index 03edb38f8d4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.connection; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A reconnection policy that waits a constant time between each reconnection attempt. - * - *

To activate this policy, modify the {@code advanced.reconnection-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.reconnection-policy {
- *     class = ConstantReconnectionPolicy
- *     base-delay = 1 second
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -public class ConstantReconnectionPolicy implements ReconnectionPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(ConstantReconnectionPolicy.class); - - private final String logPrefix; - private final ReconnectionSchedule schedule; - - /** Builds a new instance. */ - public ConstantReconnectionPolicy(DriverContext context) { - this.logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - Duration delay = config.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY); - if (delay.isNegative()) { - throw new IllegalArgumentException( - String.format( - "Invalid negative delay for " - + DefaultDriverOption.RECONNECTION_BASE_DELAY.getPath() - + " (got %d)", - delay)); - } - this.schedule = () -> delay; - } - - @NonNull - @Override - public ReconnectionSchedule newNodeSchedule(@NonNull Node node) { - LOG.debug("[{}] Creating new schedule for {}", logPrefix, node); - return schedule; - } - - @NonNull - @Override - public ReconnectionSchedule newControlConnectionSchedule( - @SuppressWarnings("ignored") boolean isInitialConnection) { - LOG.debug("[{}] Creating new schedule for the control connection", logPrefix); - return schedule; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java deleted file mode 100644 index 5fa04cb63d6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.connection; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import java.util.concurrent.ThreadLocalRandom; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A reconnection policy that waits exponentially longer between each reconnection attempt (but - * keeps a constant delay once a maximum delay is reached). - * - *

It uses the same schedule implementation for individual nodes or the control connection: - * reconnection attempt {@code i} will be tried {@code Math.min(2^(i-1) * getBaseDelayMs(), - * getMaxDelayMs())} milliseconds after the previous one. A random amount of jitter (+/- 15%) will - * be added to the pure exponential delay value to avoid situations where many clients are in the - * reconnection process at exactly the same time. The jitter will never cause the delay to be less - * than the base delay, or more than the max delay. - * - *

To activate this policy, modify the {@code advanced.reconnection-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.reconnection-policy {
- *     class = ExponentialReconnectionPolicy
- *     base-delay = 1 second
- *     max-delay = 60 seconds
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ExponentialReconnectionPolicy implements ReconnectionPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(ExponentialReconnectionPolicy.class); - - private final String logPrefix; - private final long baseDelayMs; - private final long maxDelayMs; - private final long maxAttempts; - - /** Builds a new instance. */ - public ExponentialReconnectionPolicy(DriverContext context) { - this.logPrefix = context.getSessionName(); - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - - this.baseDelayMs = config.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY).toMillis(); - this.maxDelayMs = config.getDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY).toMillis(); - - Preconditions.checkArgument( - baseDelayMs > 0, - "%s must be strictly positive (got %s)", - DefaultDriverOption.RECONNECTION_BASE_DELAY.getPath(), - baseDelayMs); - Preconditions.checkArgument( - maxDelayMs >= 0, - "%s must be positive (got %s)", - DefaultDriverOption.RECONNECTION_MAX_DELAY.getPath(), - maxDelayMs); - Preconditions.checkArgument( - maxDelayMs >= baseDelayMs, - "%s must be bigger than %s (got %s, %s)", - DefaultDriverOption.RECONNECTION_MAX_DELAY.getPath(), - DefaultDriverOption.RECONNECTION_BASE_DELAY.getPath(), - maxDelayMs, - baseDelayMs); - - // Maximum number of attempts after which we overflow - int ceil = (baseDelayMs & (baseDelayMs - 1)) == 0 ? 0 : 1; - this.maxAttempts = 64L - Long.numberOfLeadingZeros(Long.MAX_VALUE / baseDelayMs) - ceil; - } - - /** - * The base delay in milliseconds for this policy (e.g. the delay before the first reconnection - * attempt). - * - * @return the base delay in milliseconds for this policy. - */ - public long getBaseDelayMs() { - return baseDelayMs; - } - - /** - * The maximum delay in milliseconds between reconnection attempts for this policy. 
- * - * @return the maximum delay in milliseconds between reconnection attempts for this policy. - */ - public long getMaxDelayMs() { - return maxDelayMs; - } - - @NonNull - @Override - public ReconnectionSchedule newNodeSchedule(@NonNull Node node) { - LOG.debug("[{}] Creating new schedule for {}", logPrefix, node); - return new ExponentialSchedule(); - } - - @NonNull - @Override - public ReconnectionSchedule newControlConnectionSchedule( - @SuppressWarnings("ignored") boolean isInitialConnection) { - LOG.debug("[{}] Creating new schedule for the control connection", logPrefix); - return new ExponentialSchedule(); - } - - @Override - public void close() { - // nothing to do - } - - private class ExponentialSchedule implements ReconnectionSchedule { - - private int attempts; - - @NonNull - @Override - public Duration nextDelay() { - long delay = (attempts > maxAttempts) ? maxDelayMs : calculateDelayWithJitter(); - return Duration.ofMillis(delay); - } - - private long calculateDelayWithJitter() { - // assert we haven't hit the max attempts - assert attempts <= maxAttempts; - // get the pure exponential delay based on the attempt count - long delay = Math.min(baseDelayMs * (1L << attempts++), maxDelayMs); - // calculate up to 15% jitter, plus or minus (i.e. 
85 - 115% of the pure value) - int jitter = ThreadLocalRandom.current().nextInt(85, 116); - // apply jitter - delay = (jitter * delay) / 100; - // ensure the final delay is between the base and max - delay = Math.min(maxDelayMs, Math.max(baseDelayMs, delay)); - return delay; - } - } - - public long getMaxAttempts() { - return maxAttempts; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java deleted file mode 100644 index 3074bda2398..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java +++ /dev/null @@ -1,1064 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import static com.datastax.oss.driver.internal.core.util.Dependency.JACKSON; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.internal.core.InsightsClientLifecycleListener; -import com.datastax.dse.driver.internal.core.type.codec.DseTypeCodecsRegistrar; -import com.datastax.dse.protocol.internal.DseProtocolV1ClientCodecs; -import com.datastax.dse.protocol.internal.DseProtocolV2ClientCodecs; -import com.datastax.dse.protocol.internal.ProtocolV4ClientCodecsForDse; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import 
com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.DefaultWriteCoalescer; -import com.datastax.oss.driver.internal.core.channel.WriteCoalescer; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.CloudTopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.DefaultTopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.MultiplexingNodeStateListener; -import com.datastax.oss.driver.internal.core.metadata.NoopNodeStateListener; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.schema.MultiplexingSchemaChangeListener; -import com.datastax.oss.driver.internal.core.metadata.schema.NoopSchemaChangeListener; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DefaultSchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.DefaultSchemaQueriesFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; -import 
com.datastax.oss.driver.internal.core.metadata.token.DefaultReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenFactoryRegistry; -import com.datastax.oss.driver.internal.core.metadata.token.ReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; -import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.protocol.BuiltInCompressors; -import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodec; -import com.datastax.oss.driver.internal.core.servererrors.DefaultWriteTypeRegistry; -import com.datastax.oss.driver.internal.core.servererrors.WriteTypeRegistry; -import com.datastax.oss.driver.internal.core.session.BuiltInRequestProcessors; -import com.datastax.oss.driver.internal.core.session.PoolManager; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.session.RequestProcessorRegistry; -import com.datastax.oss.driver.internal.core.ssl.JdkSslHandlerFactory; -import com.datastax.oss.driver.internal.core.ssl.SslHandlerFactory; -import com.datastax.oss.driver.internal.core.tracker.MultiplexingRequestTracker; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.driver.internal.core.tracker.RequestLogFormatter; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import com.datastax.oss.driver.internal.core.util.Reflection; -import com.datastax.oss.driver.internal.core.util.concurrent.CycleDetector; -import com.datastax.oss.driver.internal.core.util.concurrent.LazyReference; -import com.datastax.oss.protocol.internal.Compressor; -import 
com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import com.datastax.oss.protocol.internal.ProtocolV3ClientCodecs; -import com.datastax.oss.protocol.internal.ProtocolV5ClientCodecs; -import com.datastax.oss.protocol.internal.ProtocolV6ClientCodecs; -import com.datastax.oss.protocol.internal.SegmentCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.buffer.ByteBuf; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Predicate; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Default implementation of the driver context. - * - *

All non-constant components are initialized lazily. Some components depend on others, so there - * might be deadlocks or stack overflows if the dependency graph is badly designed. This can be - * checked automatically with the system property {@code - * -Dcom.datastax.oss.driver.DETECT_CYCLES=true} (this might have a slight impact on startup time, - * so the check is disabled by default). - * - *

This is DIY dependency injection. We stayed away from DI frameworks for simplicity, to avoid - * an extra dependency, and because end users might want to access some of these components in their - * own implementations (which wouldn't work well with compile-time approaches like Dagger). - * - *

This also provides extension points for stuff that is too low-level for the driver - * configuration: the intent is that someone can extend this class, override one (or more) of the - * buildXxx methods, and initialize the cluster with this new implementation. - */ -@ThreadSafe -public class DefaultDriverContext implements InternalDriverContext { - - private static final Logger LOG = LoggerFactory.getLogger(InternalDriverContext.class); - private static final AtomicInteger SESSION_NAME_COUNTER = new AtomicInteger(); - - protected final CycleDetector cycleDetector = - new CycleDetector("Detected cycle in context initialization"); - - private final LazyReference> loadBalancingPoliciesRef = - new LazyReference<>("loadBalancingPolicies", this::buildLoadBalancingPolicies, cycleDetector); - private final LazyReference reconnectionPolicyRef = - new LazyReference<>("reconnectionPolicy", this::buildReconnectionPolicy, cycleDetector); - private final LazyReference> retryPoliciesRef = - new LazyReference<>("retryPolicies", this::buildRetryPolicies, cycleDetector); - private final LazyReference> - speculativeExecutionPoliciesRef = - new LazyReference<>( - "speculativeExecutionPolicies", - this::buildSpeculativeExecutionPolicies, - cycleDetector); - private final LazyReference timestampGeneratorRef = - new LazyReference<>("timestampGenerator", this::buildTimestampGenerator, cycleDetector); - private final LazyReference addressTranslatorRef = - new LazyReference<>("addressTranslator", this::buildAddressTranslator, cycleDetector); - private final LazyReference> sslEngineFactoryRef; - - private final LazyReference eventBusRef = - new LazyReference<>("eventBus", this::buildEventBus, cycleDetector); - private final LazyReference> compressorRef = - new LazyReference<>("compressor", this::buildCompressor, cycleDetector); - private final LazyReference> primitiveCodecRef = - new LazyReference<>("primitiveCodec", this::buildPrimitiveCodec, cycleDetector); - private final LazyReference> 
frameCodecRef = - new LazyReference<>("frameCodec", this::buildFrameCodec, cycleDetector); - private final LazyReference> segmentCodecRef = - new LazyReference<>("segmentCodec", this::buildSegmentCodec, cycleDetector); - private final LazyReference protocolVersionRegistryRef = - new LazyReference<>( - "protocolVersionRegistry", this::buildProtocolVersionRegistry, cycleDetector); - private final LazyReference consistencyLevelRegistryRef = - new LazyReference<>( - "consistencyLevelRegistry", this::buildConsistencyLevelRegistry, cycleDetector); - private final LazyReference writeTypeRegistryRef = - new LazyReference<>("writeTypeRegistry", this::buildWriteTypeRegistry, cycleDetector); - private final LazyReference nettyOptionsRef = - new LazyReference<>("nettyOptions", this::buildNettyOptions, cycleDetector); - private final LazyReference writeCoalescerRef = - new LazyReference<>("writeCoalescer", this::buildWriteCoalescer, cycleDetector); - private final LazyReference> sslHandlerFactoryRef = - new LazyReference<>("sslHandlerFactory", this::buildSslHandlerFactory, cycleDetector); - private final LazyReference channelFactoryRef = - new LazyReference<>("channelFactory", this::buildChannelFactory, cycleDetector); - private final LazyReference topologyMonitorRef = - new LazyReference<>("topologyMonitor", this::buildTopologyMonitor, cycleDetector); - private final LazyReference metadataManagerRef = - new LazyReference<>("metadataManager", this::buildMetadataManager, cycleDetector); - private final LazyReference loadBalancingPolicyWrapperRef = - new LazyReference<>( - "loadBalancingPolicyWrapper", this::buildLoadBalancingPolicyWrapper, cycleDetector); - private final LazyReference controlConnectionRef = - new LazyReference<>("controlConnection", this::buildControlConnection, cycleDetector); - private final LazyReference requestProcessorRegistryRef = - new LazyReference<>( - "requestProcessorRegistry", this::buildRequestProcessorRegistry, cycleDetector); - private final 
LazyReference schemaQueriesFactoryRef = - new LazyReference<>("schemaQueriesFactory", this::buildSchemaQueriesFactory, cycleDetector); - private final LazyReference schemaParserFactoryRef = - new LazyReference<>("schemaParserFactory", this::buildSchemaParserFactory, cycleDetector); - private final LazyReference tokenFactoryRegistryRef = - new LazyReference<>("tokenFactoryRegistry", this::buildTokenFactoryRegistry, cycleDetector); - private final LazyReference replicationStrategyFactoryRef = - new LazyReference<>( - "replicationStrategyFactory", this::buildReplicationStrategyFactory, cycleDetector); - private final LazyReference poolManagerRef = - new LazyReference<>("poolManager", this::buildPoolManager, cycleDetector); - private final LazyReference metricsFactoryRef = - new LazyReference<>("metricsFactory", this::buildMetricsFactory, cycleDetector); - private final LazyReference metricIdGeneratorRef = - new LazyReference<>("metricIdGenerator", this::buildMetricIdGenerator, cycleDetector); - private final LazyReference requestThrottlerRef = - new LazyReference<>("requestThrottler", this::buildRequestThrottler, cycleDetector); - private final LazyReference startupOptionsRef = - new LazyReference<>("startupOptionsFactory", this::buildStartupOptionsFactory, cycleDetector); - private final LazyReference nodeStateListenerRef; - private final LazyReference schemaChangeListenerRef; - private final LazyReference requestTrackerRef; - private final LazyReference> requestIdGeneratorRef; - private final LazyReference> authProviderRef; - private final LazyReference> lifecycleListenersRef = - new LazyReference<>("lifecycleListeners", this::buildLifecycleListeners, cycleDetector); - - private final DriverConfig config; - private final DriverConfigLoader configLoader; - private final ChannelPoolFactory channelPoolFactory = new ChannelPoolFactory(); - private final CodecRegistry codecRegistry; - private final String sessionName; - private final NodeStateListener 
nodeStateListenerFromBuilder; - private final SchemaChangeListener schemaChangeListenerFromBuilder; - private final RequestTracker requestTrackerFromBuilder; - private final Map localDatacentersFromBuilder; - private final Map> nodeFiltersFromBuilder; - private final Map nodeDistanceEvaluatorsFromBuilder; - private final ClassLoader classLoader; - private final InetSocketAddress cloudProxyAddress; - private final LazyReference requestLogFormatterRef = - new LazyReference<>("requestLogFormatter", this::buildRequestLogFormatter, cycleDetector); - private final UUID startupClientId; - private final String startupApplicationName; - private final String startupApplicationVersion; - private final Object metricRegistry; - // A stack trace captured in the constructor. Used to extract information about the client - // application. - private final StackTraceElement[] initStackTrace; - - public DefaultDriverContext( - DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - this.config = configLoader.getInitialConfig(); - this.configLoader = configLoader; - DriverExecutionProfile defaultProfile = config.getDefaultProfile(); - if (defaultProfile.isDefined(DefaultDriverOption.SESSION_NAME)) { - this.sessionName = defaultProfile.getString(DefaultDriverOption.SESSION_NAME); - } else { - this.sessionName = "s" + SESSION_NAME_COUNTER.getAndIncrement(); - } - this.localDatacentersFromBuilder = programmaticArguments.getLocalDatacenters(); - this.codecRegistry = buildCodecRegistry(programmaticArguments); - this.nodeStateListenerFromBuilder = programmaticArguments.getNodeStateListener(); - this.nodeStateListenerRef = - new LazyReference<>( - "nodeStateListener", - () -> buildNodeStateListener(nodeStateListenerFromBuilder), - cycleDetector); - this.schemaChangeListenerFromBuilder = programmaticArguments.getSchemaChangeListener(); - this.schemaChangeListenerRef = - new LazyReference<>( - "schemaChangeListener", - () -> 
buildSchemaChangeListener(schemaChangeListenerFromBuilder), - cycleDetector); - this.requestTrackerFromBuilder = programmaticArguments.getRequestTracker(); - - this.authProviderRef = - new LazyReference<>( - "authProvider", - () -> buildAuthProvider(programmaticArguments.getAuthProvider()), - cycleDetector); - this.requestTrackerRef = - new LazyReference<>( - "requestTracker", () -> buildRequestTracker(requestTrackerFromBuilder), cycleDetector); - this.requestIdGeneratorRef = - new LazyReference<>( - "requestIdGenerator", - () -> buildRequestIdGenerator(programmaticArguments.getRequestIdGenerator()), - cycleDetector); - this.sslEngineFactoryRef = - new LazyReference<>( - "sslEngineFactory", - () -> buildSslEngineFactory(programmaticArguments.getSslEngineFactory()), - cycleDetector); - @SuppressWarnings("deprecation") - Map> nodeFilters = programmaticArguments.getNodeFilters(); - this.nodeFiltersFromBuilder = nodeFilters; - this.nodeDistanceEvaluatorsFromBuilder = programmaticArguments.getNodeDistanceEvaluators(); - this.classLoader = programmaticArguments.getClassLoader(); - this.cloudProxyAddress = programmaticArguments.getCloudProxyAddress(); - this.startupClientId = programmaticArguments.getStartupClientId(); - this.startupApplicationName = programmaticArguments.getStartupApplicationName(); - this.startupApplicationVersion = programmaticArguments.getStartupApplicationVersion(); - StackTraceElement[] stackTrace; - try { - stackTrace = Thread.currentThread().getStackTrace(); - } catch (Exception ex) { - // ignore and use empty - stackTrace = new StackTraceElement[] {}; - } - this.initStackTrace = stackTrace; - this.metricRegistry = programmaticArguments.getMetricRegistry(); - } - - /** - * @deprecated this constructor only exists for backward compatibility. Please use {@link - * #DefaultDriverContext(DriverConfigLoader, ProgrammaticArguments)} instead. 
- */ - @Deprecated - public DefaultDriverContext( - DriverConfigLoader configLoader, - List> typeCodecs, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - RequestTracker requestTracker, - Map localDatacenters, - Map> nodeFilters, - ClassLoader classLoader) { - this( - configLoader, - ProgrammaticArguments.builder() - .addTypeCodecs(typeCodecs.toArray(new TypeCodec[0])) - .withNodeStateListener(nodeStateListener) - .withSchemaChangeListener(schemaChangeListener) - .withRequestTracker(requestTracker) - .withLocalDatacenters(localDatacenters) - .withNodeFilters(nodeFilters) - .withClassLoader(classLoader) - .build()); - } - - /** - * Returns builder of options to send in a Startup message. - * - * @see #getStartupOptions() - */ - protected StartupOptionsBuilder buildStartupOptionsFactory() { - return new StartupOptionsBuilder(this) - .withClientId(startupClientId) - .withApplicationName(startupApplicationName) - .withApplicationVersion(startupApplicationVersion); - } - - protected Map buildLoadBalancingPolicies() { - return Reflection.buildFromConfigProfiles( - this, - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, - DefaultDriverOption.LOAD_BALANCING_POLICY, - LoadBalancingPolicy.class, - "com.datastax.oss.driver.internal.core.loadbalancing", - "com.datastax.dse.driver.internal.core.loadbalancing"); - } - - protected Map buildRetryPolicies() { - return Reflection.buildFromConfigProfiles( - this, - DefaultDriverOption.RETRY_POLICY_CLASS, - DefaultDriverOption.RETRY_POLICY, - RetryPolicy.class, - "com.datastax.oss.driver.internal.core.retry"); - } - - protected Map buildSpeculativeExecutionPolicies() { - return Reflection.buildFromConfigProfiles( - this, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY, - SpeculativeExecutionPolicy.class, - "com.datastax.oss.driver.internal.core.specex"); - } - - protected TimestampGenerator buildTimestampGenerator() { - return 
Reflection.buildFromConfig( - this, - DefaultDriverOption.TIMESTAMP_GENERATOR_CLASS, - TimestampGenerator.class, - "com.datastax.oss.driver.internal.core.time") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing timestamp generator, check your configuration (%s)", - DefaultDriverOption.TIMESTAMP_GENERATOR_CLASS))); - } - - protected ReconnectionPolicy buildReconnectionPolicy() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.RECONNECTION_POLICY_CLASS, - ReconnectionPolicy.class, - "com.datastax.oss.driver.internal.core.connection") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing reconnection policy, check your configuration (%s)", - DefaultDriverOption.RECONNECTION_POLICY_CLASS))); - } - - protected AddressTranslator buildAddressTranslator() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS, - AddressTranslator.class, - "com.datastax.oss.driver.internal.core.addresstranslation") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing address translator, check your configuration (%s)", - DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS))); - } - - protected Optional buildSslEngineFactory(SslEngineFactory factoryFromBuilder) { - return (factoryFromBuilder != null) - ? 
Optional.of(factoryFromBuilder) - : Reflection.buildFromConfig( - this, - DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, - SslEngineFactory.class, - "com.datastax.oss.driver.internal.core.ssl"); - } - - protected EventBus buildEventBus() { - return new EventBus(getSessionName()); - } - - protected Compressor buildCompressor() { - DriverExecutionProfile defaultProfile = getConfig().getDefaultProfile(); - String name = defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none"); - assert name != null : "should use default value"; - return BuiltInCompressors.newInstance(name, this); - } - - protected PrimitiveCodec buildPrimitiveCodec() { - return new ByteBufPrimitiveCodec(getNettyOptions().allocator()); - } - - protected FrameCodec buildFrameCodec() { - return new FrameCodec<>( - getPrimitiveCodec(), - getCompressor(), - new ProtocolV3ClientCodecs(), - new ProtocolV4ClientCodecsForDse(), - new ProtocolV5ClientCodecs(), - new ProtocolV6ClientCodecs(), - new DseProtocolV1ClientCodecs(), - new DseProtocolV2ClientCodecs()); - } - - protected SegmentCodec buildSegmentCodec() { - return new SegmentCodec<>(getPrimitiveCodec(), getCompressor()); - } - - protected ProtocolVersionRegistry buildProtocolVersionRegistry() { - return new DefaultProtocolVersionRegistry(getSessionName()); - } - - protected ConsistencyLevelRegistry buildConsistencyLevelRegistry() { - return new DefaultConsistencyLevelRegistry(); - } - - protected WriteTypeRegistry buildWriteTypeRegistry() { - return new DefaultWriteTypeRegistry(); - } - - protected NettyOptions buildNettyOptions() { - return new DefaultNettyOptions(this); - } - - protected Optional buildSslHandlerFactory() { - // If a JDK-based factory was provided through the public API, wrap it - return getSslEngineFactory().map(JdkSslHandlerFactory::new); - - // For more advanced options (like using Netty's native OpenSSL support instead of the JDK), - // extend DefaultDriverContext and override this method - } - - protected 
WriteCoalescer buildWriteCoalescer() { - return new DefaultWriteCoalescer(this); - } - - protected ChannelFactory buildChannelFactory() { - return new ChannelFactory(this); - } - - protected TopologyMonitor buildTopologyMonitor() { - if (cloudProxyAddress == null) { - return new DefaultTopologyMonitor(this); - } - return new CloudTopologyMonitor(this, cloudProxyAddress); - } - - protected MetadataManager buildMetadataManager() { - return new MetadataManager(this); - } - - protected LoadBalancingPolicyWrapper buildLoadBalancingPolicyWrapper() { - return new LoadBalancingPolicyWrapper(this, getLoadBalancingPolicies()); - } - - protected ControlConnection buildControlConnection() { - return new ControlConnection(this); - } - - protected RequestProcessorRegistry buildRequestProcessorRegistry() { - List> processors = - BuiltInRequestProcessors.createDefaultProcessors(this); - return new RequestProcessorRegistry( - getSessionName(), processors.toArray(new RequestProcessor[0])); - } - - protected CodecRegistry buildCodecRegistry(ProgrammaticArguments arguments) { - MutableCodecRegistry registry = arguments.getCodecRegistry(); - if (registry == null) { - registry = new DefaultCodecRegistry(this.sessionName); - } - registry.register(arguments.getTypeCodecs()); - DseTypeCodecsRegistrar.registerDseCodecs(registry); - return registry; - } - - protected SchemaQueriesFactory buildSchemaQueriesFactory() { - return new DefaultSchemaQueriesFactory(this); - } - - protected SchemaParserFactory buildSchemaParserFactory() { - return new DefaultSchemaParserFactory(this); - } - - protected TokenFactoryRegistry buildTokenFactoryRegistry() { - return new DefaultTokenFactoryRegistry(this); - } - - protected ReplicationStrategyFactory buildReplicationStrategyFactory() { - return new DefaultReplicationStrategyFactory(this); - } - - protected PoolManager buildPoolManager() { - return new PoolManager(this); - } - - protected MetricsFactory buildMetricsFactory() { - return 
Reflection.buildFromConfig( - this, - DefaultDriverOption.METRICS_FACTORY_CLASS, - MetricsFactory.class, - "com.datastax.oss.driver.internal.core.metrics", - "com.datastax.oss.driver.internal.metrics.microprofile", - "com.datastax.oss.driver.internal.metrics.micrometer") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing metrics factory, check your config (%s)", - DefaultDriverOption.METRICS_FACTORY_CLASS))); - } - - protected MetricIdGenerator buildMetricIdGenerator() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.METRICS_ID_GENERATOR_CLASS, - MetricIdGenerator.class, - "com.datastax.oss.driver.internal.core.metrics") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing metric descriptor, check your config (%s)", - DefaultDriverOption.METRICS_ID_GENERATOR_CLASS))); - } - - protected RequestThrottler buildRequestThrottler() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.REQUEST_THROTTLER_CLASS, - RequestThrottler.class, - "com.datastax.oss.driver.internal.core.session.throttling") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing request throttler, check your configuration (%s)", - DefaultDriverOption.REQUEST_THROTTLER_CLASS))); - } - - protected NodeStateListener buildNodeStateListener( - NodeStateListener nodeStateListenerFromBuilder) { - List listeners = new ArrayList<>(); - if (nodeStateListenerFromBuilder != null) { - listeners.add(nodeStateListenerFromBuilder); - } - DefaultDriverOption newOption = DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES; - @SuppressWarnings("deprecation") - DefaultDriverOption legacyOption = DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASS; - DriverExecutionProfile profile = config.getDefaultProfile(); - if (profile.isDefined(newOption)) { - listeners.addAll( - Reflection.buildFromConfigList( - this, - newOption, - NodeStateListener.class, - 
"com.datastax.oss.driver.internal.core.metadata")); - } - if (profile.isDefined(legacyOption)) { - LOG.warn( - "Option {} has been deprecated and will be removed in a future release; please use option {} instead.", - legacyOption, - newOption); - Reflection.buildFromConfig( - this, - legacyOption, - NodeStateListener.class, - "com.datastax.oss.driver.internal.core.metadata") - .ifPresent(listeners::add); - } - if (listeners.isEmpty()) { - return new NoopNodeStateListener(this); - } else if (listeners.size() == 1) { - return listeners.get(0); - } else { - return new MultiplexingNodeStateListener(listeners); - } - } - - protected SchemaChangeListener buildSchemaChangeListener( - SchemaChangeListener schemaChangeListenerFromBuilder) { - List listeners = new ArrayList<>(); - if (schemaChangeListenerFromBuilder != null) { - listeners.add(schemaChangeListenerFromBuilder); - } - DefaultDriverOption newOption = DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES; - @SuppressWarnings("deprecation") - DefaultDriverOption legacyOption = DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASS; - DriverExecutionProfile profile = config.getDefaultProfile(); - if (profile.isDefined(newOption)) { - listeners.addAll( - Reflection.buildFromConfigList( - this, - newOption, - SchemaChangeListener.class, - "com.datastax.oss.driver.internal.core.metadata.schema")); - } - if (profile.isDefined(legacyOption)) { - LOG.warn( - "Option {} has been deprecated and will be removed in a future release; please use option {} instead.", - legacyOption, - newOption); - Reflection.buildFromConfig( - this, - legacyOption, - SchemaChangeListener.class, - "com.datastax.oss.driver.internal.core.metadata.schema") - .ifPresent(listeners::add); - } - if (listeners.isEmpty()) { - return new NoopSchemaChangeListener(this); - } else if (listeners.size() == 1) { - return listeners.get(0); - } else { - return new MultiplexingSchemaChangeListener(listeners); - } - } - - protected RequestTracker 
buildRequestTracker(RequestTracker requestTrackerFromBuilder) { - List trackers = new ArrayList<>(); - if (requestTrackerFromBuilder != null) { - trackers.add(requestTrackerFromBuilder); - } - for (LoadBalancingPolicy lbp : this.getLoadBalancingPolicies().values()) { - lbp.getRequestTracker().ifPresent(trackers::add); - } - DefaultDriverOption newOption = DefaultDriverOption.REQUEST_TRACKER_CLASSES; - @SuppressWarnings("deprecation") - DefaultDriverOption legacyOption = DefaultDriverOption.REQUEST_TRACKER_CLASS; - DriverExecutionProfile profile = config.getDefaultProfile(); - if (profile.isDefined(newOption)) { - trackers.addAll( - Reflection.buildFromConfigList( - this, - newOption, - RequestTracker.class, - "com.datastax.oss.driver.internal.core.tracker")); - } - if (profile.isDefined(legacyOption)) { - LOG.warn( - "Option {} has been deprecated and will be removed in a future release; please use option {} instead.", - legacyOption, - newOption); - Reflection.buildFromConfig( - this, - legacyOption, - RequestTracker.class, - "com.datastax.oss.driver.internal.core.tracker") - .ifPresent(trackers::add); - } - if (trackers.isEmpty()) { - return new NoopRequestTracker(this); - } else if (trackers.size() == 1) { - return trackers.get(0); - } else { - return new MultiplexingRequestTracker(trackers); - } - } - - protected Optional buildRequestIdGenerator( - RequestIdGenerator requestIdGenerator) { - return (requestIdGenerator != null) - ? Optional.of(requestIdGenerator) - : Reflection.buildFromConfig( - this, - DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, - RequestIdGenerator.class, - "com.datastax.oss.driver.internal.core.tracker"); - } - - protected Optional buildAuthProvider(AuthProvider authProviderFromBuilder) { - return (authProviderFromBuilder != null) - ? 
Optional.of(authProviderFromBuilder) - : Reflection.buildFromConfig( - this, - DefaultDriverOption.AUTH_PROVIDER_CLASS, - AuthProvider.class, - "com.datastax.oss.driver.internal.core.auth", - "com.datastax.dse.driver.internal.core.auth"); - } - - protected List buildLifecycleListeners() { - if (DefaultDependencyChecker.isPresent(JACKSON)) { - return Collections.singletonList(new InsightsClientLifecycleListener(this, initStackTrace)); - } else { - if (config.getDefaultProfile().getBoolean(DseDriverOption.MONITOR_REPORTING_ENABLED)) { - LOG.info( - "Could not initialize Insights monitoring; " - + "this is normal if Jackson was explicitly excluded from classpath"); - } - return Collections.emptyList(); - } - } - - @NonNull - @Override - public String getSessionName() { - return sessionName; - } - - @NonNull - @Override - public DriverConfig getConfig() { - return config; - } - - @NonNull - @Override - public DriverConfigLoader getConfigLoader() { - return configLoader; - } - - @NonNull - @Override - public Map getLoadBalancingPolicies() { - return loadBalancingPoliciesRef.get(); - } - - @NonNull - @Override - public Map getRetryPolicies() { - return retryPoliciesRef.get(); - } - - @NonNull - @Override - public Map getSpeculativeExecutionPolicies() { - return speculativeExecutionPoliciesRef.get(); - } - - @NonNull - @Override - public TimestampGenerator getTimestampGenerator() { - return timestampGeneratorRef.get(); - } - - @NonNull - @Override - public ReconnectionPolicy getReconnectionPolicy() { - return reconnectionPolicyRef.get(); - } - - @NonNull - @Override - public AddressTranslator getAddressTranslator() { - return addressTranslatorRef.get(); - } - - @NonNull - @Override - public Optional getAuthProvider() { - return authProviderRef.get(); - } - - @NonNull - @Override - public Optional getSslEngineFactory() { - return sslEngineFactoryRef.get(); - } - - @NonNull - @Override - public EventBus getEventBus() { - return eventBusRef.get(); - } - - @NonNull - 
@Override - public Compressor getCompressor() { - return compressorRef.get(); - } - - @NonNull - @Override - public PrimitiveCodec getPrimitiveCodec() { - return primitiveCodecRef.get(); - } - - @NonNull - @Override - public FrameCodec getFrameCodec() { - return frameCodecRef.get(); - } - - @NonNull - @Override - public SegmentCodec getSegmentCodec() { - return segmentCodecRef.get(); - } - - @NonNull - @Override - public ProtocolVersionRegistry getProtocolVersionRegistry() { - return protocolVersionRegistryRef.get(); - } - - @NonNull - @Override - public ConsistencyLevelRegistry getConsistencyLevelRegistry() { - return consistencyLevelRegistryRef.get(); - } - - @NonNull - @Override - public WriteTypeRegistry getWriteTypeRegistry() { - return writeTypeRegistryRef.get(); - } - - @NonNull - @Override - public NettyOptions getNettyOptions() { - return nettyOptionsRef.get(); - } - - @NonNull - @Override - public WriteCoalescer getWriteCoalescer() { - return writeCoalescerRef.get(); - } - - @NonNull - @Override - public Optional getSslHandlerFactory() { - return sslHandlerFactoryRef.get(); - } - - @NonNull - @Override - public ChannelFactory getChannelFactory() { - return channelFactoryRef.get(); - } - - @NonNull - @Override - public ChannelPoolFactory getChannelPoolFactory() { - return channelPoolFactory; - } - - @NonNull - @Override - public TopologyMonitor getTopologyMonitor() { - return topologyMonitorRef.get(); - } - - @NonNull - @Override - public MetadataManager getMetadataManager() { - return metadataManagerRef.get(); - } - - @NonNull - @Override - public LoadBalancingPolicyWrapper getLoadBalancingPolicyWrapper() { - return loadBalancingPolicyWrapperRef.get(); - } - - @NonNull - @Override - public ControlConnection getControlConnection() { - return controlConnectionRef.get(); - } - - @NonNull - @Override - public RequestProcessorRegistry getRequestProcessorRegistry() { - return requestProcessorRegistryRef.get(); - } - - @NonNull - @Override - public 
SchemaQueriesFactory getSchemaQueriesFactory() { - return schemaQueriesFactoryRef.get(); - } - - @NonNull - @Override - public SchemaParserFactory getSchemaParserFactory() { - return schemaParserFactoryRef.get(); - } - - @NonNull - @Override - public TokenFactoryRegistry getTokenFactoryRegistry() { - return tokenFactoryRegistryRef.get(); - } - - @NonNull - @Override - public ReplicationStrategyFactory getReplicationStrategyFactory() { - return replicationStrategyFactoryRef.get(); - } - - @NonNull - @Override - public PoolManager getPoolManager() { - return poolManagerRef.get(); - } - - @NonNull - @Override - public MetricsFactory getMetricsFactory() { - return metricsFactoryRef.get(); - } - - @NonNull - @Override - public MetricIdGenerator getMetricIdGenerator() { - return metricIdGeneratorRef.get(); - } - - @NonNull - @Override - public RequestThrottler getRequestThrottler() { - return requestThrottlerRef.get(); - } - - @NonNull - @Override - public NodeStateListener getNodeStateListener() { - return nodeStateListenerRef.get(); - } - - @NonNull - @Override - public SchemaChangeListener getSchemaChangeListener() { - return schemaChangeListenerRef.get(); - } - - @NonNull - @Override - public RequestTracker getRequestTracker() { - return requestTrackerRef.get(); - } - - @NonNull - @Override - public Optional getRequestIdGenerator() { - return requestIdGeneratorRef.get(); - } - - @Nullable - @Override - public String getLocalDatacenter(@NonNull String profileName) { - return localDatacentersFromBuilder.get(profileName); - } - - @Nullable - @Override - @Deprecated - public Predicate getNodeFilter(@NonNull String profileName) { - return nodeFiltersFromBuilder.get(profileName); - } - - @Nullable - @Override - public NodeDistanceEvaluator getNodeDistanceEvaluator(@NonNull String profileName) { - return nodeDistanceEvaluatorsFromBuilder.get(profileName); - } - - @Nullable - @Override - public ClassLoader getClassLoader() { - return classLoader; - } - - @NonNull - @Override 
- public CodecRegistry getCodecRegistry() { - return codecRegistry; - } - - @NonNull - @Override - public ProtocolVersion getProtocolVersion() { - return getChannelFactory().getProtocolVersion(); - } - - @NonNull - @Override - public Map getStartupOptions() { - // startup options are calculated dynamically and may vary per connection - return startupOptionsRef.get().build(); - } - - protected RequestLogFormatter buildRequestLogFormatter() { - return new RequestLogFormatter(this); - } - - @NonNull - @Override - public RequestLogFormatter getRequestLogFormatter() { - return requestLogFormatterRef.get(); - } - - @NonNull - @Override - public List getLifecycleListeners() { - return lifecycleListenersRef.get(); - } - - @Nullable - @Override - public Object getMetricRegistry() { - return metricRegistry; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java deleted file mode 100644 index 763a71f8b12..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.PromiseCombiner; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ThreadFactoryBuilder; -import io.netty.bootstrap.Bootstrap; -import io.netty.buffer.ByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.ChannelOption; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.FixedRecvByteBufAllocator; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.nio.NioSocketChannel; -import io.netty.util.HashedWheelTimer; -import io.netty.util.Timer; -import io.netty.util.concurrent.DefaultPromise; -import io.netty.util.concurrent.EventExecutorGroup; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GlobalEventExecutor; -import io.netty.util.internal.PlatformDependent; -import java.time.Duration; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Immutable -public class DefaultNettyOptions implements NettyOptions { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultNettyOptions.class); - - private final DriverExecutionProfile config; - private final EventLoopGroup ioEventLoopGroup; - private final EventLoopGroup adminEventLoopGroup; - private final int ioShutdownQuietPeriod; - private final int ioShutdownTimeout; - private final TimeUnit ioShutdownUnit; - private final int adminShutdownQuietPeriod; - private final int 
adminShutdownTimeout; - private final TimeUnit adminShutdownUnit; - private final Timer timer; - - public DefaultNettyOptions(InternalDriverContext context) { - this.config = context.getConfig().getDefaultProfile(); - boolean daemon = config.getBoolean(DefaultDriverOption.NETTY_DAEMON); - int ioGroupSize = config.getInt(DefaultDriverOption.NETTY_IO_SIZE); - this.ioShutdownQuietPeriod = config.getInt(DefaultDriverOption.NETTY_IO_SHUTDOWN_QUIET_PERIOD); - this.ioShutdownTimeout = config.getInt(DefaultDriverOption.NETTY_IO_SHUTDOWN_TIMEOUT); - this.ioShutdownUnit = - TimeUnit.valueOf(config.getString(DefaultDriverOption.NETTY_IO_SHUTDOWN_UNIT)); - int adminGroupSize = config.getInt(DefaultDriverOption.NETTY_ADMIN_SIZE); - this.adminShutdownQuietPeriod = - config.getInt(DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD); - this.adminShutdownTimeout = config.getInt(DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_TIMEOUT); - this.adminShutdownUnit = - TimeUnit.valueOf(config.getString(DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_UNIT)); - - ThreadFactory safeFactory = new BlockingOperation.SafeThreadFactory(); - ThreadFactory ioThreadFactory = - new ThreadFactoryBuilder() - .setThreadFactory(safeFactory) - .setNameFormat(context.getSessionName() + "-io-%d") - .setDaemon(daemon) - .build(); - this.ioEventLoopGroup = new NioEventLoopGroup(ioGroupSize, ioThreadFactory); - - ThreadFactory adminThreadFactory = - new ThreadFactoryBuilder() - .setThreadFactory(safeFactory) - .setNameFormat(context.getSessionName() + "-admin-%d") - .setDaemon(daemon) - .build(); - this.adminEventLoopGroup = new DefaultEventLoopGroup(adminGroupSize, adminThreadFactory); - // setup the Timer - ThreadFactory timerThreadFactory = - new ThreadFactoryBuilder() - .setThreadFactory(safeFactory) - .setNameFormat(context.getSessionName() + "-timer-%d") - .setDaemon(daemon) - .build(); - - Duration tickDuration = config.getDuration(DefaultDriverOption.NETTY_TIMER_TICK_DURATION); - // JAVA-2264: tick durations on 
Windows cannot be less than 100 milliseconds, - // see https://github.com/netty/netty/issues/356. - if (PlatformDependent.isWindows() && tickDuration.toMillis() < 100) { - LOG.warn( - "Timer tick duration was set to a value too aggressive for Windows: {} ms; " - + "doing so is known to cause extreme CPU usage. " - + "Please set advanced.netty.timer.tick-duration to 100 ms or higher.", - tickDuration.toMillis()); - } - this.timer = createTimer(timerThreadFactory, tickDuration); - } - - private HashedWheelTimer createTimer(ThreadFactory timerThreadFactory, Duration tickDuration) { - HashedWheelTimer timer = - new HashedWheelTimer( - timerThreadFactory, - tickDuration.toNanos(), - TimeUnit.NANOSECONDS, - config.getInt(DefaultDriverOption.NETTY_TIMER_TICKS_PER_WHEEL)); - // Start the background thread eagerly during session initialization because - // it is a blocking operation. - timer.start(); - return timer; - } - - @Override - public EventLoopGroup ioEventLoopGroup() { - return ioEventLoopGroup; - } - - @Override - public EventExecutorGroup adminEventExecutorGroup() { - return adminEventLoopGroup; - } - - @Override - public Class channelClass() { - return NioSocketChannel.class; - } - - @Override - public ByteBufAllocator allocator() { - return ByteBufAllocator.DEFAULT; - } - - @Override - public void afterBootstrapInitialized(Bootstrap bootstrap) { - boolean tcpNoDelay = config.getBoolean(DefaultDriverOption.SOCKET_TCP_NODELAY); - bootstrap.option(ChannelOption.TCP_NODELAY, tcpNoDelay); - if (config.isDefined(DefaultDriverOption.SOCKET_KEEP_ALIVE)) { - boolean keepAlive = config.getBoolean(DefaultDriverOption.SOCKET_KEEP_ALIVE); - bootstrap.option(ChannelOption.SO_KEEPALIVE, keepAlive); - } - if (config.isDefined(DefaultDriverOption.SOCKET_REUSE_ADDRESS)) { - boolean reuseAddress = config.getBoolean(DefaultDriverOption.SOCKET_REUSE_ADDRESS); - bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress); - } - if 
(config.isDefined(DefaultDriverOption.SOCKET_LINGER_INTERVAL)) { - int lingerInterval = config.getInt(DefaultDriverOption.SOCKET_LINGER_INTERVAL); - bootstrap.option(ChannelOption.SO_LINGER, lingerInterval); - } - if (config.isDefined(DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE)) { - int receiveBufferSize = config.getInt(DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE); - bootstrap - .option(ChannelOption.SO_RCVBUF, receiveBufferSize) - .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(receiveBufferSize)); - } - if (config.isDefined(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE)) { - int sendBufferSize = config.getInt(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE); - bootstrap.option(ChannelOption.SO_SNDBUF, sendBufferSize); - } - if (config.isDefined(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT)) { - Duration connectTimeout = config.getDuration(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT); - bootstrap.option( - ChannelOption.CONNECT_TIMEOUT_MILLIS, Long.valueOf(connectTimeout.toMillis()).intValue()); - } - } - - @Override - public void afterChannelInitialized(Channel channel) { - // nothing to do - } - - @Override - public Future onClose() { - DefaultPromise closeFuture = new DefaultPromise<>(GlobalEventExecutor.INSTANCE); - GlobalEventExecutor.INSTANCE.execute( - () -> - PromiseCombiner.combine( - closeFuture, - adminEventLoopGroup.shutdownGracefully( - adminShutdownQuietPeriod, adminShutdownTimeout, adminShutdownUnit), - ioEventLoopGroup.shutdownGracefully( - ioShutdownQuietPeriod, ioShutdownTimeout, ioShutdownUnit))); - closeFuture.addListener(f -> timer.stop()); - return closeFuture; - } - - @Override - public Timer getTimer() { - return timer; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java deleted file mode 100644 index dd9ccaa9979..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.shaded.guava.common.collect.HashMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.Multimaps; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; -import java.util.function.Consumer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Barebones event bus implementation, that allows components to communicate without knowing about - * each other. - * - *

This is intended for administrative events (topology changes, new connections, etc.), which - * are comparatively rare in the driver. Do not use it for anything on the request path, because it - * relies on synchronization. - * - *

We don't use Guava's implementation because Guava is shaded in the driver, and the event bus - * needs to be accessible from low-level 3rd party customizations. - */ -@ThreadSafe -public class EventBus { - private static final Logger LOG = LoggerFactory.getLogger(EventBus.class); - - private final String logPrefix; - private final SetMultimap, Consumer> listeners = - Multimaps.synchronizedSetMultimap(HashMultimap.create()); - - public EventBus(String logPrefix) { - this.logPrefix = logPrefix; - } - - /** - * Registers a listener for an event type. - * - *

If the listener has a shorter lifecycle than the {@code Cluster} instance, it is recommended - * to save the key returned by this method, and use it later to unregister and therefore avoid a - * leak. - * - * @return a key that is needed to unregister later. - */ - public Object register(Class eventClass, Consumer listener) { - LOG.debug("[{}] Registering {} for {}", logPrefix, listener, eventClass); - listeners.put(eventClass, listener); - // The reason for the key mechanism is that this will often be used with method references, - // and you get a different object every time you reference a method, so register(Foo::bar) - // followed by unregister(Foo::bar) wouldn't work as expected. - return listener; - } - - /** - * Unregisters a listener. - * - * @param key the key that was returned by {@link #register(Class, Consumer)} - */ - public boolean unregister(Object key, Class eventClass) { - LOG.debug("[{}] Unregistering {} for {}", logPrefix, key, eventClass); - return listeners.remove(eventClass, key); - } - - /** - * Sends an event that will notify any registered listener for that class. - * - *

Listeners are looked up by an exact match on the class of the object, as returned by - * {@code event.getClass()}. Listeners of a supertype won't be notified. - * - *

The listeners are invoked on the calling thread. It's their responsibility to schedule event - * processing asynchronously if needed. - */ - public void fire(Object event) { - LOG.debug("[{}] Firing an instance of {}: {}", logPrefix, event.getClass(), event); - // if the exact match thing gets too cumbersome, we can reconsider, but I'd like to avoid - // scanning all the keys with instanceof checks. - Class eventClass = event.getClass(); - for (Consumer l : listeners.get(eventClass)) { - @SuppressWarnings("unchecked") - Consumer listener = (Consumer) l; - LOG.debug("[{}] Notifying {} of {}", logPrefix, listener, event); - listener.accept(event); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java deleted file mode 100644 index 81349b0c665..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.WriteCoalescer; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; -import com.datastax.oss.driver.internal.core.metadata.token.ReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; -import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.servererrors.WriteTypeRegistry; -import com.datastax.oss.driver.internal.core.session.PoolManager; -import com.datastax.oss.driver.internal.core.session.RequestProcessorRegistry; -import com.datastax.oss.driver.internal.core.ssl.SslHandlerFactory; -import com.datastax.oss.driver.internal.core.tracker.RequestLogFormatter; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.FrameCodec; -import 
com.datastax.oss.protocol.internal.PrimitiveCodec; -import com.datastax.oss.protocol.internal.SegmentCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.buffer.ByteBuf; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.function.Predicate; - -/** Extends the driver context with additional components that are not exposed by our public API. */ -public interface InternalDriverContext extends DriverContext { - - @NonNull - EventBus getEventBus(); - - @NonNull - Compressor getCompressor(); - - @NonNull - PrimitiveCodec getPrimitiveCodec(); - - @NonNull - FrameCodec getFrameCodec(); - - @NonNull - SegmentCodec getSegmentCodec(); - - @NonNull - ProtocolVersionRegistry getProtocolVersionRegistry(); - - @NonNull - ConsistencyLevelRegistry getConsistencyLevelRegistry(); - - @NonNull - WriteTypeRegistry getWriteTypeRegistry(); - - @NonNull - NettyOptions getNettyOptions(); - - @NonNull - WriteCoalescer getWriteCoalescer(); - - @NonNull - Optional getSslHandlerFactory(); - - @NonNull - ChannelFactory getChannelFactory(); - - @NonNull - ChannelPoolFactory getChannelPoolFactory(); - - @NonNull - TopologyMonitor getTopologyMonitor(); - - @NonNull - MetadataManager getMetadataManager(); - - @NonNull - LoadBalancingPolicyWrapper getLoadBalancingPolicyWrapper(); - - @NonNull - ControlConnection getControlConnection(); - - @NonNull - RequestProcessorRegistry getRequestProcessorRegistry(); - - @NonNull - SchemaQueriesFactory getSchemaQueriesFactory(); - - @NonNull - SchemaParserFactory getSchemaParserFactory(); - - @NonNull - TokenFactoryRegistry getTokenFactoryRegistry(); - - @NonNull - ReplicationStrategyFactory getReplicationStrategyFactory(); - - @NonNull - PoolManager getPoolManager(); - - @NonNull - MetricsFactory getMetricsFactory(); - - @NonNull - MetricIdGenerator getMetricIdGenerator(); - - /** - * The value that was passed to {@link 
SessionBuilder#withLocalDatacenter(String,String)} for this - * particular profile. If it was specified through the configuration instead, this method will - * return {@code null}. - */ - @Nullable - String getLocalDatacenter(@NonNull String profileName); - - /** - * This is the filter from {@link SessionBuilder#withNodeFilter(String, Predicate)}. If the filter - * for this profile was specified through the configuration instead, this method will return - * {@code null}. - * - * @deprecated Use {@link #getNodeDistanceEvaluator(String)} instead. - */ - @Nullable - @Deprecated - Predicate getNodeFilter(@NonNull String profileName); - - /** - * This is the node distance evaluator from {@link - * SessionBuilder#withNodeDistanceEvaluator(String, NodeDistanceEvaluator)}. If the evaluator for - * this profile was specified through the configuration instead, this method will return {@code - * null}. - */ - @Nullable - NodeDistanceEvaluator getNodeDistanceEvaluator(@NonNull String profileName); - - /** - * The {@link ClassLoader} to use to reflectively load class names defined in configuration. If - * null, the driver attempts to use the same {@link ClassLoader} that loaded the core driver - * classes. - */ - @Nullable - ClassLoader getClassLoader(); - - /** - * Retrieves the map of options to send in a Startup message. The returned map will be used to - * construct a {@link com.datastax.oss.protocol.internal.request.Startup} instance when - * initializing the native protocol handshake. - */ - @NonNull - Map getStartupOptions(); - - /** - * A list of additional components to notify of session lifecycle events. - * - *

For historical reasons, this method has a default implementation that returns an empty list. - * The built-in {@link DefaultDriverContext} overrides it to plug in the Insights monitoring - * listener. Custom driver extensions might override this method to add their own components. - * - *

Note that the driver assumes that the returned list is constant; there is no way to add - * listeners dynamically. - */ - @NonNull - default List getLifecycleListeners() { - return Collections.emptyList(); - } - - /** - * A {@link RequestLogFormatter} instance based on this {@link DriverContext}. - * - *

The {@link RequestLogFormatter} instance returned here will use the settings in - * advanced.request-tracker when formatting requests. - */ - @NonNull - RequestLogFormatter getRequestLogFormatter(); - - /** - * A metric registry for storing metrics. - * - *

This will return the object from {@link - * SessionBuilder#withMetricRegistry(java.lang.Object)}. Access to this registry object is only - * intended for {@link MetricsFactory} implementations that need to expose a way to specify the - * registry external to the Factory implementation itself. - * - *

The default metrics framework used by the Driver is DropWizard and does not need an external - * metrics registry object. - */ - @Nullable - default Object getMetricRegistry() { - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java deleted file mode 100644 index 39993e7094f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.api.core.session.SessionBuilder; - -/** A component that gets notified of certain events in the session's lifecycle. */ -public interface LifecycleListener extends AutoCloseable { - - /** - * Invoked when the session is ready to process user requests. - * - *

This corresponds to the moment when the {@link SessionBuilder#build()} returns, or the - * future returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization - * fails, this method will not get called. - * - *

This method is invoked on a driver thread, it should complete relatively quickly and not - * block. - */ - void onSessionReady(); - - /** - * Invoked when the session shuts down. - * - *

Implementations should perform any necessary cleanup, for example freeing resources or - * cancelling scheduled tasks. - * - *

Note that this method gets called even if the shutdown results from a failed initialization. - * In that case, implementations should be ready to handle a call to this method even though - * {@link #onSessionReady()} hasn't been invoked. - * - *

This method is invoked on a driver thread, it should complete relatively quickly and not - * block. - */ - @Override - void close() throws Exception; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java deleted file mode 100644 index 5b4ff4dcec8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import io.netty.bootstrap.Bootstrap; -import io.netty.buffer.ByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.EventLoopGroup; -import io.netty.util.Timer; -import io.netty.util.concurrent.EventExecutorGroup; -import io.netty.util.concurrent.Future; - -/** Low-level hooks to control certain aspects of Netty usage in the driver. */ -public interface NettyOptions { - - /** - * The event loop group that will be used for I/O. This must always return the same instance. - * - *

It is highly recommended that the threads in this event loop group be created by a {@link - * BlockingOperation.SafeThreadFactory}, so that the driver can protect against deadlocks - * introduced by bad client code. - */ - EventLoopGroup ioEventLoopGroup(); - - /** - * The class to create {@code Channel} instances from. This must be consistent with {@link - * #ioEventLoopGroup()}. - */ - Class channelClass(); - - /** - * An event executor group that will be used to schedule all tasks not related to request I/O: - * cluster events, refreshing metadata, reconnection, etc. - * - *

This must always return the same instance (it can be the same object as {@link - * #ioEventLoopGroup()}). - * - *

It is highly recommended that the threads in this event loop group be created by a {@link - * BlockingOperation.SafeThreadFactory}, so that the driver can protect against deadlocks - * introduced by bad client code. - */ - EventExecutorGroup adminEventExecutorGroup(); - - /** - * The byte buffer allocator to use. This must always return the same instance. Note that this is - * also used by the default implementation of {@link InternalDriverContext#getFrameCodec()}, and - * the built-in {@link com.datastax.oss.protocol.internal.Compressor} implementations. - */ - ByteBufAllocator allocator(); - - /** - * A hook invoked each time the driver creates a client bootstrap in order to open a channel. This - * is a good place to configure any custom option on the bootstrap. - */ - void afterBootstrapInitialized(Bootstrap bootstrap); - - /** - * A hook invoked on each channel, right after the channel has initialized it. This is a good - * place to register any custom handler on the channel's pipeline (note that built-in driver - * handlers are already installed at that point). - */ - void afterChannelInitialized(Channel channel); - - /** - * A hook involved when the driver instance shuts down. This is a good place to free any resources - * that you have allocated elsewhere in this component, for example shut down custom event loop - * groups. - */ - Future onClose(); - - /** - * The Timer on which non-I/O events should be scheduled. This must always return the same - * instance. This timer should be used for things like request timeout events and scheduling - * speculative executions. Under high load, scheduling these non-I/O events on a separate, lower - * resolution timer will allow for higher overall I/O throughput. 
- */ - Timer getTimer(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java deleted file mode 100644 index 89a9266b3ac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.request.Startup; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import com.fasterxml.jackson.databind.ObjectMapper; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Immutable -public class StartupOptionsBuilder { - - public static final String DRIVER_NAME_KEY = "DRIVER_NAME"; - public static final String DRIVER_VERSION_KEY = "DRIVER_VERSION"; - public static final String DRIVER_BAGGAGE = "DRIVER_BAGGAGE"; - public static final String APPLICATION_NAME_KEY = "APPLICATION_NAME"; - public static final String APPLICATION_VERSION_KEY = "APPLICATION_VERSION"; - public static final String CLIENT_ID_KEY = "CLIENT_ID"; - - private static final Logger LOG = LoggerFactory.getLogger(StartupOptionsBuilder.class); - private static final ObjectMapper mapper = new ObjectMapper(); - - protected final InternalDriverContext context; - private UUID clientId; - private String applicationName; - private String applicationVersion; - - public StartupOptionsBuilder(InternalDriverContext context) { - this.context = context; - } - - /** - * Sets the client ID to be sent in the Startup message options. - * - *

If this method is not invoked, or the id passed in is null, a random {@link UUID} will be - * generated and used by default. - */ - public StartupOptionsBuilder withClientId(@Nullable UUID clientId) { - this.clientId = clientId; - return this; - } - - /** - * Sets the client application name to be sent in the Startup message options. - * - *

If this method is not invoked, or the name passed in is null, no application name option - * will be sent in the startup message options. - */ - public StartupOptionsBuilder withApplicationName(@Nullable String applicationName) { - this.applicationName = applicationName; - return this; - } - - /** - * Sets the client application version to be sent in the Startup message options. - * - *

If this method is not invoked, or the name passed in is null, no application version option - * will be sent in the startup message options. - */ - public StartupOptionsBuilder withApplicationVersion(@Nullable String applicationVersion) { - this.applicationVersion = applicationVersion; - return this; - } - - /** - * Builds a map of options to send in a Startup message. - * - *

The default set of options are built here and include {@link - * com.datastax.oss.protocol.internal.request.Startup#COMPRESSION_KEY} (if the context passed in - * has a compressor/algorithm set), and the driver's {@link #DRIVER_NAME_KEY} and {@link - * #DRIVER_VERSION_KEY}. The {@link com.datastax.oss.protocol.internal.request.Startup} - * constructor will add {@link - * com.datastax.oss.protocol.internal.request.Startup#CQL_VERSION_KEY}. - * - * @return Map of Startup Options. - */ - public Map build() { - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - - NullAllowingImmutableMap.Builder builder = NullAllowingImmutableMap.builder(3); - // add compression (if configured) and driver name and version - String compressionAlgorithm = context.getCompressor().algorithm(); - if (compressionAlgorithm != null && !compressionAlgorithm.trim().isEmpty()) { - builder.put(Startup.COMPRESSION_KEY, compressionAlgorithm.trim()); - } - builder.put(DRIVER_NAME_KEY, getDriverName()).put(DRIVER_VERSION_KEY, getDriverVersion()); - - // Add Insights entries, falling back to generation / config if no programmatic values provided: - if (clientId == null) { - clientId = Uuids.random(); - } - builder.put(CLIENT_ID_KEY, clientId.toString()); - if (applicationName == null) { - applicationName = config.getString(DseDriverOption.APPLICATION_NAME, null); - } - if (applicationName != null) { - builder.put(APPLICATION_NAME_KEY, applicationName); - } - if (applicationVersion == null) { - applicationVersion = config.getString(DseDriverOption.APPLICATION_VERSION, null); - } - if (applicationVersion != null) { - builder.put(APPLICATION_VERSION_KEY, applicationVersion); - } - driverBaggage().ifPresent(s -> builder.put(DRIVER_BAGGAGE, s)); - - return builder.build(); - } - - /** - * Returns this driver's name. - * - *

By default, this method will pull from the bundled Driver.properties file. Subclasses should - * override this method if they need to report a different Driver name on Startup. - */ - protected String getDriverName() { - return Session.OSS_DRIVER_COORDINATES.getName(); - } - - /** - * Returns this driver's version. - * - *

By default, this method will pull from the bundled Driver.properties file. Subclasses should - * override this method if they need to report a different Driver version on Startup. - */ - protected String getDriverVersion() { - return Session.OSS_DRIVER_COORDINATES.getVersion().toString(); - } - - private Optional driverBaggage() { - ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); - for (Map.Entry entry : - context.getLoadBalancingPolicies().entrySet()) { - Map config = entry.getValue().getStartupConfiguration(); - if (!config.isEmpty()) { - builder.put(entry.getKey(), config); - } - } - try { - return Optional.of(mapper.writeValueAsString(builder.build())); - } catch (Exception e) { - LOG.warn("Failed to construct startup driver baggage", e); - return Optional.empty(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java b/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java deleted file mode 100644 index 5c29a9b704b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java +++ /dev/null @@ -1,619 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.control; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.DriverChannelOptions; -import com.datastax.oss.driver.internal.core.channel.EventCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultTopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.DistanceEvent; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.Reconnection; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Event; 
-import com.datastax.oss.protocol.internal.response.event.SchemaChangeEvent; -import com.datastax.oss.protocol.internal.response.event.StatusChangeEvent; -import com.datastax.oss.protocol.internal.response.event.TopologyChangeEvent; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.util.AbstractMap.SimpleEntry; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Queue; -import java.util.WeakHashMap; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Consumer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Maintains a dedicated connection to a Cassandra node for administrative queries. - * - *

If the control node goes down, a reconnection is triggered. The control node is chosen - * randomly among the contact points at startup, or according to the load balancing policy for later - * reconnections. - * - *

The control connection is used by: - * - *

    - *
  • {@link DefaultTopologyMonitor} to determine cluster connectivity and retrieve node - * metadata; - *
  • {@link MetadataManager} to run schema metadata queries. - *
- */ -@ThreadSafe -public class ControlConnection implements EventCallback, AsyncAutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(ControlConnection.class); - - private final InternalDriverContext context; - private final String logPrefix; - private final EventExecutor adminExecutor; - private final SingleThreaded singleThreaded; - - // The single channel used by this connection. This field is accessed concurrently, but only - // mutated on adminExecutor (by SingleThreaded methods) - private volatile DriverChannel channel; - - public ControlConnection(InternalDriverContext context) { - this.context = context; - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.singleThreaded = new SingleThreaded(context); - } - - /** - * Initializes the control connection. If it is already initialized, this is a no-op and all - * parameters are ignored. - * - * @param listenToClusterEvents whether to register for TOPOLOGY_CHANGE and STATUS_CHANGE events. - * If the control connection has already initialized with another value, this is ignored. - * SCHEMA_CHANGE events are always registered. - * @param reconnectOnFailure whether to schedule a reconnection if the initial attempt fails (if - * true, the returned future will only complete once the reconnection has succeeded). - * @param useInitialReconnectionSchedule if no node can be reached, the type of reconnection - * schedule to use. In other words, the value that will be passed to {@link - * ReconnectionPolicy#newControlConnectionSchedule(boolean)}. Note that this parameter is only - * relevant if {@code reconnectOnFailure} is true, otherwise it is not used. 
- */ - public CompletionStage init( - boolean listenToClusterEvents, - boolean reconnectOnFailure, - boolean useInitialReconnectionSchedule) { - RunOrSchedule.on( - adminExecutor, - () -> - singleThreaded.init( - listenToClusterEvents, reconnectOnFailure, useInitialReconnectionSchedule)); - return singleThreaded.initFuture; - } - - public CompletionStage initFuture() { - return singleThreaded.initFuture; - } - - public boolean isInit() { - return singleThreaded.initFuture.isDone(); - } - - /** - * The channel currently used by this control connection. This is modified concurrently in the - * event of a reconnection, so it may occasionally return a closed channel (clients should be - * ready to deal with that). - */ - public DriverChannel channel() { - return channel; - } - - /** - * Forces an immediate reconnect: if we were connected to a node, that connection will be closed; - * if we were already reconnecting, the next attempt is started immediately, without waiting for - * the next scheduled interval; in all cases, a new query plan is fetched from the load balancing - * policy, and each node in it will be tried in sequence. - */ - public void reconnectNow() { - RunOrSchedule.on(adminExecutor, singleThreaded::reconnectNow); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - // Control queries are never critical, so there is no graceful close. 
- return forceCloseAsync(); - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::forceClose); - return singleThreaded.closeFuture; - } - - @Override - public void onEvent(Message eventMessage) { - if (!(eventMessage instanceof Event)) { - LOG.warn("[{}] Unsupported event class: {}", logPrefix, eventMessage.getClass().getName()); - } else { - LOG.debug("[{}] Processing incoming event {}", logPrefix, eventMessage); - Event event = (Event) eventMessage; - switch (event.type) { - case ProtocolConstants.EventType.TOPOLOGY_CHANGE: - processTopologyChange(event); - break; - case ProtocolConstants.EventType.STATUS_CHANGE: - processStatusChange(event); - break; - case ProtocolConstants.EventType.SCHEMA_CHANGE: - processSchemaChange(event); - break; - default: - LOG.warn("[{}] Unsupported event type: {}", logPrefix, event.type); - } - } - } - - private void processTopologyChange(Event event) { - TopologyChangeEvent tce = (TopologyChangeEvent) event; - switch (tce.changeType) { - case ProtocolConstants.TopologyChangeType.NEW_NODE: - context.getEventBus().fire(TopologyEvent.suggestAdded(tce.address)); - break; - case ProtocolConstants.TopologyChangeType.REMOVED_NODE: - context.getEventBus().fire(TopologyEvent.suggestRemoved(tce.address)); - break; - default: - LOG.warn("[{}] Unsupported topology change type: {}", logPrefix, tce.changeType); - } - } - - private void processStatusChange(Event event) { - StatusChangeEvent sce = (StatusChangeEvent) event; - switch (sce.changeType) { - case ProtocolConstants.StatusChangeType.UP: - context.getEventBus().fire(TopologyEvent.suggestUp(sce.address)); - break; - case ProtocolConstants.StatusChangeType.DOWN: - context.getEventBus().fire(TopologyEvent.suggestDown(sce.address)); - break; - default: - LOG.warn("[{}] Unsupported status change type: {}", logPrefix, sce.changeType); - } - } - - private void processSchemaChange(Event event) { - SchemaChangeEvent sce = 
(SchemaChangeEvent) event; - context - .getMetadataManager() - .refreshSchema(sce.keyspace, false, false) - .whenComplete( - (metadata, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema for a SCHEMA_CHANGE event, " - + "keeping previous version", - logPrefix, - error); - } - }); - } - - private class SingleThreaded { - private final InternalDriverContext context; - private final DriverConfig config; - private final CompletableFuture initFuture = new CompletableFuture<>(); - private boolean initWasCalled; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean closeWasCalled; - private final ReconnectionPolicy reconnectionPolicy; - private final Reconnection reconnection; - private DriverChannelOptions channelOptions; - // The last events received for each node - private final Map lastNodeDistance = new WeakHashMap<>(); - private final Map lastNodeState = new WeakHashMap<>(); - - private SingleThreaded(InternalDriverContext context) { - this.context = context; - this.config = context.getConfig(); - this.reconnectionPolicy = context.getReconnectionPolicy(); - this.reconnection = - new Reconnection( - logPrefix, - adminExecutor, - () -> reconnectionPolicy.newControlConnectionSchedule(false), - this::reconnect); - // In "reconnect-on-init" mode, handle cancellation of the initFuture by user code - CompletableFutures.whenCancelled( - this.initFuture, - () -> { - LOG.debug("[{}] Init future was cancelled, stopping reconnection", logPrefix); - reconnection.stop(); - }); - - context - .getEventBus() - .register(DistanceEvent.class, RunOrSchedule.on(adminExecutor, this::onDistanceEvent)); - context - .getEventBus() - .register(NodeStateEvent.class, RunOrSchedule.on(adminExecutor, this::onStateEvent)); - } - - private void init( - boolean listenToClusterEvents, - boolean reconnectOnFailure, - boolean useInitialReconnectionSchedule) { - assert 
adminExecutor.inEventLoop(); - if (initWasCalled) { - return; - } - initWasCalled = true; - try { - ImmutableList eventTypes = buildEventTypes(listenToClusterEvents); - LOG.debug("[{}] Initializing with event types {}", logPrefix, eventTypes); - channelOptions = - DriverChannelOptions.builder() - .withEvents(eventTypes, ControlConnection.this) - .withOwnerLogPrefix(logPrefix + "|control") - .build(); - - Queue nodes = context.getLoadBalancingPolicyWrapper().newQueryPlan(); - - connect( - nodes, - null, - () -> initFuture.complete(null), - error -> { - if (isAuthFailure(error)) { - LOG.warn( - "[{}] Authentication errors encountered on all contact points. Please check your authentication configuration.", - logPrefix); - } - if (reconnectOnFailure && !closeWasCalled) { - reconnection.start( - reconnectionPolicy.newControlConnectionSchedule( - useInitialReconnectionSchedule)); - } else { - // Special case for the initial connection: reword to a more user-friendly error - // message - if (error instanceof AllNodesFailedException) { - error = - ((AllNodesFailedException) error) - .reword( - "Could not reach any contact point, " - + "make sure you've provided valid addresses"); - } - initFuture.completeExceptionally(error); - } - }); - } catch (Throwable t) { - initFuture.completeExceptionally(t); - } - } - - private CompletionStage reconnect() { - assert adminExecutor.inEventLoop(); - Queue nodes = context.getLoadBalancingPolicyWrapper().newQueryPlan(); - CompletableFuture result = new CompletableFuture<>(); - connect( - nodes, - null, - () -> { - result.complete(true); - onSuccessfulReconnect(); - }, - error -> result.complete(false)); - return result; - } - - private void connect( - Queue nodes, - List> errors, - Runnable onSuccess, - Consumer onFailure) { - assert adminExecutor.inEventLoop(); - Node node = nodes.poll(); - if (node == null) { - onFailure.accept(AllNodesFailedException.fromErrors(errors)); - } else { - LOG.debug("[{}] Trying to establish a connection 
to {}", logPrefix, node); - context - .getChannelFactory() - .connect(node, channelOptions) - .whenCompleteAsync( - (channel, error) -> { - try { - NodeDistance lastDistance = lastNodeDistance.get(node); - NodeState lastState = lastNodeState.get(node); - if (error != null) { - if (closeWasCalled || initFuture.isCancelled()) { - onSuccess.run(); // abort, we don't really care about the result - } else { - if (error instanceof AuthenticationException) { - Loggers.warnWithException( - LOG, "[{}] Authentication error", logPrefix, error); - } else { - if (config - .getDefaultProfile() - .getBoolean(DefaultDriverOption.CONNECTION_WARN_INIT_ERROR)) { - Loggers.warnWithException( - LOG, - "[{}] Error connecting to {}, trying next node", - logPrefix, - node, - error); - } else { - LOG.debug( - "[{}] Error connecting to {}, trying next node", - logPrefix, - node, - error); - } - } - List> newErrors = - (errors == null) ? new ArrayList<>() : errors; - newErrors.add(new SimpleEntry<>(node, error)); - context.getEventBus().fire(ChannelEvent.controlConnectionFailed(node)); - connect(nodes, newErrors, onSuccess, onFailure); - } - } else if (closeWasCalled || initFuture.isCancelled()) { - LOG.debug( - "[{}] New channel opened ({}) but the control connection was closed, closing it", - logPrefix, - channel); - channel.forceClose(); - onSuccess.run(); - } else if (lastDistance == NodeDistance.IGNORED) { - LOG.debug( - "[{}] New channel opened ({}) but node became ignored, " - + "closing and trying next node", - logPrefix, - channel); - channel.forceClose(); - connect(nodes, errors, onSuccess, onFailure); - } else if (lastNodeState.containsKey(node) - && (lastState == null /*(removed)*/ - || lastState == NodeState.FORCED_DOWN)) { - LOG.debug( - "[{}] New channel opened ({}) but node was removed or forced down, " - + "closing and trying next node", - logPrefix, - channel); - channel.forceClose(); - connect(nodes, errors, onSuccess, onFailure); - } else { - LOG.debug("[{}] New channel 
opened {}", logPrefix, channel); - DriverChannel previousChannel = ControlConnection.this.channel; - ControlConnection.this.channel = channel; - if (previousChannel != null) { - // We were reconnecting: make sure previous channel gets closed (it may - // still be open if reconnection was forced) - LOG.debug( - "[{}] Forcefully closing previous channel {}", logPrefix, channel); - previousChannel.forceClose(); - } - context.getEventBus().fire(ChannelEvent.channelOpened(node)); - channel - .closeFuture() - .addListener( - f -> - adminExecutor - .submit(() -> onChannelClosed(channel, node)) - .addListener(UncaughtExceptions::log)); - onSuccess.run(); - } - } catch (Exception e) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected exception while processing channel init result", - logPrefix, - e); - } - }, - adminExecutor); - } - } - - private void onSuccessfulReconnect() { - // If reconnectOnFailure was true and we've never connected before, complete the future now to - // signal that the initialization is complete. - boolean isFirstConnection = initFuture.complete(null); - - // Otherwise, perform a full refresh (we don't know how long we were disconnected) - if (!isFirstConnection) { - context - .getMetadataManager() - .refreshNodes() - .whenComplete( - (result, error) -> { - if (error != null) { - LOG.debug("[{}] Error while refreshing node list", logPrefix, error); - } else { - try { - // A failed node list refresh at startup is not fatal, so this might be the - // first successful refresh; make sure the LBP gets initialized (this is a - // no-op if it was initialized already). 
- context.getLoadBalancingPolicyWrapper().init(); - context - .getMetadataManager() - .refreshSchema(null, false, true) - .whenComplete( - (metadata, schemaError) -> { - if (schemaError != null) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema after a " - + "successful reconnection, keeping previous version", - logPrefix, - schemaError); - } - }); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error on control connection reconnect", - logPrefix, - t); - } - } - }); - } - } - - private void onChannelClosed(DriverChannel channel, Node node) { - assert adminExecutor.inEventLoop(); - if (!closeWasCalled) { - context.getEventBus().fire(ChannelEvent.channelClosed(node)); - // If this channel is the current control channel, we must start a - // reconnection attempt to get a new control channel. - if (channel == ControlConnection.this.channel) { - LOG.debug( - "[{}] The current control channel {} was closed, scheduling reconnection", - logPrefix, - channel); - reconnection.start(); - } else { - LOG.trace( - "[{}] A previous control channel {} was closed, reconnection not required", - logPrefix, - channel); - } - } - } - - private void reconnectNow() { - assert adminExecutor.inEventLoop(); - if (initWasCalled && !closeWasCalled) { - reconnection.reconnectNow(true); - } - } - - private void onDistanceEvent(DistanceEvent event) { - assert adminExecutor.inEventLoop(); - this.lastNodeDistance.put(event.node, event.distance); - if (event.distance == NodeDistance.IGNORED - && channel != null - && !channel.closeFuture().isDone() - && event.node.getEndPoint().equals(channel.getEndPoint())) { - LOG.debug( - "[{}] Control node {} became IGNORED, reconnecting to a different node", - logPrefix, - event.node); - reconnectNow(); - } - } - - private void onStateEvent(NodeStateEvent event) { - assert adminExecutor.inEventLoop(); - this.lastNodeState.put(event.node, event.newState); - if ((event.newState == null 
/*(removed)*/ || event.newState == NodeState.FORCED_DOWN) - && channel != null - && !channel.closeFuture().isDone() - && event.node.getEndPoint().equals(channel.getEndPoint())) { - LOG.debug( - "[{}] Control node {} was removed or forced down, reconnecting to a different node", - logPrefix, - event.node); - reconnectNow(); - } - } - - private void forceClose() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - LOG.debug("[{}] Starting shutdown", logPrefix); - reconnection.stop(); - if (channel == null) { - LOG.debug("[{}] Shutdown complete", logPrefix); - closeFuture.complete(null); - } else { - channel - .forceClose() - .addListener( - f -> { - if (f.isSuccess()) { - LOG.debug("[{}] Shutdown complete", logPrefix); - closeFuture.complete(null); - } else { - closeFuture.completeExceptionally(f.cause()); - } - }); - } - } - } - - private boolean isAuthFailure(Throwable error) { - if (error instanceof AllNodesFailedException) { - Collection> errors = - ((AllNodesFailedException) error).getAllErrors().values(); - if (errors.size() == 0) { - return false; - } - for (List nodeErrors : errors) { - for (Throwable nodeError : nodeErrors) { - if (!(nodeError instanceof AuthenticationException)) { - return false; - } - } - } - } - return true; - } - - private static ImmutableList buildEventTypes(boolean listenClusterEvents) { - ImmutableList.Builder builder = ImmutableList.builder(); - builder.add(ProtocolConstants.EventType.SCHEMA_CHANGE); - if (listenClusterEvents) { - builder - .add(ProtocolConstants.EventType.STATUS_CHANGE) - .add(ProtocolConstants.EventType.TOPOLOGY_CHANGE); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java deleted file mode 100644 index ff9384b3e24..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java 
+++ /dev/null @@ -1,593 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchableStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import 
com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.servererrors.AlreadyExistsException; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CASWriteUnknownException; -import com.datastax.oss.driver.api.core.servererrors.CDCWriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.InvalidConfigurationInQueryException; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.SyntaxError; -import com.datastax.oss.driver.api.core.servererrors.TruncateException; -import com.datastax.oss.driver.api.core.servererrors.UnauthorizedException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import 
com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.data.ValuesHelper; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.primitives.Ints; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Batch; -import com.datastax.oss.protocol.internal.request.Execute; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.error.AlreadyExists; -import com.datastax.oss.protocol.internal.response.error.CASWriteUnknown; -import com.datastax.oss.protocol.internal.response.error.ReadFailure; -import com.datastax.oss.protocol.internal.response.error.ReadTimeout; -import com.datastax.oss.protocol.internal.response.error.Unavailable; -import com.datastax.oss.protocol.internal.response.error.WriteFailure; -import com.datastax.oss.protocol.internal.response.error.WriteTimeout; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import com.datastax.oss.protocol.internal.util.Bytes; -import 
com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -/** - * Utility methods to convert to/from protocol messages. - * - *

The main goal of this class is to move this code out of the request handlers. - */ -public class Conversions { - - public static DriverExecutionProfile resolveExecutionProfile( - Request request, DriverContext context) { - if (request.getExecutionProfile() != null) { - return request.getExecutionProfile(); - } else { - DriverConfig config = context.getConfig(); - String profileName = request.getExecutionProfileName(); - return (profileName == null || profileName.isEmpty()) - ? config.getDefaultProfile() - : config.getProfile(profileName); - } - } - - public static Message toMessage( - Statement statement, DriverExecutionProfile config, InternalDriverContext context) { - ConsistencyLevelRegistry consistencyLevelRegistry = context.getConsistencyLevelRegistry(); - ConsistencyLevel consistency = statement.getConsistencyLevel(); - int consistencyCode = - (consistency == null) - ? consistencyLevelRegistry.nameToCode( - config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - : consistency.getProtocolCode(); - int pageSize = statement.getPageSize(); - if (pageSize <= 0) { - pageSize = config.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE); - } - ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel(); - int serialConsistencyCode = - (serialConsistency == null) - ? 
consistencyLevelRegistry.nameToCode( - config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - : serialConsistency.getProtocolCode(); - long timestamp = statement.getQueryTimestamp(); - if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { - timestamp = context.getTimestampGenerator().next(); - } - CodecRegistry codecRegistry = context.getCodecRegistry(); - ProtocolVersion protocolVersion = context.getProtocolVersion(); - ProtocolVersionRegistry protocolVersionRegistry = context.getProtocolVersionRegistry(); - CqlIdentifier keyspace = statement.getKeyspace(); - int nowInSeconds = statement.getNowInSeconds(); - if (nowInSeconds != Statement.NO_NOW_IN_SECONDS - && !protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.NOW_IN_SECONDS)) { - throw new IllegalArgumentException("Can't use nowInSeconds with protocol " + protocolVersion); - } - if (statement instanceof SimpleStatement) { - SimpleStatement simpleStatement = (SimpleStatement) statement; - List positionalValues = simpleStatement.getPositionalValues(); - Map namedValues = simpleStatement.getNamedValues(); - if (!positionalValues.isEmpty() && !namedValues.isEmpty()) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (keyspace != null - && !protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { - throw new IllegalArgumentException( - "Can't use per-request keyspace with protocol " + protocolVersion); - } - QueryOptions queryOptions = - new QueryOptions( - consistencyCode, - encode(positionalValues, codecRegistry, protocolVersion), - encode(namedValues, codecRegistry, protocolVersion), - false, - pageSize, - statement.getPagingState(), - serialConsistencyCode, - timestamp, - (keyspace == null) ? 
null : keyspace.asInternal(), - nowInSeconds); - return new Query(simpleStatement.getQuery(), queryOptions); - } else if (statement instanceof BoundStatement) { - BoundStatement boundStatement = (BoundStatement) statement; - if (!protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.UNSET_BOUND_VALUES)) { - ensureAllSet(boundStatement); - } - boolean skipMetadata = - boundStatement.getPreparedStatement().getResultSetDefinitions().size() > 0; - QueryOptions queryOptions = - new QueryOptions( - consistencyCode, - boundStatement.getValues(), - Collections.emptyMap(), - skipMetadata, - pageSize, - statement.getPagingState(), - serialConsistencyCode, - timestamp, - null, - nowInSeconds); - PreparedStatement preparedStatement = boundStatement.getPreparedStatement(); - ByteBuffer id = preparedStatement.getId(); - ByteBuffer resultMetadataId = preparedStatement.getResultMetadataId(); - return new Execute( - Bytes.getArray(id), - (resultMetadataId == null) ? null : Bytes.getArray(resultMetadataId), - queryOptions); - } else if (statement instanceof BatchStatement) { - BatchStatement batchStatement = (BatchStatement) statement; - if (!protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.UNSET_BOUND_VALUES)) { - ensureAllSet(batchStatement); - } - if (keyspace != null - && !protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { - throw new IllegalArgumentException( - "Can't use per-request keyspace with protocol " + protocolVersion); - } - List queriesOrIds = new ArrayList<>(batchStatement.size()); - List> values = new ArrayList<>(batchStatement.size()); - for (BatchableStatement child : batchStatement) { - if (child instanceof SimpleStatement) { - SimpleStatement simpleStatement = (SimpleStatement) child; - if (simpleStatement.getNamedValues().size() > 0) { - throw new IllegalArgumentException( - String.format( - "Batch statements cannot contain simple statements with named values " - 
+ "(offending statement: %s)", - simpleStatement.getQuery())); - } - queriesOrIds.add(simpleStatement.getQuery()); - values.add(encode(simpleStatement.getPositionalValues(), codecRegistry, protocolVersion)); - } else if (child instanceof BoundStatement) { - BoundStatement boundStatement = (BoundStatement) child; - queriesOrIds.add(Bytes.getArray(boundStatement.getPreparedStatement().getId())); - values.add(boundStatement.getValues()); - } else { - throw new IllegalArgumentException( - "Unsupported child statement: " + child.getClass().getName()); - } - } - return new Batch( - batchStatement.getBatchType().getProtocolCode(), - queriesOrIds, - values, - consistencyCode, - serialConsistencyCode, - timestamp, - (keyspace == null) ? null : keyspace.asInternal(), - nowInSeconds); - } else { - throw new IllegalArgumentException( - "Unsupported statement type: " + statement.getClass().getName()); - } - } - - public static List encode( - List values, CodecRegistry codecRegistry, ProtocolVersion protocolVersion) { - if (values.isEmpty()) { - return Collections.emptyList(); - } else { - ByteBuffer[] encodedValues = new ByteBuffer[values.size()]; - int i = 0; - for (Object value : values) { - encodedValues[i++] = - (value == null) - ? 
null - : ValuesHelper.encodeToDefaultCqlMapping(value, codecRegistry, protocolVersion); - } - return NullAllowingImmutableList.of(encodedValues); - } - } - - public static Map encode( - Map values, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion) { - if (values.isEmpty()) { - return Collections.emptyMap(); - } else { - NullAllowingImmutableMap.Builder encodedValues = - NullAllowingImmutableMap.builder(values.size()); - for (Map.Entry entry : values.entrySet()) { - if (entry.getValue() == null) { - encodedValues.put(entry.getKey().asInternal(), null); - } else { - encodedValues.put( - entry.getKey().asInternal(), - ValuesHelper.encodeToDefaultCqlMapping( - entry.getValue(), codecRegistry, protocolVersion)); - } - } - return encodedValues.build(); - } - } - - public static void ensureAllSet(BoundStatement boundStatement) { - for (int i = 0; i < boundStatement.size(); i++) { - if (!boundStatement.isSet(i)) { - throw new IllegalStateException( - "Unset value at index " - + i - + ". 
" - + "If you want this value to be null, please set it to null explicitly."); - } - } - } - - public static void ensureAllSet(BatchStatement batchStatement) { - for (BatchableStatement batchableStatement : batchStatement) { - if (batchableStatement instanceof BoundStatement) { - ensureAllSet(((BoundStatement) batchableStatement)); - } - } - } - - public static AsyncResultSet toResultSet( - Result result, - ExecutionInfo executionInfo, - CqlSession session, - InternalDriverContext context) { - if (result instanceof Rows) { - Rows rows = (Rows) result; - Statement statement = (Statement) executionInfo.getRequest(); - ColumnDefinitions columnDefinitions = getResultDefinitions(rows, statement, context); - return new DefaultAsyncResultSet( - columnDefinitions, executionInfo, rows.getData(), session, context); - } else if (result instanceof Prepared) { - // This should never happen - throw new IllegalArgumentException("Unexpected PREPARED response to a CQL query"); - } else { - // Void, SetKeyspace, SchemaChange - return DefaultAsyncResultSet.empty(executionInfo); - } - } - - public static ColumnDefinitions getResultDefinitions( - Rows rows, Statement statement, InternalDriverContext context) { - RowsMetadata rowsMetadata = rows.getMetadata(); - if (rowsMetadata.columnSpecs.isEmpty()) { - // If the response has no metadata, it means the request had SKIP_METADATA set, the driver - // only ever does that for bound statements. - BoundStatement boundStatement = (BoundStatement) statement; - return boundStatement.getPreparedStatement().getResultSetDefinitions(); - } else { - // The response has metadata, always use it above anything else we might have locally. 
- ColumnDefinitions definitions = toColumnDefinitions(rowsMetadata, context); - // In addition, if the server signaled a schema change (see CASSANDRA-10786), update the - // prepared statement's copy of the metadata - if (rowsMetadata.newResultMetadataId != null) { - BoundStatement boundStatement = (BoundStatement) statement; - PreparedStatement preparedStatement = boundStatement.getPreparedStatement(); - preparedStatement.setResultMetadata( - ByteBuffer.wrap(rowsMetadata.newResultMetadataId).asReadOnlyBuffer(), definitions); - } - return definitions; - } - } - - public static DefaultPreparedStatement toPreparedStatement( - Prepared response, PrepareRequest request, InternalDriverContext context) { - ColumnDefinitions variableDefinitions = - toColumnDefinitions(response.variablesMetadata, context); - - int[] pkIndicesInResponse = response.variablesMetadata.pkIndices; - // null means a legacy protocol version that doesn't provide the info, try to compute it - List pkIndices = - (pkIndicesInResponse == null) - ? computePkIndices(variableDefinitions, context) - : Ints.asList(pkIndicesInResponse); - - return new DefaultPreparedStatement( - ByteBuffer.wrap(response.preparedQueryId).asReadOnlyBuffer(), - request.getQuery(), - variableDefinitions, - pkIndices, - (response.resultMetadataId == null) - ? 
null - : ByteBuffer.wrap(response.resultMetadataId).asReadOnlyBuffer(), - toColumnDefinitions(response.resultMetadata, context), - request.getKeyspace(), - NullAllowingImmutableMap.copyOf(request.getCustomPayload()), - request.getExecutionProfileNameForBoundStatements(), - request.getExecutionProfileForBoundStatements(), - request.getRoutingKeyspaceForBoundStatements(), - request.getRoutingKeyForBoundStatements(), - request.getRoutingTokenForBoundStatements(), - NullAllowingImmutableMap.copyOf(request.getCustomPayloadForBoundStatements()), - request.areBoundStatementsIdempotent(), - request.getTimeoutForBoundStatements(), - request.getPagingStateForBoundStatements(), - request.getPageSizeForBoundStatements(), - request.getConsistencyLevelForBoundStatements(), - request.getSerialConsistencyLevelForBoundStatements(), - request.areBoundStatementsTracing(), - context.getCodecRegistry(), - context.getProtocolVersion()); - } - - public static ColumnDefinitions toColumnDefinitions( - RowsMetadata metadata, InternalDriverContext context) { - ColumnDefinition[] values = new ColumnDefinition[metadata.columnSpecs.size()]; - int i = 0; - for (ColumnSpec columnSpec : metadata.columnSpecs) { - values[i++] = new DefaultColumnDefinition(columnSpec, context); - } - return DefaultColumnDefinitions.valueOf(ImmutableList.copyOf(values)); - } - - public static List computePkIndices( - ColumnDefinitions variables, InternalDriverContext context) { - if (variables.size() == 0) { - return Collections.emptyList(); - } - // The rest of the computation relies on the fact that CQL does not have joins: all variables - // belong to the same keyspace and table. 
- ColumnDefinition firstVariable = variables.get(0); - return context - .getMetadataManager() - .getMetadata() - .getKeyspace(firstVariable.getKeyspace()) - .flatMap(ks -> ks.getTable(firstVariable.getTable())) - .map(RelationMetadata::getPartitionKey) - .map(pk -> findIndices(pk, variables)) - .orElse(Collections.emptyList()); - } - - // Find at which position in `variables` each element of `partitionKey` appears - @VisibleForTesting - static List findIndices(List partitionKey, ColumnDefinitions variables) { - ImmutableList.Builder result = - ImmutableList.builderWithExpectedSize(partitionKey.size()); - for (ColumnMetadata pkColumn : partitionKey) { - int firstIndex = variables.firstIndexOf(pkColumn.getName()); - if (firstIndex < 0) { - // If a single column is missing, we can abort right away - return Collections.emptyList(); - } else { - result.add(firstIndex); - } - } - return result.build(); - } - - public static CoordinatorException toThrowable( - Node node, Error errorMessage, InternalDriverContext context) { - switch (errorMessage.code) { - case ProtocolConstants.ErrorCode.UNPREPARED: - throw new AssertionError( - "UNPREPARED should be handled as a special case, not turned into an exception"); - case ProtocolConstants.ErrorCode.SERVER_ERROR: - return new ServerError(node, errorMessage.message); - case ProtocolConstants.ErrorCode.PROTOCOL_ERROR: - return new ProtocolError(node, errorMessage.message); - case ProtocolConstants.ErrorCode.AUTH_ERROR: - // This method is used for query execution, authentication errors should only happen during - // connection init - return new ProtocolError( - node, "Unexpected authentication error (" + errorMessage.message + ")"); - case ProtocolConstants.ErrorCode.UNAVAILABLE: - Unavailable unavailable = (Unavailable) errorMessage; - return new UnavailableException( - node, - context.getConsistencyLevelRegistry().codeToLevel(unavailable.consistencyLevel), - unavailable.required, - unavailable.alive); - case 
ProtocolConstants.ErrorCode.OVERLOADED: - return new OverloadedException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING: - return new BootstrappingException(node); - case ProtocolConstants.ErrorCode.TRUNCATE_ERROR: - return new TruncateException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.WRITE_TIMEOUT: - WriteTimeout writeTimeout = (WriteTimeout) errorMessage; - return new WriteTimeoutException( - node, - context.getConsistencyLevelRegistry().codeToLevel(writeTimeout.consistencyLevel), - writeTimeout.received, - writeTimeout.blockFor, - context.getWriteTypeRegistry().fromName(writeTimeout.writeType)); - case ProtocolConstants.ErrorCode.READ_TIMEOUT: - ReadTimeout readTimeout = (ReadTimeout) errorMessage; - return new ReadTimeoutException( - node, - context.getConsistencyLevelRegistry().codeToLevel(readTimeout.consistencyLevel), - readTimeout.received, - readTimeout.blockFor, - readTimeout.dataPresent); - case ProtocolConstants.ErrorCode.READ_FAILURE: - ReadFailure readFailure = (ReadFailure) errorMessage; - return new ReadFailureException( - node, - context.getConsistencyLevelRegistry().codeToLevel(readFailure.consistencyLevel), - readFailure.received, - readFailure.blockFor, - readFailure.numFailures, - readFailure.dataPresent, - readFailure.reasonMap); - case ProtocolConstants.ErrorCode.FUNCTION_FAILURE: - return new FunctionFailureException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.WRITE_FAILURE: - WriteFailure writeFailure = (WriteFailure) errorMessage; - return new WriteFailureException( - node, - context.getConsistencyLevelRegistry().codeToLevel(writeFailure.consistencyLevel), - writeFailure.received, - writeFailure.blockFor, - context.getWriteTypeRegistry().fromName(writeFailure.writeType), - writeFailure.numFailures, - writeFailure.reasonMap); - case ProtocolConstants.ErrorCode.CDC_WRITE_FAILURE: - return new CDCWriteFailureException(node, errorMessage.message); - case 
ProtocolConstants.ErrorCode.CAS_WRITE_UNKNOWN: - CASWriteUnknown casFailure = (CASWriteUnknown) errorMessage; - return new CASWriteUnknownException( - node, - context.getConsistencyLevelRegistry().codeToLevel(casFailure.consistencyLevel), - casFailure.received, - casFailure.blockFor); - case ProtocolConstants.ErrorCode.SYNTAX_ERROR: - return new SyntaxError(node, errorMessage.message); - case ProtocolConstants.ErrorCode.UNAUTHORIZED: - return new UnauthorizedException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.INVALID: - return new InvalidQueryException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.CONFIG_ERROR: - return new InvalidConfigurationInQueryException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.ALREADY_EXISTS: - AlreadyExists alreadyExists = (AlreadyExists) errorMessage; - return new AlreadyExistsException(node, alreadyExists.keyspace, alreadyExists.table); - default: - return new ProtocolError(node, "Unknown error code: " + errorMessage.code); - } - } - - /** Use {@link #resolveIdempotence(Request, DriverExecutionProfile)} instead. */ - @Deprecated - public static boolean resolveIdempotence(Request request, InternalDriverContext context) { - return resolveIdempotence(request, resolveExecutionProfile(request, context)); - } - - public static boolean resolveIdempotence( - Request request, DriverExecutionProfile executionProfile) { - Boolean requestIsIdempotent = request.isIdempotent(); - return (requestIsIdempotent == null) - ? executionProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE) - : requestIsIdempotent; - } - - /** Use {@link #resolveRequestTimeout(Request, DriverExecutionProfile)} instead. 
*/ - @Deprecated - public static Duration resolveRequestTimeout(Request request, InternalDriverContext context) { - return resolveRequestTimeout(request, resolveExecutionProfile(request, context)); - } - - public static Duration resolveRequestTimeout( - Request request, DriverExecutionProfile executionProfile) { - Duration timeout = request.getTimeout(); - return timeout != null - ? timeout - : executionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT); - } - - /** Use {@link #resolveRetryPolicy(InternalDriverContext, DriverExecutionProfile)} instead. */ - @Deprecated - public static RetryPolicy resolveRetryPolicy(Request request, InternalDriverContext context) { - DriverExecutionProfile executionProfile = resolveExecutionProfile(request, context); - return context.getRetryPolicy(executionProfile.getName()); - } - - public static RetryPolicy resolveRetryPolicy( - InternalDriverContext context, DriverExecutionProfile executionProfile) { - return context.getRetryPolicy(executionProfile.getName()); - } - - /** - * Use {@link #resolveSpeculativeExecutionPolicy(InternalDriverContext, DriverExecutionProfile)} - * instead. 
- */ - @Deprecated - public static SpeculativeExecutionPolicy resolveSpeculativeExecutionPolicy( - Request request, InternalDriverContext context) { - DriverExecutionProfile executionProfile = resolveExecutionProfile(request, context); - return context.getSpeculativeExecutionPolicy(executionProfile.getName()); - } - - public static SpeculativeExecutionPolicy resolveSpeculativeExecutionPolicy( - InternalDriverContext context, DriverExecutionProfile executionProfile) { - return context.getSpeculativeExecutionPolicy(executionProfile.getName()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java deleted file mode 100644 index a3d11cff054..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.base.Functions; -import com.datastax.oss.driver.shaded.guava.common.cache.Cache; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class 
CqlPrepareAsyncProcessor - implements RequestProcessor> { - - private static final Logger LOG = LoggerFactory.getLogger(CqlPrepareAsyncProcessor.class); - - protected final Cache> cache; - - public CqlPrepareAsyncProcessor() { - this(Optional.empty()); - } - - public CqlPrepareAsyncProcessor(@NonNull Optional context) { - this(context, Functions.identity()); - } - - protected CqlPrepareAsyncProcessor( - Optional context, - Function, CacheBuilder> decorator) { - - CacheBuilder baseCache = CacheBuilder.newBuilder().weakValues(); - this.cache = decorator.apply(baseCache).build(); - context.ifPresent( - (ctx) -> { - LOG.info("Adding handler to invalidate cached prepared statements on type changes"); - EventExecutor adminExecutor = ctx.getNettyOptions().adminEventExecutorGroup().next(); - ctx.getEventBus() - .register( - TypeChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onTypeChanged)); - }); - } - - private static boolean typeMatches(UserDefinedType oldType, DataType typeToCheck) { - - switch (typeToCheck.getProtocolCode()) { - case ProtocolConstants.DataType.UDT: - UserDefinedType udtType = (UserDefinedType) typeToCheck; - return udtType.equals(oldType) - ? 
true - : Iterables.any(udtType.getFieldTypes(), (testType) -> typeMatches(oldType, testType)); - case ProtocolConstants.DataType.LIST: - ListType listType = (ListType) typeToCheck; - return typeMatches(oldType, listType.getElementType()); - case ProtocolConstants.DataType.SET: - SetType setType = (SetType) typeToCheck; - return typeMatches(oldType, setType.getElementType()); - case ProtocolConstants.DataType.MAP: - MapType mapType = (MapType) typeToCheck; - return typeMatches(oldType, mapType.getKeyType()) - || typeMatches(oldType, mapType.getValueType()); - case ProtocolConstants.DataType.TUPLE: - TupleType tupleType = (TupleType) typeToCheck; - return Iterables.any( - tupleType.getComponentTypes(), (testType) -> typeMatches(oldType, testType)); - default: - return false; - } - } - - private void onTypeChanged(TypeChangeEvent event) { - for (Map.Entry> entry : - this.cache.asMap().entrySet()) { - - try { - PreparedStatement stmt = entry.getValue().get(); - if (Iterables.any( - stmt.getResultSetDefinitions(), (def) -> typeMatches(event.oldType, def.getType())) - || Iterables.any( - stmt.getVariableDefinitions(), - (def) -> typeMatches(event.oldType, def.getType()))) { - - this.cache.invalidate(entry.getKey()); - this.cache.cleanUp(); - } - } catch (Exception e) { - LOG.info("Exception while invalidating prepared statement cache due to UDT change", e); - } - } - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof PrepareRequest && resultType.equals(PrepareRequest.ASYNC); - } - - @Override - public CompletionStage process( - PrepareRequest request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - try { - CompletableFuture result = cache.getIfPresent(request); - if (result == null) { - CompletableFuture mine = new CompletableFuture<>(); - result = cache.get(request, () -> mine); - if (result == mine) { - new CqlPrepareHandler(request, session, context, 
sessionLogPrefix) - .handle() - .whenComplete( - (preparedStatement, error) -> { - if (error != null) { - mine.completeExceptionally(error); - cache.invalidate(request); // Make sure failure isn't cached indefinitely - } else { - mine.complete(preparedStatement); - } - }); - } - } - // Return a defensive copy. So if a client cancels its request, the cache won't be impacted - // nor a potential concurrent request. - return result.thenApply(x -> x); // copy() is available only since Java 9 - } catch (ExecutionException e) { - return CompletableFutures.failedFuture(e.getCause()); - } - } - - @Override - public CompletionStage newFailure(RuntimeException error) { - return CompletableFutures.failedFuture(error); - } - - public Cache> getCache() { - return cache; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java deleted file mode 100644 index 1ee1f303ab2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java +++ /dev/null @@ -1,487 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; 
-import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Handles the lifecycle of the preparation of a CQL statement. 
*/ -@ThreadSafe -public class CqlPrepareHandler implements Throttled { - - private static final Logger LOG = LoggerFactory.getLogger(CqlPrepareHandler.class); - - private final long startTimeNanos; - private final String logPrefix; - private final PrepareRequest initialRequest; - private final DefaultSession session; - private final InternalDriverContext context; - private final Queue queryPlan; - protected final CompletableFuture result; - private final Timer timer; - private final Timeout scheduledTimeout; - private final RequestThrottler throttler; - private final Boolean prepareOnAllNodes; - private final DriverExecutionProfile executionProfile; - private volatile InitialPrepareCallback initialCallback; - - // The errors on the nodes that were already tried (lazily initialized on the first error). - // We don't use a map because nodes can appear multiple times. - private volatile List> errors; - - protected CqlPrepareHandler( - PrepareRequest request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - this.startTimeNanos = System.nanoTime(); - this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); - LOG.trace("[{}] Creating new handler for prepare request {}", logPrefix, request); - - this.initialRequest = request; - this.session = session; - this.context = context; - executionProfile = Conversions.resolveExecutionProfile(request, context); - this.queryPlan = - context - .getLoadBalancingPolicyWrapper() - .newQueryPlan(request, executionProfile.getName(), session); - - this.result = new CompletableFuture<>(); - this.result.exceptionally( - t -> { - try { - if (t instanceof CancellationException) { - cancelTimeout(); - context.getRequestThrottler().signalCancel(this); - } - } catch (Throwable t2) { - Loggers.warnWithException(LOG, "[{}] Uncaught exception", logPrefix, t2); - } - return null; - }); - this.timer = context.getNettyOptions().getTimer(); - - Duration timeout = Conversions.resolveRequestTimeout(request, 
executionProfile); - this.scheduledTimeout = scheduleTimeout(timeout); - this.prepareOnAllNodes = executionProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES); - - this.throttler = context.getRequestThrottler(); - this.throttler.register(this); - } - - @Override - public void onThrottleReady(boolean wasDelayed) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialRequest, context); - if (wasDelayed) { - session - .getMetricUpdater() - .updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - executionProfile.getName(), - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - sendRequest(initialRequest, null, 0); - } - - public CompletableFuture handle() { - return result; - } - - private Timeout scheduleTimeout(Duration timeoutDuration) { - if (timeoutDuration.toNanos() > 0) { - return this.timer.newTimeout( - (Timeout timeout1) -> { - setFinalError(new DriverTimeoutException("Query timed out after " + timeoutDuration)); - if (initialCallback != null) { - initialCallback.cancel(); - } - }, - timeoutDuration.toNanos(), - TimeUnit.NANOSECONDS); - } else { - return null; - } - } - - private void cancelTimeout() { - if (this.scheduledTimeout != null) { - this.scheduledTimeout.cancel(); - } - } - - private void sendRequest(PrepareRequest request, Node node, int retryCount) { - if (result.isDone()) { - return; - } - DriverChannel channel = null; - if (node == null || (channel = session.getChannel(node, logPrefix)) == null) { - while (!result.isDone() && (node = queryPlan.poll()) != null) { - channel = session.getChannel(node, logPrefix); - if (channel != null) { - break; - } else { - recordError(node, new NodeUnavailableException(node)); - } - } - } - if (channel == null) { - setFinalError(AllNodesFailedException.fromErrors(this.errors)); - } else { - InitialPrepareCallback initialPrepareCallback = - new InitialPrepareCallback(request, node, channel, retryCount); - - Prepare message = toPrepareMessage(request); - 
- channel - .write(message, false, request.getCustomPayload(), initialPrepareCallback) - .addListener(initialPrepareCallback); - } - } - - @NonNull - private Prepare toPrepareMessage(PrepareRequest request) { - ProtocolVersion protocolVersion = context.getProtocolVersion(); - ProtocolVersionRegistry registry = context.getProtocolVersionRegistry(); - CqlIdentifier keyspace = request.getKeyspace(); - if (keyspace != null - && !registry.supports(protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { - throw new IllegalArgumentException( - "Can't use per-request keyspace with protocol " + protocolVersion); - } - return new Prepare(request.getQuery(), (keyspace == null) ? null : keyspace.asInternal()); - } - - private void recordError(Node node, Throwable error) { - // Use a local variable to do only a single single volatile read in the nominal case - List> errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - synchronized (CqlPrepareHandler.this) { - errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - this.errors = errorsSnapshot = new CopyOnWriteArrayList<>(); - } - } - } - errorsSnapshot.add(new AbstractMap.SimpleEntry<>(node, error)); - } - - private void setFinalResult(PrepareRequest request, Prepared response) { - - // Whatever happens below, we're done with this stream id - throttler.signalSuccess(this); - - DefaultPreparedStatement preparedStatement = - Conversions.toPreparedStatement(response, request, context); - - session - .getRepreparePayloads() - .put(preparedStatement.getId(), preparedStatement.getRepreparePayload()); - if (prepareOnAllNodes) { - prepareOnOtherNodes(request) - .thenRun( - () -> { - LOG.trace( - "[{}] Done repreparing on other nodes, completing the request", logPrefix); - result.complete(preparedStatement); - }) - .exceptionally( - error -> { - result.completeExceptionally(error); - return null; - }); - } else { - LOG.trace("[{}] Prepare on all nodes is disabled, completing the request", logPrefix); - 
result.complete(preparedStatement); - } - } - - private CompletionStage prepareOnOtherNodes(PrepareRequest request) { - List> otherNodesFutures = new ArrayList<>(); - // Only process the rest of the query plan. Any node before that is either the coordinator, or - // a node that failed (we assume that retrying right now has little chance of success). - for (Node node : queryPlan) { - otherNodesFutures.add(prepareOnOtherNode(request, node)); - } - return CompletableFutures.allDone(otherNodesFutures); - } - - // Try to reprepare on another node, after the initial query has succeeded. Errors are not - // blocking, the preparation will be retried later on that node. Simply warn and move on. - private CompletionStage prepareOnOtherNode(PrepareRequest request, Node node) { - LOG.trace("[{}] Repreparing on {}", logPrefix, node); - DriverChannel channel = session.getChannel(node, logPrefix); - if (channel == null) { - LOG.trace("[{}] Could not get a channel to reprepare on {}, skipping", logPrefix, node); - return CompletableFuture.completedFuture(null); - } else { - ThrottledAdminRequestHandler handler = - ThrottledAdminRequestHandler.prepare( - channel, - false, - toPrepareMessage(request), - request.getCustomPayload(), - Conversions.resolveRequestTimeout(request, executionProfile), - throttler, - session.getMetricUpdater(), - logPrefix); - return handler - .start() - .handle( - (result, error) -> { - if (error == null) { - LOG.trace("[{}] Successfully reprepared on {}", logPrefix, node); - } else { - Loggers.warnWithException( - LOG, "[{}] Error while repreparing on {}", node, logPrefix, error); - } - return null; - }); - } - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialRequest, context); - session - .getMetricUpdater() - .incrementCounter(DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); - setFinalError(error); - } - 
- private void setFinalError(Throwable error) { - if (result.completeExceptionally(error)) { - cancelTimeout(); - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(this); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(this, error); - } - } - } - - private class InitialPrepareCallback - implements ResponseCallback, GenericFutureListener> { - private final PrepareRequest request; - private final Node node; - private final DriverChannel channel; - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt of each execution). - private final int retryCount; - - private InitialPrepareCallback( - PrepareRequest request, Node node, DriverChannel channel, int retryCount) { - this.request = request; - this.node = node; - this.channel = channel; - this.retryCount = retryCount; - } - - // this gets invoked once the write completes. - @Override - public void operationComplete(Future future) { - if (!future.isSuccess()) { - LOG.trace( - "[{}] Failed to send request on {}, trying next node (cause: {})", - logPrefix, - node, - future.cause().toString()); - recordError(node, future.cause()); - sendRequest(request, null, retryCount); // try next host - } else { - if (result.isDone()) { - // Might happen if the timeout just fired - cancel(); - } else { - LOG.trace("[{}] Request sent to {}", logPrefix, node); - initialCallback = this; - } - } - } - - @Override - public void onResponse(Frame responseFrame) { - if (result.isDone()) { - return; - } - try { - Message responseMessage = responseFrame.message; - if (responseMessage instanceof Prepared) { - LOG.trace("[{}] Got result, completing", logPrefix); - setFinalResult(request, (Prepared) responseMessage); - } else if (responseMessage instanceof Error) { - LOG.trace("[{}] Got error response, processing", logPrefix); - processErrorResponse((Error) responseMessage); - } else { - setFinalError(new 
IllegalStateException("Unexpected response " + responseMessage)); - } - } catch (Throwable t) { - setFinalError(t); - } - } - - private void processErrorResponse(Error errorMessage) { - if (errorMessage.code == ProtocolConstants.ErrorCode.UNPREPARED - || errorMessage.code == ProtocolConstants.ErrorCode.ALREADY_EXISTS - || errorMessage.code == ProtocolConstants.ErrorCode.READ_FAILURE - || errorMessage.code == ProtocolConstants.ErrorCode.READ_TIMEOUT - || errorMessage.code == ProtocolConstants.ErrorCode.WRITE_FAILURE - || errorMessage.code == ProtocolConstants.ErrorCode.WRITE_TIMEOUT - || errorMessage.code == ProtocolConstants.ErrorCode.UNAVAILABLE - || errorMessage.code == ProtocolConstants.ErrorCode.TRUNCATE_ERROR) { - setFinalError( - new IllegalStateException( - "Unexpected server error for a PREPARE query" + errorMessage)); - return; - } - CoordinatorException error = Conversions.toThrowable(node, errorMessage, context); - if (error instanceof BootstrappingException) { - LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); - recordError(node, error); - sendRequest(request, null, retryCount); - } else if (error instanceof QueryValidationException - || error instanceof FunctionFailureException - || error instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); - setFinalError(error); - } else { - // Because prepare requests are known to always be idempotent, we call the retry policy - // directly, without checking the flag. 
- RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - RetryVerdict verdict = retryPolicy.onErrorResponseVerdict(request, error, retryCount); - processRetryVerdict(verdict, error); - } - } - - private void processRetryVerdict(RetryVerdict verdict, Throwable error) { - RetryDecision decision = verdict.getRetryDecision(); - LOG.trace("[{}] Processing retry decision {}", logPrefix, decision); - switch (decision) { - case RETRY_SAME: - recordError(node, error); - sendRequest(verdict.getRetryRequest(request), node, retryCount + 1); - break; - case RETRY_NEXT: - recordError(node, error); - sendRequest(verdict.getRetryRequest(request), null, retryCount + 1); - break; - case RETHROW: - setFinalError(error); - break; - case IGNORE: - setFinalError( - new IllegalArgumentException( - "IGNORE decisions are not allowed for prepare requests, " - + "please fix your retry policy.")); - break; - } - } - - @Override - public void onFailure(Throwable error) { - if (result.isDone()) { - return; - } - LOG.trace("[{}] Request failure, processing: {}", logPrefix, error.toString()); - RetryVerdict verdict; - try { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - verdict = retryPolicy.onRequestAbortedVerdict(request, error, retryCount); - } catch (Throwable cause) { - setFinalError( - new IllegalStateException("Unexpected error while invoking the retry policy", cause)); - return; - } - processRetryVerdict(verdict, error); - } - - public void cancel() { - try { - if (!channel.closeFuture().isDone()) { - this.channel.cancel(this); - } - } catch (Throwable t) { - Loggers.warnWithException(LOG, "[{}] Error cancelling", logPrefix, t); - } - } - - @Override - public String toString() { - return logPrefix; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java deleted file 
mode 100644 index 0896df07140..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.cache.Cache; -import java.util.concurrent.CompletableFuture; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlPrepareSyncProcessor - implements RequestProcessor { - - private final CqlPrepareAsyncProcessor asyncProcessor; - - /** - * Note: if you also 
register a {@link CqlPrepareAsyncProcessor} with your session, make sure that - * you pass that same instance to this constructor. This is necessary for proper behavior of the - * prepared statement cache. - */ - public CqlPrepareSyncProcessor(CqlPrepareAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof PrepareRequest && resultType.equals(PrepareRequest.SYNC); - } - - @Override - public PreparedStatement process( - PrepareRequest request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly( - asyncProcessor.process(request, session, context, sessionLogPrefix)); - } - - public Cache> getCache() { - return asyncProcessor.getCache(); - } - - @Override - public PreparedStatement newFailure(RuntimeException error) { - throw error; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java deleted file mode 100644 index 3013848372b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlRequestAsyncProcessor - implements RequestProcessor, CompletionStage> { - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(Statement.ASYNC); - } - - @Override - public CompletionStage process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new CqlRequestHandler(request, session, context, sessionLogPrefix).handle(); - } - - @Override - public CompletionStage newFailure(RuntimeException error) { - return CompletableFutures.failedFuture(error); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java deleted file mode 100644 index 6842547b11a..00000000000 
--- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java +++ /dev/null @@ -1,976 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import 
com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.driver.internal.core.tracker.RequestLogger; -import com.datastax.oss.driver.internal.core.util.Loggers; -import 
com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.error.Unprepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.SchemaChange; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import com.datastax.oss.protocol.internal.response.result.Void; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.handler.codec.EncoderException; -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Queue; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class CqlRequestHandler implements Throttled { - - private static final Logger LOG = LoggerFactory.getLogger(CqlRequestHandler.class); - private static final long NANOTIME_NOT_MEASURED_YET = -1; - - private final long startTimeNanos; - private final String 
handlerLogPrefix; - private final Statement initialStatement; - private final DefaultSession session; - private final CqlIdentifier keyspace; - private final InternalDriverContext context; - protected final CompletableFuture result; - private final Timer timer; - /** - * How many speculative executions are currently running (including the initial execution). We - * track this in order to know when to fail the request if all executions have reached the end of - * the query plan. - */ - private final AtomicInteger activeExecutionsCount; - /** - * How many speculative executions have started (excluding the initial execution), whether they - * have completed or not. We track this in order to fill {@link - * ExecutionInfo#getSpeculativeExecutionCount()}. - */ - private final AtomicInteger startedSpeculativeExecutionsCount; - - final Timeout scheduledTimeout; - final List scheduledExecutions; - private final List inFlightCallbacks; - private final RequestThrottler throttler; - private final RequestTracker requestTracker; - private final Optional requestIdGenerator; - private final SessionMetricUpdater sessionMetricUpdater; - private final DriverExecutionProfile executionProfile; - - // The errors on the nodes that were already tried (lazily initialized on the first error). - // We don't use a map because nodes can appear multiple times. 
- private volatile List> errors; - - private final Joiner logPrefixJoiner = Joiner.on('|'); - private final String sessionName; - private final String sessionRequestId; - - protected CqlRequestHandler( - Statement statement, - DefaultSession session, - InternalDriverContext context, - String sessionName) { - - this.startTimeNanos = System.nanoTime(); - this.requestIdGenerator = context.getRequestIdGenerator(); - this.sessionName = sessionName; - this.sessionRequestId = - this.requestIdGenerator - .map(RequestIdGenerator::getSessionRequestId) - .orElse(Integer.toString(this.hashCode())); - this.handlerLogPrefix = logPrefixJoiner.join(sessionName, sessionRequestId); - LOG.trace("[{}] Creating new handler for request {}", handlerLogPrefix, statement); - - this.initialStatement = statement; - this.session = session; - this.keyspace = session.getKeyspace().orElse(null); - this.context = context; - this.result = new CompletableFuture<>(); - this.result.exceptionally( - t -> { - try { - if (t instanceof CancellationException) { - cancelScheduledTasks(); - context.getRequestThrottler().signalCancel(this); - } - } catch (Throwable t2) { - Loggers.warnWithException(LOG, "[{}] Uncaught exception", handlerLogPrefix, t2); - } - return null; - }); - - this.activeExecutionsCount = new AtomicInteger(1); - this.startedSpeculativeExecutionsCount = new AtomicInteger(0); - this.scheduledExecutions = new CopyOnWriteArrayList<>(); - this.inFlightCallbacks = new CopyOnWriteArrayList<>(); - - this.requestTracker = context.getRequestTracker(); - this.sessionMetricUpdater = session.getMetricUpdater(); - - this.timer = context.getNettyOptions().getTimer(); - this.executionProfile = Conversions.resolveExecutionProfile(initialStatement, context); - Duration timeout = Conversions.resolveRequestTimeout(statement, executionProfile); - this.scheduledTimeout = scheduleTimeout(timeout); - - this.throttler = context.getRequestThrottler(); - this.throttler.register(this); - } - - @Override - public 
void onThrottleReady(boolean wasDelayed) { - if (wasDelayed - // avoid call to nanoTime() if metric is disabled: - && sessionMetricUpdater.isEnabled( - DefaultSessionMetric.THROTTLING_DELAY, executionProfile.getName())) { - sessionMetricUpdater.updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - executionProfile.getName(), - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - Queue queryPlan = - this.initialStatement.getNode() != null - ? new SimpleQueryPlan(this.initialStatement.getNode()) - : context - .getLoadBalancingPolicyWrapper() - .newQueryPlan(initialStatement, executionProfile.getName(), session); - sendRequest(initialStatement, null, queryPlan, 0, 0, true); - } - - public CompletionStage handle() { - return result; - } - - private Timeout scheduleTimeout(Duration timeoutDuration) { - if (timeoutDuration.toNanos() > 0) { - try { - return this.timer.newTimeout( - (Timeout timeout1) -> - setFinalError( - initialStatement, - new DriverTimeoutException("Query timed out after " + timeoutDuration), - null, - -1), - timeoutDuration.toNanos(), - TimeUnit.NANOSECONDS); - } catch (IllegalStateException e) { - // If we raced with session shutdown the timer might be closed already, rethrow with a more - // explicit message - result.completeExceptionally( - "cannot be started once stopped".equals(e.getMessage()) - ? new IllegalStateException("Session is closed") - : e); - } - } - return null; - } - - /** - * Sends the request to the next available node. - * - * @param statement The statement to execute. - * @param retriedNode if not null, it will be attempted first before the rest of the query plan. - * @param queryPlan the list of nodes to try (shared with all other executions) - * @param currentExecutionIndex 0 for the initial execution, 1 for the first speculative one, etc. 
- * @param retryCount the number of times that the retry policy was invoked for this execution - * already (note that some internal retries don't go through the policy, and therefore don't - * increment this counter) - * @param scheduleNextExecution whether to schedule the next speculative execution - */ - private void sendRequest( - Statement statement, - Node retriedNode, - Queue queryPlan, - int currentExecutionIndex, - int retryCount, - boolean scheduleNextExecution) { - if (result.isDone()) { - return; - } - Node node = retriedNode; - DriverChannel channel = null; - if (node == null || (channel = session.getChannel(node, handlerLogPrefix)) == null) { - while (!result.isDone() && (node = queryPlan.poll()) != null) { - channel = session.getChannel(node, handlerLogPrefix); - if (channel != null) { - break; - } else { - recordError(node, new NodeUnavailableException(node)); - } - } - } - if (channel == null) { - // We've reached the end of the query plan without finding any node to write to - if (!result.isDone() && activeExecutionsCount.decrementAndGet() == 0) { - // We're the last execution so fail the result - setFinalError(statement, AllNodesFailedException.fromErrors(this.errors), null, -1); - } - } else { - Statement finalStatement = statement; - String nodeRequestId = - this.requestIdGenerator - .map((g) -> g.getNodeRequestId(finalStatement, sessionRequestId)) - .orElse(Integer.toString(this.hashCode())); - statement = - this.requestIdGenerator - .map((g) -> g.getDecoratedStatement(finalStatement, nodeRequestId)) - .orElse(finalStatement); - - NodeResponseCallback nodeResponseCallback = - new NodeResponseCallback( - statement, - node, - queryPlan, - channel, - currentExecutionIndex, - retryCount, - scheduleNextExecution, - logPrefixJoiner.join(this.sessionName, nodeRequestId, currentExecutionIndex)); - Message message = Conversions.toMessage(statement, executionProfile, context); - channel - .write(message, statement.isTracing(), 
statement.getCustomPayload(), nodeResponseCallback) - .addListener(nodeResponseCallback); - } - } - - private void recordError(Node node, Throwable error) { - // Use a local variable to do only a single single volatile read in the nominal case - List> errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - synchronized (CqlRequestHandler.this) { - errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - this.errors = errorsSnapshot = new CopyOnWriteArrayList<>(); - } - } - } - errorsSnapshot.add(new AbstractMap.SimpleEntry<>(node, error)); - } - - private void cancelScheduledTasks() { - if (this.scheduledTimeout != null) { - this.scheduledTimeout.cancel(); - } - if (scheduledExecutions != null) { - for (Timeout scheduledExecution : scheduledExecutions) { - scheduledExecution.cancel(); - } - } - for (NodeResponseCallback callback : inFlightCallbacks) { - callback.cancel(); - } - } - - private void setFinalResult( - Result resultMessage, - Frame responseFrame, - boolean schemaInAgreement, - NodeResponseCallback callback) { - try { - ExecutionInfo executionInfo = - buildExecutionInfo(callback, resultMessage, responseFrame, schemaInAgreement); - AsyncResultSet resultSet = - Conversions.toResultSet(resultMessage, executionInfo, session, context); - if (result.complete(resultSet)) { - cancelScheduledTasks(); - throttler.signalSuccess(this); - - // Only call nanoTime() if we're actually going to use it - long completionTimeNanos = NANOTIME_NOT_MEASURED_YET, - totalLatencyNanos = NANOTIME_NOT_MEASURED_YET; - - if (!(requestTracker instanceof NoopRequestTracker)) { - completionTimeNanos = System.nanoTime(); - totalLatencyNanos = completionTimeNanos - startTimeNanos; - long nodeLatencyNanos = completionTimeNanos - callback.nodeStartTimeNanos; - requestTracker.onNodeSuccess( - callback.statement, - nodeLatencyNanos, - executionProfile, - callback.node, - handlerLogPrefix); - requestTracker.onSuccess( - callback.statement, - totalLatencyNanos, - 
executionProfile, - callback.node, - handlerLogPrefix); - } - if (sessionMetricUpdater.isEnabled( - DefaultSessionMetric.CQL_REQUESTS, executionProfile.getName())) { - if (completionTimeNanos == NANOTIME_NOT_MEASURED_YET) { - completionTimeNanos = System.nanoTime(); - totalLatencyNanos = completionTimeNanos - startTimeNanos; - } - sessionMetricUpdater.updateTimer( - DefaultSessionMetric.CQL_REQUESTS, - executionProfile.getName(), - totalLatencyNanos, - TimeUnit.NANOSECONDS); - } - } - // log the warnings if they have NOT been disabled - if (!executionInfo.getWarnings().isEmpty() - && executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOG_WARNINGS) - && LOG.isWarnEnabled()) { - logServerWarnings(callback.statement, executionProfile, executionInfo.getWarnings()); - } - } catch (Throwable error) { - setFinalError(callback.statement, error, callback.node, -1); - } - } - - private void logServerWarnings( - Statement statement, DriverExecutionProfile executionProfile, List warnings) { - // use the RequestLogFormatter to format the query - StringBuilder statementString = new StringBuilder(); - context - .getRequestLogFormatter() - .appendRequest( - statement, - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH), - executionProfile.getBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_SHOW_VALUES), - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUES), - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH), - statementString); - // log each warning separately - warnings.forEach( - (warning) -> - LOG.warn("Query '{}' generated server side warning(s): {}", statementString, warning)); - } - - private ExecutionInfo buildExecutionInfo( - NodeResponseCallback callback, - Result resultMessage, - 
Frame responseFrame, - boolean schemaInAgreement) { - ByteBuffer pagingState = - (resultMessage instanceof Rows) ? ((Rows) resultMessage).getMetadata().pagingState : null; - return new DefaultExecutionInfo( - callback.statement, - callback.node, - startedSpeculativeExecutionsCount.get(), - callback.execution, - errors, - pagingState, - responseFrame, - schemaInAgreement, - session, - context, - executionProfile); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - sessionMetricUpdater.incrementCounter( - DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); - setFinalError(initialStatement, error, null, -1); - } - - private void setFinalError(Statement statement, Throwable error, Node node, int execution) { - if (error instanceof DriverException) { - ((DriverException) error) - .setExecutionInfo( - new DefaultExecutionInfo( - statement, - node, - startedSpeculativeExecutionsCount.get(), - execution, - errors, - null, - null, - true, - session, - context, - executionProfile)); - } - if (result.completeExceptionally(error)) { - cancelScheduledTasks(); - if (!(requestTracker instanceof NoopRequestTracker)) { - long latencyNanos = System.nanoTime() - startTimeNanos; - requestTracker.onError( - statement, error, latencyNanos, executionProfile, node, handlerLogPrefix); - } - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(this); - sessionMetricUpdater.incrementCounter( - DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, executionProfile.getName()); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(this, error); - } - } - } - - /** - * Handles the interaction with a single node in the query plan. - * - *

An instance of this class is created each time we (re)try a node. - */ - private class NodeResponseCallback - implements ResponseCallback, GenericFutureListener> { - - private final long nodeStartTimeNanos = System.nanoTime(); - private final Statement statement; - private final Node node; - private final Queue queryPlan; - private final DriverChannel channel; - // The identifier of the current execution (0 for the initial execution, 1 for the first - // speculative execution, etc.) - private final int execution; - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt of each execution). - private final int retryCount; - private final boolean scheduleNextExecution; - private final String logPrefix; - - private NodeResponseCallback( - Statement statement, - Node node, - Queue queryPlan, - DriverChannel channel, - int execution, - int retryCount, - boolean scheduleNextExecution, - String logPrefix) { - this.statement = statement; - this.node = node; - this.queryPlan = queryPlan; - this.channel = channel; - this.execution = execution; - this.retryCount = retryCount; - this.scheduleNextExecution = scheduleNextExecution; - this.logPrefix = logPrefix; - } - - // this gets invoked once the write completes. 
- @Override - public void operationComplete(Future future) throws Exception { - if (!future.isSuccess()) { - Throwable error = future.cause(); - if (error instanceof EncoderException - && error.getCause() instanceof FrameTooLongException) { - trackNodeError(node, error.getCause(), NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error.getCause(), node, execution); - } else { - LOG.trace( - "[{}] Failed to send request on {}, trying next node (cause: {})", - logPrefix, - channel, - error); - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter(DefaultNodeMetric.UNSENT_REQUESTS, executionProfile.getName()); - sendRequest( - statement, - null, - queryPlan, - execution, - retryCount, - scheduleNextExecution); // try next node - } - } else { - LOG.trace("[{}] Request sent on {}", logPrefix, channel); - if (result.isDone()) { - // If the handler completed since the last time we checked, cancel directly because we - // don't know if cancelScheduledTasks() has run yet - cancel(); - } else { - inFlightCallbacks.add(this); - if (scheduleNextExecution - && Conversions.resolveIdempotence(statement, executionProfile)) { - int nextExecution = execution + 1; - long nextDelay; - try { - nextDelay = - Conversions.resolveSpeculativeExecutionPolicy(context, executionProfile) - .nextExecution(node, keyspace, statement, nextExecution); - } catch (Throwable cause) { - // This is a bug in the policy, but not fatal since we have at least one other - // execution already running. Don't fail the whole request. 
- LOG.error( - "[{}] Unexpected error while invoking the speculative execution policy", - logPrefix, - cause); - return; - } - if (nextDelay >= 0) { - scheduleSpeculativeExecution(nextExecution, nextDelay); - } else { - LOG.trace( - "[{}] Speculative execution policy returned {}, no next execution", - logPrefix, - nextDelay); - } - } - } - } - } - - private void scheduleSpeculativeExecution(int index, long delay) { - LOG.trace("[{}] Scheduling speculative execution {} in {} ms", logPrefix, index, delay); - try { - scheduledExecutions.add( - timer.newTimeout( - (Timeout timeout1) -> { - if (!result.isDone()) { - LOG.trace( - "[{}] Starting speculative execution {}", - CqlRequestHandler.this.handlerLogPrefix, - index); - activeExecutionsCount.incrementAndGet(); - startedSpeculativeExecutionsCount.incrementAndGet(); - // Note that `node` is the first node of the execution, it might not be the - // "slow" one if there were retries, but in practice retries are rare. - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName()); - sendRequest(statement, null, queryPlan, index, 0, true); - } - }, - delay, - TimeUnit.MILLISECONDS)); - } catch (IllegalStateException e) { - // If we're racing with session shutdown, the timer might be stopped already. We don't want - // to schedule more executions anyway, so swallow the error. 
- if (!"cannot be started once stopped".equals(e.getMessage())) { - Loggers.warnWithException( - LOG, "[{}] Error while scheduling speculative execution", logPrefix, e); - } - } - } - - @Override - public void onResponse(Frame responseFrame) { - long nodeResponseTimeNanos = NANOTIME_NOT_MEASURED_YET; - NodeMetricUpdater nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.CQL_MESSAGES, executionProfile.getName())) { - nodeResponseTimeNanos = System.nanoTime(); - long nodeLatency = System.nanoTime() - nodeStartTimeNanos; - nodeMetricUpdater.updateTimer( - DefaultNodeMetric.CQL_MESSAGES, - executionProfile.getName(), - nodeLatency, - TimeUnit.NANOSECONDS); - } - inFlightCallbacks.remove(this); - if (result.isDone()) { - return; - } - try { - Message responseMessage = responseFrame.message; - if (responseMessage instanceof SchemaChange) { - SchemaChange schemaChange = (SchemaChange) responseMessage; - context - .getMetadataManager() - .refreshSchema(schemaChange.keyspace, false, false) - .whenComplete( - (result, error) -> { - boolean schemaInAgreement; - if (error != null) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema after DDL query, " - + "keeping previous version", - logPrefix, - error); - schemaInAgreement = false; - } else { - schemaInAgreement = result.isSchemaInAgreement(); - } - setFinalResult(schemaChange, responseFrame, schemaInAgreement, this); - }); - } else if (responseMessage instanceof SetKeyspace) { - SetKeyspace setKeyspace = (SetKeyspace) responseMessage; - session - .setKeyspace(CqlIdentifier.fromInternal(setKeyspace.keyspace)) - .whenComplete((v, error) -> setFinalResult(setKeyspace, responseFrame, true, this)); - } else if (responseMessage instanceof Result) { - LOG.trace("[{}] Got result, completing", logPrefix); - setFinalResult((Result) responseMessage, responseFrame, true, this); - } else if (responseMessage instanceof Error) { - 
LOG.trace("[{}] Got error response, processing", logPrefix); - processErrorResponse((Error) responseMessage); - } else { - trackNodeError( - node, - new IllegalStateException("Unexpected response " + responseMessage), - nodeResponseTimeNanos); - setFinalError( - statement, - new IllegalStateException("Unexpected response " + responseMessage), - node, - execution); - } - } catch (Throwable t) { - trackNodeError(node, t, nodeResponseTimeNanos); - setFinalError(statement, t, node, execution); - } - } - - private void processErrorResponse(Error errorMessage) { - if (errorMessage.code == ProtocolConstants.ErrorCode.UNPREPARED) { - ByteBuffer idToReprepare = ByteBuffer.wrap(((Unprepared) errorMessage).id); - LOG.trace( - "[{}] Statement {} is not prepared on {}, repreparing", - logPrefix, - Bytes.toHexString(idToReprepare), - node); - RepreparePayload repreparePayload = session.getRepreparePayloads().get(idToReprepare); - if (repreparePayload == null) { - throw new IllegalStateException( - String.format( - "Tried to execute unprepared query %s but we don't have the data to reprepare it", - Bytes.toHexString(idToReprepare))); - } - Prepare reprepareMessage = repreparePayload.toMessage(); - ThrottledAdminRequestHandler reprepareHandler = - ThrottledAdminRequestHandler.prepare( - channel, - true, - reprepareMessage, - repreparePayload.customPayload, - Conversions.resolveRequestTimeout(statement, executionProfile), - throttler, - sessionMetricUpdater, - logPrefix); - reprepareHandler - .start() - .handle( - (repreparedId, exception) -> { - if (exception != null) { - // If the error is not recoverable, surface it to the client instead of retrying - if (exception instanceof UnexpectedResponseException) { - Message prepareErrorMessage = - ((UnexpectedResponseException) exception).message; - if (prepareErrorMessage instanceof Error) { - CoordinatorException prepareError = - Conversions.toThrowable(node, (Error) prepareErrorMessage, context); - if (prepareError instanceof 
QueryValidationException - || prepareError instanceof FunctionFailureException - || prepareError instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error on reprepare, rethrowing", logPrefix); - trackNodeError(node, prepareError, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, prepareError, node, execution); - return null; - } - } - } else if (exception instanceof RequestThrottlingException) { - trackNodeError(node, exception, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, exception, node, execution); - return null; - } - recordError(node, exception); - trackNodeError(node, exception, NANOTIME_NOT_MEASURED_YET); - LOG.trace("[{}] Reprepare failed, trying next node", logPrefix); - sendRequest(statement, null, queryPlan, execution, retryCount, false); - } else { - if (!repreparedId.equals(idToReprepare)) { - IllegalStateException illegalStateException = - new IllegalStateException( - String.format( - "ID mismatch while trying to reprepare (expected %s, got %s). " - + "This prepared statement won't work anymore. " - + "This usually happens when you run a 'USE...' 
query after " - + "the statement was prepared.", - Bytes.toHexString(idToReprepare), - Bytes.toHexString(repreparedId))); - trackNodeError(node, illegalStateException, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, illegalStateException, node, execution); - } - LOG.trace("[{}] Reprepare sucessful, retrying", logPrefix); - sendRequest(statement, node, queryPlan, execution, retryCount, false); - } - return null; - }); - return; - } - CoordinatorException error = Conversions.toThrowable(node, errorMessage, context); - NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (error instanceof BootstrappingException) { - LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest(statement, null, queryPlan, execution, retryCount, false); - } else if (error instanceof QueryValidationException - || error instanceof FunctionFailureException - || error instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); - metricUpdater.incrementCounter(DefaultNodeMetric.OTHER_ERRORS, executionProfile.getName()); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error, node, execution); - } else { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - RetryVerdict verdict; - if (error instanceof ReadTimeoutException) { - ReadTimeoutException readTimeout = (ReadTimeoutException) error; - verdict = - retryPolicy.onReadTimeoutVerdict( - statement, - readTimeout.getConsistencyLevel(), - readTimeout.getBlockFor(), - readTimeout.getReceived(), - readTimeout.wasDataPresent(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT); - } else if (error instanceof WriteTimeoutException) { - WriteTimeoutException 
writeTimeout = (WriteTimeoutException) error; - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? retryPolicy.onWriteTimeoutVerdict( - statement, - writeTimeout.getConsistencyLevel(), - writeTimeout.getWriteType(), - writeTimeout.getBlockFor(), - writeTimeout.getReceived(), - retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT); - } else if (error instanceof UnavailableException) { - UnavailableException unavailable = (UnavailableException) error; - verdict = - retryPolicy.onUnavailableVerdict( - statement, - unavailable.getConsistencyLevel(), - unavailable.getRequired(), - unavailable.getAlive(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.UNAVAILABLES, - DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE); - } else { - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? 
retryPolicy.onErrorResponseVerdict(statement, error, retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR); - } - processRetryVerdict(verdict, error); - } - } - - private void processRetryVerdict(RetryVerdict verdict, Throwable error) { - LOG.trace("[{}] Processing retry decision {}", logPrefix, verdict); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest( - verdict.getRetryRequest(statement), - node, - queryPlan, - execution, - retryCount + 1, - false); - break; - case RETRY_NEXT: - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest( - verdict.getRetryRequest(statement), - null, - queryPlan, - execution, - retryCount + 1, - false); - break; - case RETHROW: - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error, node, execution); - break; - case IGNORE: - setFinalResult(Void.INSTANCE, null, true, this); - break; - } - } - - private void updateErrorMetrics( - NodeMetricUpdater metricUpdater, - RetryVerdict verdict, - DefaultNodeMetric error, - DefaultNodeMetric retriesOnError, - DefaultNodeMetric ignoresOnError) { - metricUpdater.incrementCounter(error, executionProfile.getName()); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - case RETRY_NEXT: - metricUpdater.incrementCounter(DefaultNodeMetric.RETRIES, executionProfile.getName()); - metricUpdater.incrementCounter(retriesOnError, executionProfile.getName()); - break; - case IGNORE: - metricUpdater.incrementCounter(DefaultNodeMetric.IGNORES, executionProfile.getName()); - metricUpdater.incrementCounter(ignoresOnError, executionProfile.getName()); - break; - case RETHROW: - // nothing do do - } - } - - @Override - public void onFailure(Throwable error) { - 
inFlightCallbacks.remove(this); - if (result.isDone()) { - return; - } - LOG.trace("[{}] Request failure, processing: {}", logPrefix, error); - RetryVerdict verdict; - if (!Conversions.resolveIdempotence(statement, executionProfile) - || error instanceof FrameTooLongException) { - verdict = RetryVerdict.RETHROW; - } else { - try { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - verdict = retryPolicy.onRequestAbortedVerdict(statement, error, retryCount); - } catch (Throwable cause) { - setFinalError( - statement, - new IllegalStateException("Unexpected error while invoking the retry policy", cause), - null, - execution); - return; - } - } - processRetryVerdict(verdict, error); - updateErrorMetrics( - ((DefaultNode) node).getMetricUpdater(), - verdict, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED); - } - - public void cancel() { - try { - if (!channel.closeFuture().isDone()) { - this.channel.cancel(this); - } - } catch (Throwable t) { - Loggers.warnWithException(LOG, "[{}] Error cancelling", logPrefix, t); - } - } - - /** - * @param nodeResponseTimeNanos the time we received the response, if it's already been - * measured. 
If {@link #NANOTIME_NOT_MEASURED_YET}, it hasn't and we need to measure it now - * (this is to avoid unnecessary calls to System.nanoTime) - */ - private void trackNodeError(Node node, Throwable error, long nodeResponseTimeNanos) { - if (requestTracker instanceof NoopRequestTracker) { - return; - } - if (nodeResponseTimeNanos == NANOTIME_NOT_MEASURED_YET) { - nodeResponseTimeNanos = System.nanoTime(); - } - long latencyNanos = nodeResponseTimeNanos - this.nodeStartTimeNanos; - requestTracker.onNodeError(statement, error, latencyNanos, executionProfile, node, logPrefix); - } - - @Override - public String toString() { - return logPrefix; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java deleted file mode 100644 index d3bd40149fb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlRequestSyncProcessor implements RequestProcessor, ResultSet> { - - private final CqlRequestAsyncProcessor asyncProcessor; - - public CqlRequestSyncProcessor(CqlRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(Statement.SYNC); - } - - @Override - public ResultSet process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - BlockingOperation.checkNotDriverThread(); - AsyncResultSet firstPage = - CompletableFutures.getUninterruptibly( - asyncProcessor.process(request, session, context, sessionLogPrefix)); - return ResultSets.newInstance(firstPage); - } - - @Override - public ResultSet newFailure(RuntimeException error) { - throw error; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java deleted file mode 100644 index 243e9aeb775..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@NotThreadSafe // wraps a mutable queue -public class DefaultAsyncResultSet implements AsyncResultSet { - - private static final Logger LOG = 
LoggerFactory.getLogger(DefaultAsyncResultSet.class); - - private final ColumnDefinitions definitions; - private final ExecutionInfo executionInfo; - private final CqlSession session; - private final CountingIterator iterator; - private final Iterable currentPage; - - public DefaultAsyncResultSet( - ColumnDefinitions definitions, - ExecutionInfo executionInfo, - Queue> data, - CqlSession session, - InternalDriverContext context) { - this.definitions = definitions; - this.executionInfo = executionInfo; - this.session = session; - this.iterator = - new CountingIterator(data.size()) { - @Override - protected Row computeNext() { - List rowData = data.poll(); - return (rowData == null) ? endOfData() : new DefaultRow(definitions, rowData, context); - } - }; - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return definitions; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public int remaining() { - return iterator.remaining(); - } - - @Override - public boolean hasMorePages() { - return executionInfo.getPagingState() != null; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - ByteBuffer nextState = executionInfo.getPagingState(); - if (nextState == null) { - throw new IllegalStateException( - "No next page. 
Use #hasMorePages before calling this method to avoid this error."); - } - Statement statement = (Statement) executionInfo.getRequest(); - LOG.trace("Fetching next page for {}", statement); - Statement nextStatement = statement.copy(nextState); - return session.executeAsync(nextStatement); - } - - @Override - public boolean wasApplied() { - if (!definitions.contains("[applied]") - || !definitions.get("[applied]").getType().equals(DataTypes.BOOLEAN)) { - return true; - } else if (iterator.hasNext()) { - // Note that [applied] has the same value for all rows, so as long as we have a row we don't - // care which one it is. - return iterator.peek().getBoolean("[applied]"); - } else { - // If the server provided [applied], it means there was at least one row. So if we get here it - // means the client consumed all the rows before, we can't handle that case because we have - // nowhere left to read the boolean from. - throw new IllegalStateException("This method must be called before consuming all the rows"); - } - } - - static AsyncResultSet empty(final ExecutionInfo executionInfo) { - return new AsyncResultSet() { - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return EmptyColumnDefinitions.INSTANCE; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - public Iterable currentPage() { - return Collections.emptyList(); - } - - @Override - public int remaining() { - return 0; - } - - @Override - public boolean hasMorePages() { - return false; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - throw new IllegalStateException( - "No next page. 
Use #hasMorePages before calling this method to avoid this error."); - } - - @Override - public boolean wasApplied() { - return true; - } - }; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java deleted file mode 100644 index 38b6cf242a1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java +++ /dev/null @@ -1,788 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchType; -import com.datastax.oss.driver.api.core.cql.BatchableStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultBatchStatement implements BatchStatement { - - private final BatchType batchType; - private final List> statements; - private final String executionProfileName; - private final DriverExecutionProfile executionProfile; - private final CqlIdentifier keyspace; - private final CqlIdentifier routingKeyspace; - private final ByteBuffer routingKey; - private final Token routingToken; - private final Map customPayload; - private final Boolean idempotent; - private final boolean tracing; - private final long timestamp; - private final ByteBuffer pagingState; - private final int pageSize; - private final ConsistencyLevel consistencyLevel; - private final ConsistencyLevel serialConsistencyLevel; - private final Duration timeout; - private final Node node; - private final int nowInSeconds; - - public DefaultBatchStatement( - BatchType batchType, - List> statements, - String executionProfileName, - DriverExecutionProfile 
executionProfile, - CqlIdentifier keyspace, - CqlIdentifier routingKeyspace, - ByteBuffer routingKey, - Token routingToken, - Map customPayload, - Boolean idempotent, - boolean tracing, - long timestamp, - ByteBuffer pagingState, - int pageSize, - ConsistencyLevel consistencyLevel, - ConsistencyLevel serialConsistencyLevel, - Duration timeout, - Node node, - int nowInSeconds) { - this.batchType = batchType; - this.statements = ImmutableList.copyOf(statements); - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - this.keyspace = keyspace; - this.routingKeyspace = routingKeyspace; - this.routingKey = routingKey; - this.routingToken = routingToken; - this.customPayload = customPayload; - this.idempotent = idempotent; - this.tracing = tracing; - this.timestamp = timestamp; - this.pagingState = pagingState; - this.pageSize = pageSize; - this.consistencyLevel = consistencyLevel; - this.serialConsistencyLevel = serialConsistencyLevel; - this.timeout = timeout; - this.node = node; - this.nowInSeconds = nowInSeconds; - } - - @NonNull - @Override - public BatchType getBatchType() { - return batchType; - } - - @NonNull - @Override - public BatchStatement setBatchType(@NonNull BatchType newBatchType) { - return new DefaultBatchStatement( - newBatchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public BatchStatement setKeyspace(@Nullable CqlIdentifier newKeyspace) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - newKeyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - 
timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public BatchStatement add(@NonNull BatchableStatement statement) { - if (statements.size() >= 0xFFFF) { - throw new IllegalStateException( - "Batch statement cannot contain more than " + 0xFFFF + " statements."); - } else { - return new DefaultBatchStatement( - batchType, - ImmutableList.>builder().addAll(statements).add(statement).build(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - } - - @NonNull - @Override - public BatchStatement addAll(@NonNull Iterable> newStatements) { - if (statements.size() + Iterables.size(newStatements) > 0xFFFF) { - throw new IllegalStateException( - "Batch statement cannot contain more than " + 0xFFFF + " statements."); - } else { - return new DefaultBatchStatement( - batchType, - ImmutableList.>builder() - .addAll(statements) - .addAll(newStatements) - .build(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - } - - @Override - public int size() { - return statements.size(); - } - - @NonNull - @Override - public BatchStatement clear() { - return new DefaultBatchStatement( - batchType, - ImmutableList.of(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public Iterator> iterator() { - return statements.iterator(); - } - - @Override - public ByteBuffer getPagingState() { - 
return pagingState; - } - - @NonNull - @Override - public BatchStatement setPagingState(ByteBuffer newPagingState) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - newPagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public int getPageSize() { - return pageSize; - } - - @NonNull - @Override - public BatchStatement setPageSize(int newPageSize) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - newPageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - @NonNull - @Override - public BatchStatement setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - newConsistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getSerialConsistencyLevel() { - return serialConsistencyLevel; - } - - @NonNull - @Override - public BatchStatement setSerialConsistencyLevel( - @Nullable ConsistencyLevel newSerialConsistencyLevel) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - 
pageSize, - consistencyLevel, - newSerialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public String getExecutionProfileName() { - return executionProfileName; - } - - @NonNull - @Override - public BatchStatement setExecutionProfileName(@Nullable String newConfigProfileName) { - return new DefaultBatchStatement( - batchType, - statements, - newConfigProfileName, - (newConfigProfileName == null) ? executionProfile : null, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @NonNull - @Override - public DefaultBatchStatement setExecutionProfile(@Nullable DriverExecutionProfile newProfile) { - return new DefaultBatchStatement( - batchType, - statements, - (newProfile == null) ? executionProfileName : null, - newProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public CqlIdentifier getKeyspace() { - if (keyspace != null) { - return keyspace; - } else { - for (BatchableStatement statement : statements) { - if (statement instanceof SimpleStatement && statement.getKeyspace() != null) { - return statement.getKeyspace(); - } - } - } - return null; - } - - @Override - public CqlIdentifier getRoutingKeyspace() { - if (routingKeyspace != null) { - return routingKeyspace; - } else { - for (BatchableStatement statement : statements) { - CqlIdentifier ks = statement.getRoutingKeyspace(); - if (ks != null) { - return ks; - } - } - } - return null; - } - - @NonNull - @Override - public BatchStatement setRoutingKeyspace(CqlIdentifier newRoutingKeyspace) { - return new 
DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - newRoutingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public BatchStatement setNode(@Nullable Node newNode) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - newNode, - nowInSeconds); - } - - @Nullable - @Override - public Node getNode() { - return node; - } - - @Override - public ByteBuffer getRoutingKey() { - if (routingKey != null) { - return routingKey; - } else { - for (BatchableStatement statement : statements) { - ByteBuffer key = statement.getRoutingKey(); - if (key != null) { - return key; - } - } - } - return null; - } - - @NonNull - @Override - public BatchStatement setRoutingKey(ByteBuffer newRoutingKey) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - newRoutingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public Token getRoutingToken() { - if (routingToken != null) { - return routingToken; - } else { - for (BatchableStatement statement : statements) { - Token token = statement.getRoutingToken(); - if (token != null) { - return token; - } - } - } - return null; - } - - @NonNull - @Override - public BatchStatement setRoutingToken(Token newRoutingToken) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - 
executionProfile, - keyspace, - routingKeyspace, - routingKey, - newRoutingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public Map getCustomPayload() { - return customPayload; - } - - @NonNull - @Override - public DefaultBatchStatement setCustomPayload(@NonNull Map newCustomPayload) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - newCustomPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public Boolean isIdempotent() { - return idempotent; - } - - @Nullable - @Override - public Duration getTimeout() { - return null; - } - - @NonNull - @Override - public DefaultBatchStatement setIdempotent(Boolean newIdempotence) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - newIdempotence, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public boolean isTracing() { - return tracing; - } - - @NonNull - @Override - public BatchStatement setTracing(boolean newTracing) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - newTracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public long getQueryTimestamp() { - return timestamp; - } - - @NonNull - @Override - public BatchStatement 
setQueryTimestamp(long newTimestamp) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - newTimestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public BatchStatement setTimeout(@Nullable Duration newTimeout) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - newTimeout, - node, - nowInSeconds); - } - - @Override - public int getNowInSeconds() { - return nowInSeconds; - } - - @NonNull - @Override - public BatchStatement setNowInSeconds(int newNowInSeconds) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - newNowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java deleted file mode 100644 index 3cf99c1be6e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java +++ /dev/null @@ -1,764 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.util.RoutingKey; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultBoundStatement implements BoundStatement { - - private final PreparedStatement preparedStatement; - private final ColumnDefinitions variableDefinitions; - private final ByteBuffer[] values; - private final String executionProfileName; - private final DriverExecutionProfile executionProfile; - private 
final CqlIdentifier routingKeyspace; - private final ByteBuffer routingKey; - private final Token routingToken; - private final Map customPayload; - private final Boolean idempotent; - private final boolean tracing; - private final long timestamp; - private final ByteBuffer pagingState; - private final int pageSize; - private final ConsistencyLevel consistencyLevel; - private final ConsistencyLevel serialConsistencyLevel; - private final Duration timeout; - private final CodecRegistry codecRegistry; - private final ProtocolVersion protocolVersion; - private final Node node; - private final int nowInSeconds; - - public DefaultBoundStatement( - PreparedStatement preparedStatement, - ColumnDefinitions variableDefinitions, - ByteBuffer[] values, - String executionProfileName, - DriverExecutionProfile executionProfile, - CqlIdentifier routingKeyspace, - ByteBuffer routingKey, - Token routingToken, - Map customPayload, - Boolean idempotent, - boolean tracing, - long timestamp, - ByteBuffer pagingState, - int pageSize, - ConsistencyLevel consistencyLevel, - ConsistencyLevel serialConsistencyLevel, - Duration timeout, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion, - Node node, - int nowInSeconds) { - this.preparedStatement = preparedStatement; - this.variableDefinitions = variableDefinitions; - this.values = values; - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - this.routingKeyspace = routingKeyspace; - this.routingKey = routingKey; - this.routingToken = routingToken; - this.customPayload = customPayload; - this.idempotent = idempotent; - this.tracing = tracing; - this.timestamp = timestamp; - this.pagingState = pagingState; - this.pageSize = pageSize; - this.consistencyLevel = consistencyLevel; - this.serialConsistencyLevel = serialConsistencyLevel; - this.timeout = timeout; - this.codecRegistry = codecRegistry; - this.protocolVersion = protocolVersion; - this.node = node; - this.nowInSeconds = 
nowInSeconds; - } - - @Override - public int size() { - return variableDefinitions.size(); - } - - @NonNull - @Override - public DataType getType(int i) { - return variableDefinitions.get(i).getType(); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - List indices = variableDefinitions.allIndicesOf(id); - if (indices.isEmpty()) { - throw new IllegalArgumentException(id + " is not a variable in this bound statement"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - int indexOf = variableDefinitions.firstIndexOf(id); - if (indexOf == -1) { - throw new IllegalArgumentException(id + " is not a variable in this bound statement"); - } - return indexOf; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - List indices = variableDefinitions.allIndicesOf(name); - if (indices.isEmpty()) { - throw new IllegalArgumentException(name + " is not a variable in this bound statement"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull String name) { - int indexOf = variableDefinitions.firstIndexOf(name); - if (indexOf == -1) { - throw new IllegalArgumentException(name + " is not a variable in this bound statement"); - } - return indexOf; - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return codecRegistry; - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return protocolVersion; - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return values[i]; - } - - @NonNull - @Override - public BoundStatement setBytesUnsafe(int i, ByteBuffer v) { - ByteBuffer[] newValues = Arrays.copyOf(values, values.length); - newValues[i] = v; - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - newValues, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - 
consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @NonNull - @Override - public PreparedStatement getPreparedStatement() { - return preparedStatement; - } - - @NonNull - @Override - public List getValues() { - return Arrays.asList(values); - } - - @Override - public String getExecutionProfileName() { - return executionProfileName; - } - - @NonNull - @Override - public BoundStatement setExecutionProfileName(@Nullable String newConfigProfileName) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - newConfigProfileName, - (newConfigProfileName == null) ? executionProfile : null, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @NonNull - @Override - public BoundStatement setExecutionProfile(@Nullable DriverExecutionProfile newProfile) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - (newProfile == null) ? executionProfileName : null, - newProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public CqlIdentifier getRoutingKeyspace() { - // If it was set explicitly, use that value, else try to infer it from the prepared statement's - // metadata - if (routingKeyspace != null) { - return routingKeyspace; - } else { - ColumnDefinitions definitions = preparedStatement.getVariableDefinitions(); - return (definitions.size() == 0) ? 
null : definitions.get(0).getKeyspace(); - } - } - - @NonNull - @Override - public BoundStatement setRoutingKeyspace(@Nullable CqlIdentifier newRoutingKeyspace) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - newRoutingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @NonNull - @Override - public BoundStatement setNode(@Nullable Node newNode) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - newNode, - nowInSeconds); - } - - @Nullable - @Override - public Node getNode() { - return node; - } - - @Override - public ByteBuffer getRoutingKey() { - if (routingKey != null) { - return routingKey; - } else { - List indices = preparedStatement.getPartitionKeyIndices(); - if (indices.isEmpty()) { - return null; - } else if (indices.size() == 1) { - int index = indices.get(0); - return isSet(index) ? 
getBytesUnsafe(index) : null; - } else { - ByteBuffer[] components = new ByteBuffer[indices.size()]; - for (int i = 0; i < components.length; i++) { - ByteBuffer value; - int index = indices.get(i); - if (!isSet(index) || (value = getBytesUnsafe(index)) == null) { - return null; - } else { - components[i] = value; - } - } - return RoutingKey.compose(components); - } - } - } - - @NonNull - @Override - public BoundStatement setRoutingKey(@Nullable ByteBuffer newRoutingKey) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - newRoutingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public Token getRoutingToken() { - return routingToken; - } - - @NonNull - @Override - public BoundStatement setRoutingToken(@Nullable Token newRoutingToken) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - newRoutingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @NonNull - @Override - public Map getCustomPayload() { - return customPayload; - } - - @NonNull - @Override - public BoundStatement setCustomPayload(@NonNull Map newCustomPayload) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - newCustomPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - 
nowInSeconds); - } - - @Override - public Boolean isIdempotent() { - return idempotent; - } - - @NonNull - @Override - public BoundStatement setIdempotent(@Nullable Boolean newIdempotence) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - newIdempotence, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public boolean isTracing() { - return tracing; - } - - @NonNull - @Override - public BoundStatement setTracing(boolean newTracing) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - newTracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public long getQueryTimestamp() { - return timestamp; - } - - @NonNull - @Override - public BoundStatement setQueryTimestamp(long newTimestamp) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - newTimestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Nullable - @Override - public Duration getTimeout() { - return timeout; - } - - @NonNull - @Override - public BoundStatement setTimeout(@Nullable Duration newTimeout) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, 
- routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - newTimeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public ByteBuffer getPagingState() { - return pagingState; - } - - @NonNull - @Override - public BoundStatement setPagingState(@Nullable ByteBuffer newPagingState) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - newPagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public int getPageSize() { - return pageSize; - } - - @NonNull - @Override - public BoundStatement setPageSize(int newPageSize) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - newPageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - @NonNull - @Override - public BoundStatement setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - newConsistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Nullable - @Override - public 
ConsistencyLevel getSerialConsistencyLevel() { - return serialConsistencyLevel; - } - - @NonNull - @Override - public BoundStatement setSerialConsistencyLevel( - @Nullable ConsistencyLevel newSerialConsistencyLevel) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - newSerialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public int getNowInSeconds() { - return nowInSeconds; - } - - @NonNull - @Override - public BoundStatement setNowInSeconds(int newNowInSeconds) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - newNowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java deleted file mode 100644 index e003637c07f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.type.DataTypeHelper; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultColumnDefinition implements ColumnDefinition, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final CqlIdentifier keyspace; - /** @serial */ - private final CqlIdentifier table; - /** @serial */ - private final CqlIdentifier name; - /** @serial */ - private final DataType type; - - /** @param spec the raw data decoded by the protocol layer */ - public DefaultColumnDefinition( - @NonNull ColumnSpec spec, @NonNull AttachmentPoint attachmentPoint) { - this.keyspace = CqlIdentifier.fromInternal(spec.ksName); - this.table = CqlIdentifier.fromInternal(spec.tableName); - this.name = CqlIdentifier.fromInternal(spec.name); - this.type = DataTypeHelper.fromProtocolSpec(spec.type, attachmentPoint); - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getTable() { - return table; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return 
name; - } - - @NonNull - @Override - public DataType getType() { - return type; - } - - @Override - public boolean isDetached() { - return type.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - type.attach(attachmentPoint); - } - - @Override - public String toString() { - return keyspace.asCql(true) + "." + table.asCql(true) + "." + name.asCql(true) + " " + type; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java deleted file mode 100644 index 58304cb4f67..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.internal.core.data.IdentifierIndex; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultColumnDefinitions implements ColumnDefinitions, Serializable { - - public static ColumnDefinitions valueOf(List definitions) { - return definitions.isEmpty() - ? EmptyColumnDefinitions.INSTANCE - : new DefaultColumnDefinitions(definitions); - } - - private final List definitions; - private final IdentifierIndex index; - - private DefaultColumnDefinitions(List definitions) { - assert definitions != null && definitions.size() > 0; - this.definitions = definitions; - this.index = buildIndex(definitions); - } - - @Override - public int size() { - return definitions.size(); - } - - @NonNull - @Override - public ColumnDefinition get(int i) { - return definitions.get(i); - } - - @NonNull - @Override - public Iterator iterator() { - return definitions.iterator(); - } - - @Override - public boolean contains(@NonNull String name) { - return index.firstIndexOf(name) >= 0; - } - - @Override - public boolean contains(@NonNull CqlIdentifier id) { - return index.firstIndexOf(id) >= 0; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return index.allIndicesOf(name); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return index.firstIndexOf(name); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return 
index.allIndicesOf(id); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return index.firstIndexOf(id); - } - - @Override - public boolean isDetached() { - return definitions.get(0).isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - for (ColumnDefinition definition : definitions) { - definition.attach(attachmentPoint); - } - } - - private static IdentifierIndex buildIndex(List definitions) { - List identifiers = new ArrayList<>(definitions.size()); - for (ColumnDefinition definition : definitions) { - identifiers.add(definition.getName()); - } - return new IdentifierIndex(identifiers); - } - - /** - * @serialData The list of definitions (the identifier index is reconstructed at deserialization). - */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private final List definitions; - - private SerializationProxy(DefaultColumnDefinitions columnDefinitions) { - this.definitions = columnDefinitions.definitions; - } - - private Object readResolve() { - return new DefaultColumnDefinitions(this.definitions); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java deleted file mode 100644 index 3ab57ddc598..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.PagingState; -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.protocol.internal.Frame; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultExecutionInfo implements ExecutionInfo { - - private final Request request; - private final Node coordinator; - private final int speculativeExecutionCount; - private final int 
successfulExecutionIndex; - private final List> errors; - private final ByteBuffer pagingState; - private final UUID tracingId; - private final int responseSizeInBytes; - private final int compressedResponseSizeInBytes; - private final List warnings; - private final Map customPayload; - private final boolean schemaInAgreement; - private final DefaultSession session; - private final InternalDriverContext context; - private final DriverExecutionProfile executionProfile; - - public DefaultExecutionInfo( - Request request, - Node coordinator, - int speculativeExecutionCount, - int successfulExecutionIndex, - List> errors, - ByteBuffer pagingState, - Frame frame, - boolean schemaInAgreement, - DefaultSession session, - InternalDriverContext context, - DriverExecutionProfile executionProfile) { - - this.request = request; - this.coordinator = coordinator; - this.speculativeExecutionCount = speculativeExecutionCount; - this.successfulExecutionIndex = successfulExecutionIndex; - this.errors = errors; - this.pagingState = pagingState; - - this.tracingId = (frame == null) ? null : frame.tracingId; - this.responseSizeInBytes = (frame == null) ? -1 : frame.size; - this.compressedResponseSizeInBytes = (frame == null) ? -1 : frame.compressedSize; - // Note: the collections returned by the protocol layer are already unmodifiable - this.warnings = (frame == null) ? Collections.emptyList() : frame.warnings; - this.customPayload = (frame == null) ? 
Collections.emptyMap() : frame.customPayload; - this.schemaInAgreement = schemaInAgreement; - this.session = session; - this.context = context; - this.executionProfile = executionProfile; - } - - @NonNull - @Override - @Deprecated - public Statement getStatement() { - return (Statement) request; - } - - @NonNull - @Override - public Request getRequest() { - return request; - } - - @Nullable - @Override - public Node getCoordinator() { - return coordinator; - } - - @Override - public int getSpeculativeExecutionCount() { - return speculativeExecutionCount; - } - - @Override - public int getSuccessfulExecutionIndex() { - return successfulExecutionIndex; - } - - @NonNull - @Override - public List> getErrors() { - // Assume this method will be called 0 or 1 time, so we create the unmodifiable wrapper on - // demand. - return (errors == null) ? Collections.emptyList() : Collections.unmodifiableList(errors); - } - - @Override - @Nullable - public ByteBuffer getPagingState() { - return pagingState; - } - - @Nullable - @Override - public PagingState getSafePagingState() { - if (pagingState == null) { - return null; - } else { - if (!(request instanceof Statement)) { - throw new IllegalStateException("Only statements should have a paging state"); - } - Statement statement = (Statement) request; - return new DefaultPagingState(pagingState, statement, session.getContext()); - } - } - - @NonNull - @Override - public List getWarnings() { - return warnings; - } - - @NonNull - @Override - public Map getIncomingPayload() { - return customPayload; - } - - @Override - public boolean isSchemaInAgreement() { - return schemaInAgreement; - } - - @Override - @Nullable - public UUID getTracingId() { - return tracingId; - } - - @NonNull - @Override - public CompletionStage getQueryTraceAsync() { - if (tracingId == null) { - return CompletableFutures.failedFuture( - new IllegalStateException("Tracing was disabled for this request")); - } else { - return new QueryTraceFetcher(tracingId, 
session, context, executionProfile).fetch(); - } - } - - @Override - public int getResponseSizeInBytes() { - return responseSizeInBytes; - } - - @Override - public int getCompressedResponseSizeInBytes() { - return compressedResponseSizeInBytes; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPagingState.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPagingState.java deleted file mode 100644 index 71243285e3e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPagingState.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PagingState; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.data.ValuesHelper; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.nio.charset.Charset; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Arrays; - -public class DefaultPagingState implements PagingState { - - private final ByteBuffer rawPagingState; - private final byte[] hash; - private final int protocolVersion; - - public DefaultPagingState( - ByteBuffer rawPagingState, Statement statement, AttachmentPoint attachmentPoint) { - this( - rawPagingState, - hash(statement, rawPagingState, attachmentPoint), - attachmentPoint.getProtocolVersion().getCode()); - } - - private DefaultPagingState(ByteBuffer rawPagingState, byte[] hash, int protocolVersion) { - this.rawPagingState = rawPagingState; - this.hash = hash; - this.protocolVersion = protocolVersion; - } - - // Same serialized form as in driver 3: - // size of raw state|size of hash|raw state|hash|protocol version - // - // The protocol version might be absent, in which case it defaults to V2 (this is for backward - // compatibility with 2.0.10 where it is always absent). 
- public static DefaultPagingState fromBytes(byte[] bytes) { - ByteBuffer buffer = ByteBuffer.wrap(bytes); - short rawPagingStateLength = buffer.getShort(); - short hashLength = buffer.getShort(); - int length = rawPagingStateLength + hashLength + 2; - int legacyLength = rawPagingStateLength + hashLength; // without protocol version - if (buffer.remaining() != length && buffer.remaining() != legacyLength) { - throw new IllegalArgumentException( - "Cannot deserialize paging state, invalid format. The serialized form was corrupted, " - + "or not initially generated from a PagingState object."); - } - byte[] rawPagingState = new byte[rawPagingStateLength]; - buffer.get(rawPagingState); - byte[] hash = new byte[hashLength]; - buffer.get(hash); - int protocolVersion = buffer.hasRemaining() ? buffer.getShort() : 2; - return new DefaultPagingState(ByteBuffer.wrap(rawPagingState), hash, protocolVersion); - } - - @Override - public byte[] toBytes() { - ByteBuffer buffer = ByteBuffer.allocate(rawPagingState.remaining() + hash.length + 6); - buffer.putShort((short) rawPagingState.remaining()); - buffer.putShort((short) hash.length); - buffer.put(rawPagingState.duplicate()); - buffer.put(hash); - buffer.putShort((short) protocolVersion); - buffer.rewind(); - return buffer.array(); - } - - public static DefaultPagingState fromString(String string) { - byte[] bytes = Bytes.getArray(Bytes.fromHexString("0x" + string)); - return fromBytes(bytes); - } - - @Override - public String toString() { - return Bytes.toHexString(toBytes()).substring(2); // remove "0x" prefix - } - - @Override - public boolean matches(@NonNull Statement statement, @Nullable Session session) { - AttachmentPoint attachmentPoint = - (session == null) ? 
AttachmentPoint.NONE : session.getContext(); - byte[] actual = hash(statement, rawPagingState, attachmentPoint); - return Arrays.equals(actual, hash); - } - - @NonNull - @Override - public ByteBuffer getRawPagingState() { - return rawPagingState; - } - - // Hashes a statement's query string and parameters. We also include the paging state itself in - // the hash computation, to make the serialized form a bit more resistant to manual tampering. - private static byte[] hash( - @NonNull Statement statement, - ByteBuffer rawPagingState, - @NonNull AttachmentPoint attachmentPoint) { - // Batch statements don't have paging, the driver should never call this method for one - assert !(statement instanceof BatchStatement); - - MessageDigest messageDigest; - try { - messageDigest = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new IllegalStateException( - "It looks like this JVM doesn't support MD5 digests, " - + "can't use the rich paging state feature", - e); - } - if (statement instanceof BoundStatement) { - BoundStatement boundStatement = (BoundStatement) statement; - String queryString = boundStatement.getPreparedStatement().getQuery(); - messageDigest.update(queryString.getBytes(Charset.defaultCharset())); - for (ByteBuffer value : boundStatement.getValues()) { - messageDigest.update(value.duplicate()); - } - } else { - SimpleStatement simpleStatement = (SimpleStatement) statement; - String queryString = simpleStatement.getQuery(); - messageDigest.update(queryString.getBytes(Charset.defaultCharset())); - for (Object value : simpleStatement.getPositionalValues()) { - ByteBuffer encodedValue = - ValuesHelper.encodeToDefaultCqlMapping( - value, attachmentPoint.getCodecRegistry(), attachmentPoint.getProtocolVersion()); - messageDigest.update(encodedValue); - } - for (Object value : simpleStatement.getNamedValues().values()) { - ByteBuffer encodedValue = - ValuesHelper.encodeToDefaultCqlMapping( - value, 
attachmentPoint.getCodecRegistry(), attachmentPoint.getProtocolVersion()); - messageDigest.update(encodedValue); - } - } - messageDigest.update(rawPagingState.duplicate()); - return messageDigest.digest(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java deleted file mode 100644 index 7f87dbe5b51..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.Immutable; - -/** - * Default implementation of a prepare request, which is built internally to handle calls such as - * {@link CqlSession#prepare(String)} and {@link CqlSession#prepare(SimpleStatement)}. - * - *

When built from a {@link SimpleStatement}, it propagates the attributes to bound statements - * according to the rules described in {@link CqlSession#prepare(SimpleStatement)}. The prepare - * request itself: - * - *

    - *
  • will use the same execution profile (or execution profile name) as the {@code - * SimpleStatement}; - *
  • will use the same custom payload as the {@code SimpleStatement}; - *
  • will use a {@code null} timeout in order to default to the configuration (assuming that if - * a statement with a custom timeout is prepared, it is intended for the bound statements, not - * the preparation itself). - *
- */ -@Immutable -public class DefaultPrepareRequest implements PrepareRequest { - - private final SimpleStatement statement; - - public DefaultPrepareRequest(SimpleStatement statement) { - this.statement = statement; - } - - public DefaultPrepareRequest(String query) { - this.statement = SimpleStatement.newInstance(query); - } - - @NonNull - @Override - public String getQuery() { - return statement.getQuery(); - } - - @Nullable - @Override - public String getExecutionProfileName() { - return statement.getExecutionProfileName(); - } - - @Nullable - @Override - public DriverExecutionProfile getExecutionProfile() { - return statement.getExecutionProfile(); - } - - @Nullable - @Override - public CqlIdentifier getKeyspace() { - return statement.getKeyspace(); - } - - @Nullable - @Override - public CqlIdentifier getRoutingKeyspace() { - // Prepare requests do not operate on a particular partition, token-aware routing doesn't apply. - return null; - } - - @Nullable - @Override - public ByteBuffer getRoutingKey() { - return null; - } - - @Nullable - @Override - public Token getRoutingToken() { - return null; - } - - @NonNull - @Override - public Map getCustomPayload() { - return statement.getCustomPayload(); - } - - @Nullable - @Override - public Duration getTimeout() { - return null; - } - - @Nullable - @Override - public String getExecutionProfileNameForBoundStatements() { - return statement.getExecutionProfileName(); - } - - @Nullable - @Override - public DriverExecutionProfile getExecutionProfileForBoundStatements() { - return statement.getExecutionProfile(); - } - - @Nullable - @Override - public CqlIdentifier getRoutingKeyspaceForBoundStatements() { - return (statement.getKeyspace() != null) - ? 
statement.getKeyspace() - : statement.getRoutingKeyspace(); - } - - @Nullable - @Override - public ByteBuffer getRoutingKeyForBoundStatements() { - return statement.getRoutingKey(); - } - - @Nullable - @Override - public Token getRoutingTokenForBoundStatements() { - return statement.getRoutingToken(); - } - - @NonNull - @Override - public Map getCustomPayloadForBoundStatements() { - return statement.getCustomPayload(); - } - - @Nullable - @Override - public Boolean areBoundStatementsIdempotent() { - return statement.isIdempotent(); - } - - @Nullable - @Override - public Duration getTimeoutForBoundStatements() { - return statement.getTimeout(); - } - - @Nullable - @Override - public ByteBuffer getPagingStateForBoundStatements() { - return statement.getPagingState(); - } - - @Override - public int getPageSizeForBoundStatements() { - return statement.getPageSize(); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevelForBoundStatements() { - return statement.getConsistencyLevel(); - } - - @Nullable - @Override - public ConsistencyLevel getSerialConsistencyLevelForBoundStatements() { - return statement.getSerialConsistencyLevel(); - } - - @Nullable - @Override - public Node getNode() { - // never target prepare requests - return null; - } - - @Override - public boolean areBoundStatementsTracing() { - return statement.isTracing(); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DefaultPrepareRequest) { - DefaultPrepareRequest that = (DefaultPrepareRequest) other; - return this.statement.equals(that.statement); - } else { - return false; - } - } - - @Override - public int hashCode() { - return statement.hashCode(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java deleted file mode 100644 index e45e1e5add0..00000000000 
--- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.data.ValuesHelper; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import 
net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultPreparedStatement implements PreparedStatement { - - private final ByteBuffer id; - private final RepreparePayload repreparePayload; - private final ColumnDefinitions variableDefinitions; - private final List partitionKeyIndices; - private volatile ResultMetadata resultMetadata; - private final CodecRegistry codecRegistry; - private final ProtocolVersion protocolVersion; - private final String executionProfileNameForBoundStatements; - private final DriverExecutionProfile executionProfileForBoundStatements; - private final ByteBuffer pagingStateForBoundStatements; - private final CqlIdentifier routingKeyspaceForBoundStatements; - private final ByteBuffer routingKeyForBoundStatements; - private final Token routingTokenForBoundStatements; - private final Map customPayloadForBoundStatements; - private final Boolean areBoundStatementsIdempotent; - private final boolean areBoundStatementsTracing; - private final int pageSizeForBoundStatements; - private final ConsistencyLevel consistencyLevelForBoundStatements; - private final ConsistencyLevel serialConsistencyLevelForBoundStatements; - private final Duration timeoutForBoundStatements; - - public DefaultPreparedStatement( - ByteBuffer id, - String query, - ColumnDefinitions variableDefinitions, - List partitionKeyIndices, - ByteBuffer resultMetadataId, - ColumnDefinitions resultSetDefinitions, - CqlIdentifier keyspace, - Map customPayloadForPrepare, - String executionProfileNameForBoundStatements, - DriverExecutionProfile executionProfileForBoundStatements, - CqlIdentifier routingKeyspaceForBoundStatements, - ByteBuffer routingKeyForBoundStatements, - Token routingTokenForBoundStatements, - Map customPayloadForBoundStatements, - Boolean areBoundStatementsIdempotent, - Duration timeoutForBoundStatements, - ByteBuffer pagingStateForBoundStatements, - int pageSizeForBoundStatements, - ConsistencyLevel consistencyLevelForBoundStatements, - ConsistencyLevel 
serialConsistencyLevelForBoundStatements, - boolean areBoundStatementsTracing, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion) { - this.id = id; - this.partitionKeyIndices = partitionKeyIndices; - // It's important that we keep a reference to this object, so that it only gets evicted from - // the map in DefaultSession if no client reference the PreparedStatement anymore. - this.repreparePayload = new RepreparePayload(id, query, keyspace, customPayloadForPrepare); - this.variableDefinitions = variableDefinitions; - this.resultMetadata = new ResultMetadata(resultMetadataId, resultSetDefinitions); - - this.executionProfileNameForBoundStatements = executionProfileNameForBoundStatements; - this.executionProfileForBoundStatements = executionProfileForBoundStatements; - this.routingKeyspaceForBoundStatements = routingKeyspaceForBoundStatements; - this.routingKeyForBoundStatements = routingKeyForBoundStatements; - this.routingTokenForBoundStatements = routingTokenForBoundStatements; - this.customPayloadForBoundStatements = customPayloadForBoundStatements; - this.areBoundStatementsIdempotent = areBoundStatementsIdempotent; - this.timeoutForBoundStatements = timeoutForBoundStatements; - this.pagingStateForBoundStatements = pagingStateForBoundStatements; - this.pageSizeForBoundStatements = pageSizeForBoundStatements; - this.consistencyLevelForBoundStatements = consistencyLevelForBoundStatements; - this.serialConsistencyLevelForBoundStatements = serialConsistencyLevelForBoundStatements; - this.areBoundStatementsTracing = areBoundStatementsTracing; - - this.codecRegistry = codecRegistry; - this.protocolVersion = protocolVersion; - } - - @NonNull - @Override - public ByteBuffer getId() { - return id; - } - - @NonNull - @Override - public String getQuery() { - return repreparePayload.query; - } - - @NonNull - @Override - public ColumnDefinitions getVariableDefinitions() { - return variableDefinitions; - } - - @NonNull - @Override - public List 
getPartitionKeyIndices() { - return partitionKeyIndices; - } - - @Override - public ByteBuffer getResultMetadataId() { - return resultMetadata.resultMetadataId; - } - - @NonNull - @Override - public ColumnDefinitions getResultSetDefinitions() { - return resultMetadata.resultSetDefinitions; - } - - @Override - public void setResultMetadata( - @NonNull ByteBuffer newResultMetadataId, @NonNull ColumnDefinitions newResultSetDefinitions) { - this.resultMetadata = new ResultMetadata(newResultMetadataId, newResultSetDefinitions); - } - - @NonNull - @Override - public BoundStatement bind(@NonNull Object... values) { - return new DefaultBoundStatement( - this, - variableDefinitions, - ValuesHelper.encodePreparedValues( - values, variableDefinitions, codecRegistry, protocolVersion), - executionProfileNameForBoundStatements, - executionProfileForBoundStatements, - routingKeyspaceForBoundStatements, - routingKeyForBoundStatements, - routingTokenForBoundStatements, - customPayloadForBoundStatements, - areBoundStatementsIdempotent, - areBoundStatementsTracing, - Statement.NO_DEFAULT_TIMESTAMP, - pagingStateForBoundStatements, - pageSizeForBoundStatements, - consistencyLevelForBoundStatements, - serialConsistencyLevelForBoundStatements, - timeoutForBoundStatements, - codecRegistry, - protocolVersion, - null, - Statement.NO_NOW_IN_SECONDS); - } - - @NonNull - @Override - public BoundStatementBuilder boundStatementBuilder(@NonNull Object... 
values) { - return new BoundStatementBuilder( - this, - variableDefinitions, - ValuesHelper.encodePreparedValues( - values, variableDefinitions, codecRegistry, protocolVersion), - executionProfileNameForBoundStatements, - executionProfileForBoundStatements, - routingKeyspaceForBoundStatements, - routingKeyForBoundStatements, - routingTokenForBoundStatements, - customPayloadForBoundStatements, - areBoundStatementsIdempotent, - areBoundStatementsTracing, - Statement.NO_DEFAULT_TIMESTAMP, - pagingStateForBoundStatements, - pageSizeForBoundStatements, - consistencyLevelForBoundStatements, - serialConsistencyLevelForBoundStatements, - timeoutForBoundStatements, - codecRegistry, - protocolVersion); - } - - public RepreparePayload getRepreparePayload() { - return this.repreparePayload; - } - - private static class ResultMetadata { - private ByteBuffer resultMetadataId; - private ColumnDefinitions resultSetDefinitions; - - private ResultMetadata(ByteBuffer resultMetadataId, ColumnDefinitions resultSetDefinitions) { - this.resultMetadataId = resultMetadataId; - this.resultSetDefinitions = resultSetDefinitions; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java deleted file mode 100644 index db95cc408b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.TraceEvent; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultQueryTrace implements QueryTrace { - - private final UUID tracingId; - private final String requestType; - private final int durationMicros; - private final InetSocketAddress coordinator; - private final Map parameters; - private final long startedAt; - private final List events; - - public DefaultQueryTrace( - UUID tracingId, - String requestType, - int durationMicros, - InetSocketAddress coordinator, - Map parameters, - long startedAt, - List events) { - this.tracingId = tracingId; - this.requestType = requestType; - this.durationMicros = durationMicros; - this.coordinator = coordinator; - this.parameters = parameters; - this.startedAt = startedAt; - this.events = events; - } - - @NonNull - @Override - public UUID getTracingId() { - return tracingId; - } - - @NonNull - @Override - public String getRequestType() { - return requestType; - } - - @Override - public int getDurationMicros() { - return durationMicros; - } - - @NonNull - @Override - @Deprecated - public InetAddress getCoordinator() { - return coordinator.getAddress(); - } - - @NonNull - @Override - public InetSocketAddress getCoordinatorAddress() { - return 
coordinator; - } - - @NonNull - @Override - public Map getParameters() { - return parameters; - } - - @Override - public long getStartedAt() { - return startedAt; - } - - @NonNull - @Override - public List getEvents() { - return events; - } - - @Override - public String toString() { - return String.format("%s [%s] - %dµs", requestType, tracingId, durationMicros); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java deleted file mode 100644 index d6bf39ab9c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultRow implements Row, Serializable { - - private final ColumnDefinitions definitions; - private final List data; - private transient volatile AttachmentPoint attachmentPoint; - - public DefaultRow( - ColumnDefinitions definitions, List data, AttachmentPoint attachmentPoint) { - this.definitions = definitions; - this.data = data; - this.attachmentPoint = attachmentPoint; - } - - public DefaultRow(ColumnDefinitions definitions, List data) { - this(definitions, data, AttachmentPoint.NONE); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return definitions; - } - - @Override - public int size() { - return definitions.size(); - } - - @NonNull - @Override - public DataType getType(int i) { - return definitions.get(i).getType(); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - List indices = definitions.allIndicesOf(id); - if (indices.isEmpty()) { - throw new IllegalArgumentException(id + " is not a column in this row"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - int 
indexOf = definitions.firstIndexOf(id); - if (indexOf == -1) { - throw new IllegalArgumentException(id + " is not a column in this row"); - } - return indexOf; - } - - @NonNull - @Override - public DataType getType(@NonNull CqlIdentifier id) { - return definitions.get(firstIndexOf(id)).getType(); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - List indices = definitions.allIndicesOf(name); - if (indices.isEmpty()) { - throw new IllegalArgumentException(name + " is not a column in this row"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull String name) { - int indexOf = definitions.firstIndexOf(name); - if (indexOf == -1) { - throw new IllegalArgumentException(name + " is not a column in this row"); - } - return indexOf; - } - - @NonNull - @Override - public DataType getType(@NonNull String name) { - return definitions.get(firstIndexOf(name)).getType(); - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return attachmentPoint.getCodecRegistry(); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return attachmentPoint.getProtocolVersion(); - } - - @Override - public boolean isDetached() { - return attachmentPoint == AttachmentPoint.NONE; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - this.attachmentPoint = attachmentPoint; - this.definitions.attach(attachmentPoint); - } - - @Nullable - @Override - public ByteBuffer getBytesUnsafe(int i) { - return data.get(i); - } - /** - * @serialData The column definitions, followed by an array of byte arrays representing the column - * values (null values are represented by {@code null}). 
- */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private final ColumnDefinitions definitions; - private final byte[][] values; - - SerializationProxy(DefaultRow row) { - this.definitions = row.definitions; - this.values = new byte[row.data.size()][]; - int i = 0; - for (ByteBuffer buffer : row.data) { - this.values[i] = (buffer == null) ? null : Bytes.getArray(buffer); - i += 1; - } - } - - private Object readResolve() { - List data = new ArrayList<>(this.values.length); - for (byte[] value : this.values) { - data.add((value == null) ? null : ByteBuffer.wrap(value)); - } - return new DefaultRow(this.definitions, data); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java deleted file mode 100644 index c763860479e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java +++ /dev/null @@ -1,810 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultSimpleStatement implements SimpleStatement { - - private final String query; - private final List positionalValues; - private final Map namedValues; - private final String executionProfileName; - private final DriverExecutionProfile executionProfile; - private final CqlIdentifier keyspace; - private final CqlIdentifier routingKeyspace; - private final ByteBuffer routingKey; - private final Token routingToken; - - private final Map customPayload; - private final Boolean idempotent; - private final boolean tracing; - private final long timestamp; - private final ByteBuffer pagingState; - private final int pageSize; - private final ConsistencyLevel consistencyLevel; - 
private final ConsistencyLevel serialConsistencyLevel; - private final Duration timeout; - private final Node node; - private final int nowInSeconds; - - /** @see SimpleStatement#builder(String) */ - public DefaultSimpleStatement( - String query, - List positionalValues, - Map namedValues, - String executionProfileName, - DriverExecutionProfile executionProfile, - CqlIdentifier keyspace, - CqlIdentifier routingKeyspace, - ByteBuffer routingKey, - Token routingToken, - Map customPayload, - Boolean idempotent, - boolean tracing, - long timestamp, - ByteBuffer pagingState, - int pageSize, - ConsistencyLevel consistencyLevel, - ConsistencyLevel serialConsistencyLevel, - Duration timeout, - Node node, - int nowInSeconds) { - if (!positionalValues.isEmpty() && !namedValues.isEmpty()) { - throw new IllegalArgumentException("Can't have both positional and named values"); - } - this.query = query; - this.positionalValues = NullAllowingImmutableList.copyOf(positionalValues); - this.namedValues = NullAllowingImmutableMap.copyOf(namedValues); - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - this.keyspace = keyspace; - this.routingKeyspace = routingKeyspace; - this.routingKey = routingKey; - this.routingToken = routingToken; - this.customPayload = customPayload; - this.idempotent = idempotent; - this.tracing = tracing; - this.timestamp = timestamp; - this.pagingState = pagingState; - this.pageSize = pageSize; - this.consistencyLevel = consistencyLevel; - this.serialConsistencyLevel = serialConsistencyLevel; - this.timeout = timeout; - this.node = node; - this.nowInSeconds = nowInSeconds; - } - - @NonNull - @Override - public String getQuery() { - return query; - } - - @NonNull - @Override - public SimpleStatement setQuery(@NonNull String newQuery) { - return new DefaultSimpleStatement( - newQuery, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - 
routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public List getPositionalValues() { - return positionalValues; - } - - @NonNull - @Override - public SimpleStatement setPositionalValues(@NonNull List newPositionalValues) { - return new DefaultSimpleStatement( - query, - newPositionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public Map getNamedValues() { - return namedValues; - } - - @NonNull - @Override - public SimpleStatement setNamedValuesWithIds(@NonNull Map newNamedValues) { - return new DefaultSimpleStatement( - query, - positionalValues, - newNamedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public String getExecutionProfileName() { - return executionProfileName; - } - - @NonNull - @Override - public SimpleStatement setExecutionProfileName(@Nullable String newConfigProfileName) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - newConfigProfileName, - (newConfigProfileName == null) ? 
executionProfile : null, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @NonNull - @Override - public SimpleStatement setExecutionProfile(@Nullable DriverExecutionProfile newProfile) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - (newProfile == null) ? executionProfileName : null, - newProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public SimpleStatement setKeyspace(@Nullable CqlIdentifier newKeyspace) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - newKeyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public CqlIdentifier getRoutingKeyspace() { - return routingKeyspace; - } - - @NonNull - @Override - public SimpleStatement setRoutingKeyspace(@Nullable CqlIdentifier newRoutingKeyspace) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - newRoutingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - 
@Override - public SimpleStatement setNode(@Nullable Node newNode) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - newNode, - nowInSeconds); - } - - @Nullable - @Override - public Node getNode() { - return node; - } - - @Nullable - @Override - public ByteBuffer getRoutingKey() { - return routingKey; - } - - @NonNull - @Override - public SimpleStatement setRoutingKey(@Nullable ByteBuffer newRoutingKey) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - newRoutingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public Token getRoutingToken() { - return routingToken; - } - - @NonNull - @Override - public SimpleStatement setRoutingToken(@Nullable Token newRoutingToken) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - newRoutingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public Map getCustomPayload() { - return customPayload; - } - - @NonNull - @Override - public SimpleStatement setCustomPayload(@NonNull Map newCustomPayload) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - newCustomPayload, - idempotent, - 
tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public Boolean isIdempotent() { - return idempotent; - } - - @NonNull - @Override - public SimpleStatement setIdempotent(@Nullable Boolean newIdempotence) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - newIdempotence, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public boolean isTracing() { - return tracing; - } - - @NonNull - @Override - public SimpleStatement setTracing(boolean newTracing) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - newTracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public long getQueryTimestamp() { - return timestamp; - } - - @NonNull - @Override - public SimpleStatement setQueryTimestamp(long newTimestamp) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - newTimestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public Duration getTimeout() { - return timeout; - } - - @NonNull - @Override - public SimpleStatement setTimeout(@Nullable Duration newTimeout) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - 
executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - newTimeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ByteBuffer getPagingState() { - return pagingState; - } - - @NonNull - @Override - public SimpleStatement setPagingState(@Nullable ByteBuffer newPagingState) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - newPagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public int getPageSize() { - return pageSize; - } - - @NonNull - @Override - public SimpleStatement setPageSize(int newPageSize) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - newPageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - @NonNull - @Override - public SimpleStatement setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - newConsistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getSerialConsistencyLevel() { - return 
serialConsistencyLevel; - } - - @NonNull - @Override - public SimpleStatement setSerialConsistencyLevel( - @Nullable ConsistencyLevel newSerialConsistencyLevel) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - newSerialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public int getNowInSeconds() { - return nowInSeconds; - } - - @NonNull - @Override - public SimpleStatement setNowInSeconds(int newNowInSeconds) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - newNowInSeconds); - } - - public static Map wrapKeys(Map namedValues) { - NullAllowingImmutableMap.Builder builder = - NullAllowingImmutableMap.builder(); - for (Map.Entry entry : namedValues.entrySet()) { - builder.put(CqlIdentifier.fromCql(entry.getKey()), entry.getValue()); - } - return builder.build(); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DefaultSimpleStatement) { - DefaultSimpleStatement that = (DefaultSimpleStatement) other; - return this.query.equals(that.query) - && this.positionalValues.equals(that.positionalValues) - && this.namedValues.equals(that.namedValues) - && Objects.equals(this.executionProfileName, that.executionProfileName) - && Objects.equals(this.executionProfile, that.executionProfile) - && Objects.equals(this.keyspace, that.keyspace) - && Objects.equals(this.routingKeyspace, that.routingKeyspace) - && Objects.equals(this.routingKey, that.routingKey) - && 
Objects.equals(this.routingToken, that.routingToken) - && Objects.equals(this.customPayload, that.customPayload) - && Objects.equals(this.idempotent, that.idempotent) - && this.tracing == that.tracing - && this.timestamp == that.timestamp - && Objects.equals(this.pagingState, that.pagingState) - && this.pageSize == that.pageSize - && Objects.equals(this.consistencyLevel, that.consistencyLevel) - && Objects.equals(this.serialConsistencyLevel, that.serialConsistencyLevel) - && Objects.equals(this.timeout, that.timeout) - && Objects.equals(this.node, that.node) - && this.nowInSeconds == that.nowInSeconds; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java deleted file mode 100644 index 9bf7ff7c8ee..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.TraceEvent; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.Date; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultTraceEvent implements TraceEvent { - - private final String activity; - private final long timestamp; - private final InetSocketAddress source; - private final int sourceElapsedMicros; - private final String threadName; - - public DefaultTraceEvent( - String activity, - long timestamp, - InetSocketAddress source, - int sourceElapsedMicros, - String threadName) { - this.activity = activity; - // Convert the UUID timestamp to an epoch timestamp - this.timestamp = (timestamp - 0x01b21dd213814000L) / 10000; - this.source = source; - this.sourceElapsedMicros = sourceElapsedMicros; - this.threadName = threadName; - } - - @Override - public String getActivity() { - return activity; - } - - @Override - public long getTimestamp() { - return timestamp; - } - - @Override - @Deprecated - public InetAddress getSource() { - return source.getAddress(); - } - - @Override - public InetSocketAddress getSourceAddress() { - return source; - } - - @Override - public int getSourceElapsedMicros() { - return sourceElapsedMicros; - } - - @Override - public String getThreadName() { - return threadName; - } - - @Override - public String toString() { - return String.format("%s on %s[%s] at %s", activity, source, threadName, new Date(timestamp)); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java deleted file mode 100644 index 53cfee98b3e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -/** - * The singleton that represents no column definitions (implemented as an enum which provides the - * serialization machinery for free). 
- */ -public enum EmptyColumnDefinitions implements ColumnDefinitions { - INSTANCE; - - @Override - public int size() { - return 0; - } - - @NonNull - @Override - public ColumnDefinition get(int i) { - throw new ArrayIndexOutOfBoundsException(); - } - - @Override - public boolean contains(@NonNull String name) { - return false; - } - - @Override - public boolean contains(@NonNull CqlIdentifier id) { - return false; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return Collections.emptyList(); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return -1; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return Collections.emptyList(); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return -1; - } - - @Override - public boolean isDetached() { - return false; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) {} - - @Override - public Iterator iterator() { - return Collections.emptyList().iterator(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java deleted file mode 100644 index 2115a127dc6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class MultiPageResultSet implements ResultSet { - - private final RowIterator iterator; - private final List executionInfos = new ArrayList<>(); - private ColumnDefinitions columnDefinitions; - - public MultiPageResultSet(@NonNull AsyncResultSet firstPage) { - assert firstPage.hasMorePages(); - this.iterator = new RowIterator(firstPage); - this.executionInfos.add(firstPage.getExecutionInfo()); - this.columnDefinitions = firstPage.getColumnDefinitions(); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return columnDefinitions; - } - - @NonNull - @Override - public List getExecutionInfos() { - return executionInfos; - } - - @Override - public boolean isFullyFetched() { - return iterator.isFullyFetched(); - } - - @Override - public int 
getAvailableWithoutFetching() { - return iterator.remaining(); - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - @Override - public boolean wasApplied() { - return iterator.wasApplied(); - } - - private class RowIterator extends CountingIterator { - private AsyncResultSet currentPage; - private Iterator currentRows; - - private RowIterator(AsyncResultSet firstPage) { - super(firstPage.remaining()); - this.currentPage = firstPage; - this.currentRows = firstPage.currentPage().iterator(); - } - - @Override - protected Row computeNext() { - maybeMoveToNextPage(); - return currentRows.hasNext() ? currentRows.next() : endOfData(); - } - - private void maybeMoveToNextPage() { - if (!currentRows.hasNext() && currentPage.hasMorePages()) { - BlockingOperation.checkNotDriverThread(); - AsyncResultSet nextPage = - CompletableFutures.getUninterruptibly(currentPage.fetchNextPage()); - currentPage = nextPage; - remaining += nextPage.remaining(); - currentRows = nextPage.currentPage().iterator(); - executionInfos.add(nextPage.getExecutionInfo()); - // The definitions can change from page to page if this result set was built from a bound - // 'SELECT *', and the schema was altered. - columnDefinitions = nextPage.getColumnDefinitions(); - } - } - - private boolean isFullyFetched() { - return !currentPage.hasMorePages(); - } - - private boolean wasApplied() { - return currentPage.wasApplied(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java deleted file mode 100644 index 742699d2c1e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.function.Consumer; -import net.jcip.annotations.NotThreadSafe; - -/** - * A Spliterator for {@link PagingIterable} instances that splits the stream in chunks of equal - * size. - * - * @param The element type of the underlying stream. - */ -@NotThreadSafe -public class PagingIterableSpliterator implements Spliterator { - - @NonNull - public static Builder builder(@NonNull PagingIterable iterable) { - return new Builder<>(iterable); - } - - /** The default chunk size for {@link PagingIterableSpliterator}. */ - public static final int DEFAULT_CHUNK_SIZE = 128; - - private final PagingIterable iterable; - private long estimatedSize; - private final int chunkSize; - private final int characteristics; - - /** - * Creates a new {@link PagingIterableSpliterator} for the given iterable, with unknown size and - * default chunk size ({@value #DEFAULT_CHUNK_SIZE}). 
- * - * @param iterable The {@link PagingIterable} to create a spliterator for. - */ - public PagingIterableSpliterator(@NonNull PagingIterable iterable) { - this(iterable, Long.MAX_VALUE, DEFAULT_CHUNK_SIZE); - } - - private PagingIterableSpliterator( - @NonNull PagingIterable iterable, long estimatedSize, int chunkSize) { - this.iterable = Objects.requireNonNull(iterable, "iterable cannot be null"); - this.estimatedSize = estimatedSize; - this.chunkSize = chunkSize; - if (estimatedSize < Long.MAX_VALUE) { - characteristics = - Spliterator.ORDERED - | Spliterator.IMMUTABLE - | Spliterator.NONNULL - | Spliterator.SIZED - | Spliterator.SUBSIZED; - } else { - characteristics = Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL; - } - } - - @Override - public boolean tryAdvance(Consumer action) { - Objects.requireNonNull(action, "action cannot be null"); - ElementT row = iterable.one(); - if (row == null) { - return false; - } - action.accept(row); - return true; - } - - @Override - @Nullable - public Spliterator trySplit() { - if (estimatedSize != Long.MAX_VALUE && estimatedSize <= chunkSize) { - // There is no point in splitting if the number of remaining elements is below the chunk size - return null; - } - ElementT row = iterable.one(); - if (row == null) { - return null; - } - Object[] array = new Object[chunkSize]; - int i = 0; - do { - array[i++] = row; - if (i < chunkSize) { - row = iterable.one(); - } else { - break; - } - } while (row != null); - if (estimatedSize != Long.MAX_VALUE) { - estimatedSize -= i; - } - // Splits will also report SIZED and SUBSIZED as well. 
- return Spliterators.spliterator(array, 0, i, characteristics()); - } - - @Override - public void forEachRemaining(Consumer action) { - iterable.iterator().forEachRemaining(action); - } - - @Override - public long estimateSize() { - return estimatedSize; - } - - @Override - public int characteristics() { - return characteristics; - } - - public static class Builder { - - private final PagingIterable iterable; - private long estimatedSize = Long.MAX_VALUE; - private int chunkSize = DEFAULT_CHUNK_SIZE; - - Builder(@NonNull PagingIterable iterable) { - this.iterable = iterable; - } - - @NonNull - public Builder withEstimatedSize(long estimatedSize) { - Preconditions.checkArgument(estimatedSize >= 0, "estimatedSize must be >= 0"); - this.estimatedSize = estimatedSize; - return this; - } - - @NonNull - public Builder withChunkSize(int chunkSize) { - Preconditions.checkArgument(chunkSize > 0, "chunkSize must be > 0"); - this.chunkSize = chunkSize; - return this; - } - - @NonNull - public PagingIterableSpliterator build() { - return new PagingIterableSpliterator<>(iterable, estimatedSize, chunkSize); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java deleted file mode 100644 index 7ea54aa3b0e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.TraceEvent; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import io.netty.util.concurrent.EventExecutor; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class QueryTraceFetcher { - - private final UUID tracingId; - private final CqlSession session; - private final DriverExecutionProfile config; - private final int maxAttempts; - private final long intervalNanos; - private final EventExecutor scheduler; - private final CompletableFuture resultFuture = new CompletableFuture<>(); - - QueryTraceFetcher( - UUID tracingId, - CqlSession session, - InternalDriverContext context, - DriverExecutionProfile config) { - 
this.tracingId = tracingId; - this.session = session; - - String regularConsistency = config.getString(DefaultDriverOption.REQUEST_CONSISTENCY); - String traceConsistency = config.getString(DefaultDriverOption.REQUEST_TRACE_CONSISTENCY); - this.config = - traceConsistency.equals(regularConsistency) - ? config - : config.withString(DefaultDriverOption.REQUEST_CONSISTENCY, traceConsistency); - - this.maxAttempts = config.getInt(DefaultDriverOption.REQUEST_TRACE_ATTEMPTS); - this.intervalNanos = config.getDuration(DefaultDriverOption.REQUEST_TRACE_INTERVAL).toNanos(); - this.scheduler = context.getNettyOptions().adminEventExecutorGroup().next(); - - querySession(maxAttempts); - } - - CompletionStage fetch() { - return resultFuture; - } - - private void querySession(int remainingAttempts) { - session - .executeAsync( - SimpleStatement.builder("SELECT * FROM system_traces.sessions WHERE session_id = ?") - .addPositionalValue(tracingId) - .setExecutionProfile(config) - .build()) - .whenComplete( - (rs, error) -> { - if (error != null) { - resultFuture.completeExceptionally(error); - } else { - Row row = rs.one(); - if (row == null || row.isNull("duration") || row.isNull("started_at")) { - // Trace is incomplete => fail if last try, or schedule retry - if (remainingAttempts == 1) { - resultFuture.completeExceptionally( - new IllegalStateException( - String.format( - "Trace %s still not complete after %d attempts", - tracingId, maxAttempts))); - } else { - scheduler.schedule( - () -> querySession(remainingAttempts - 1), - intervalNanos, - TimeUnit.NANOSECONDS); - } - } else { - queryEvents(row, new ArrayList<>(), null); - } - } - }); - } - - private void queryEvents(Row sessionRow, List events, ByteBuffer pagingState) { - session - .executeAsync( - SimpleStatement.builder("SELECT * FROM system_traces.events WHERE session_id = ?") - .addPositionalValue(tracingId) - .setPagingState(pagingState) - .setExecutionProfile(config) - .build()) - .whenComplete( - (rs, error) -> { - 
if (error != null) { - resultFuture.completeExceptionally(error); - } else { - Iterables.addAll(events, rs.currentPage()); - ByteBuffer nextPagingState = rs.getExecutionInfo().getPagingState(); - if (nextPagingState == null) { - resultFuture.complete(buildTrace(sessionRow, events)); - } else { - queryEvents(sessionRow, events, nextPagingState); - } - } - }); - } - - private QueryTrace buildTrace(Row sessionRow, Iterable eventRows) { - ImmutableList.Builder eventsBuilder = ImmutableList.builder(); - for (Row eventRow : eventRows) { - UUID eventId = eventRow.getUuid("event_id"); - int sourcePort = 0; - if (eventRow.getColumnDefinitions().contains("source_port")) { - sourcePort = eventRow.getInt("source_port"); - } - eventsBuilder.add( - new DefaultTraceEvent( - eventRow.getString("activity"), - eventId == null ? -1 : eventId.timestamp(), - new InetSocketAddress(eventRow.getInetAddress("source"), sourcePort), - eventRow.getInt("source_elapsed"), - eventRow.getString("thread"))); - } - Instant startedAt = sessionRow.getInstant("started_at"); - int coordinatorPort = 0; - if (sessionRow.getColumnDefinitions().contains("coordinator_port")) { - coordinatorPort = sessionRow.getInt("coordinator_port"); - } - return new DefaultQueryTrace( - tracingId, - sessionRow.getString("request"), - sessionRow.getInt("duration"), - new InetSocketAddress(sessionRow.getInetAddress("coordinator"), coordinatorPort), - sessionRow.getMap("parameters", String.class, String.class), - startedAt == null ? 
-1 : startedAt.toEpochMilli(), - eventsBuilder.build()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java deleted file mode 100644 index eb15d92acc5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; - -public class ResultSets { - public static ResultSet newInstance(AsyncResultSet firstPage) { - return firstPage.hasMorePages() - ? 
new MultiPageResultSet(firstPage) - : new SinglePageResultSet(firstPage); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java deleted file mode 100644 index eb33da3f430..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.PagingIterableWrapper; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.List; -import java.util.Spliterator; -import java.util.function.Function; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class SinglePageResultSet implements ResultSet { - private final AsyncResultSet onlyPage; - - public SinglePageResultSet(AsyncResultSet onlyPage) { - this.onlyPage = onlyPage; - assert !onlyPage.hasMorePages(); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return onlyPage.getColumnDefinitions(); - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return onlyPage.getExecutionInfo(); - } - - @NonNull - @Override - public List getExecutionInfos() { - // Assuming this will be called 0 or 1 time, avoid creating the list if it's 0. 
- return ImmutableList.of(onlyPage.getExecutionInfo()); - } - - @Override - public boolean isFullyFetched() { - return true; - } - - @Override - public int getAvailableWithoutFetching() { - return onlyPage.remaining(); - } - - @NonNull - @Override - public Iterator iterator() { - return onlyPage.currentPage().iterator(); - } - - @NonNull - @Override - public Spliterator spliterator() { - return PagingIterableSpliterator.builder(this) - .withEstimatedSize(getAvailableWithoutFetching()) - .build(); - } - - @NonNull - @Override - public PagingIterable map( - Function elementMapper) { - return new PagingIterableWrapper<>(this, elementMapper, true); - } - - @Override - public boolean wasApplied() { - return onlyPage.wasApplied(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java deleted file mode 100644 index 77cfa759237..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.NotThreadSafe; - -/** - * Implementation note: contrary to most GettableBy* and SettableBy* implementations, this class is - * mutable. - */ -@NotThreadSafe -public class DefaultTupleValue implements TupleValue, Serializable { - - private static final long serialVersionUID = 1; - private final TupleType type; - private final ByteBuffer[] values; - - public DefaultTupleValue(@NonNull TupleType type) { - this(type, new ByteBuffer[type.getComponentTypes().size()]); - } - - public DefaultTupleValue(@NonNull TupleType type, @NonNull Object... 
values) { - this( - type, - ValuesHelper.encodeValues( - values, - type.getComponentTypes(), - type.getAttachmentPoint().getCodecRegistry(), - type.getAttachmentPoint().getProtocolVersion())); - } - - private DefaultTupleValue(TupleType type, ByteBuffer[] values) { - Preconditions.checkNotNull(type); - this.type = type; - this.values = values; - } - - @NonNull - @Override - public TupleType getType() { - return type; - } - - @Override - public int size() { - return values.length; - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return values[i]; - } - - @NonNull - @Override - public TupleValue setBytesUnsafe(int i, @Nullable ByteBuffer v) { - values[i] = v; - return this; - } - - @NonNull - @Override - public DataType getType(int i) { - return type.getComponentTypes().get(i); - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return type.getAttachmentPoint().getCodecRegistry(); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return type.getAttachmentPoint().getProtocolVersion(); - } - - /** - * @serialData The type of the tuple, followed by an array of byte arrays representing the values - * (null values are represented by {@code null}). 
- */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof TupleValue)) { - return false; - } - TupleValue that = (TupleValue) o; - - if (!type.equals(that.getType())) { - return false; - } - - for (int i = 0; i < values.length; i++) { - DataType innerThisType = type.getComponentTypes().get(i); - DataType innerThatType = that.getType().getComponentTypes().get(i); - if (!innerThisType.equals(innerThatType)) { - return false; - } - Object thisValue = - this.codecRegistry() - .codecFor(innerThisType) - .decode(this.getBytesUnsafe(i), this.protocolVersion()); - Object thatValue = - that.codecRegistry() - .codecFor(innerThatType) - .decode(that.getBytesUnsafe(i), that.protocolVersion()); - if (!Objects.equals(thisValue, thatValue)) { - return false; - } - } - return true; - } - - @Override - public int hashCode() { - - int result = type.hashCode(); - - for (int i = 0; i < values.length; i++) { - DataType innerThisType = type.getComponentTypes().get(i); - Object thisValue = - this.codecRegistry() - .codecFor(innerThisType) - .decode(this.values[i], this.protocolVersion()); - if (thisValue != null) { - result = 31 * result + thisValue.hashCode(); - } - } - - return result; - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private final TupleType type; - private final byte[][] values; - - SerializationProxy(DefaultTupleValue tuple) { - this.type = tuple.type; - this.values = new byte[tuple.values.length][]; - for (int i = 0; i < tuple.values.length; i++) { - ByteBuffer buffer = tuple.values[i]; - this.values[i] = (buffer == null) ? 
null : Bytes.getArray(buffer); - } - } - - private Object readResolve() { - ByteBuffer[] buffers = new ByteBuffer[this.values.length]; - for (int i = 0; i < this.values.length; i++) { - byte[] value = this.values[i]; - buffers[i] = (value == null) ? null : ByteBuffer.wrap(value); - } - return new DefaultTupleValue(this.type, buffers); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java deleted file mode 100644 index c9bf986fcc8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.NotThreadSafe; - -/** - * Implementation note: contrary to most GettableBy* and SettableBy* implementations, this class is - * mutable. - */ -@NotThreadSafe -public class DefaultUdtValue implements UdtValue, Serializable { - - private static final long serialVersionUID = 1; - - private final UserDefinedType type; - private final ByteBuffer[] values; - - public DefaultUdtValue(@NonNull UserDefinedType type) { - this(type, new ByteBuffer[type.getFieldTypes().size()]); - } - - public DefaultUdtValue(@NonNull UserDefinedType type, @NonNull Object... 
values) { - this( - type, - ValuesHelper.encodeValues( - values, - type.getFieldTypes(), - type.getAttachmentPoint().getCodecRegistry(), - type.getAttachmentPoint().getProtocolVersion())); - } - - private DefaultUdtValue(UserDefinedType type, ByteBuffer[] values) { - Preconditions.checkNotNull(type); - this.type = type; - this.values = values; - } - - @NonNull - @Override - public UserDefinedType getType() { - return type; - } - - @Override - public int size() { - return values.length; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - List indices = type.allIndicesOf(id); - if (indices.isEmpty()) { - throw new IllegalArgumentException(id + " is not a field in this UDT"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - int indexOf = type.firstIndexOf(id); - if (indexOf == -1) { - throw new IllegalArgumentException(id + " is not a field in this UDT"); - } - return indexOf; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - List indices = type.allIndicesOf(name); - if (indices.isEmpty()) { - throw new IllegalArgumentException(name + " is not a field in this UDT"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull String name) { - int indexOf = type.firstIndexOf(name); - if (indexOf == -1) { - throw new IllegalArgumentException(name + " is not a field in this UDT"); - } - return indexOf; - } - - @NonNull - @Override - public DataType getType(int i) { - return type.getFieldTypes().get(i); - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return values[i]; - } - - @NonNull - @Override - public UdtValue setBytesUnsafe(int i, @Nullable ByteBuffer v) { - values[i] = v; - return this; - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return type.getAttachmentPoint().getCodecRegistry(); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return 
type.getAttachmentPoint().getProtocolVersion(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof UdtValue)) { - return false; - } - UdtValue that = (UdtValue) o; - - if (!type.equals(that.getType())) { - return false; - } - - for (int i = 0; i < values.length; i++) { - - DataType innerThisType = type.getFieldTypes().get(i); - DataType innerThatType = that.getType().getFieldTypes().get(i); - - Object thisValue = - this.codecRegistry() - .codecFor(innerThisType) - .decode(this.getBytesUnsafe(i), this.protocolVersion()); - Object thatValue = - that.codecRegistry() - .codecFor(innerThatType) - .decode(that.getBytesUnsafe(i), that.protocolVersion()); - - if (!Objects.equals(thisValue, thatValue)) { - return false; - } - } - return true; - } - - @Override - public int hashCode() { - int result = type.hashCode(); - for (int i = 0; i < values.length; i++) { - DataType innerThisType = type.getFieldTypes().get(i); - Object thisValue = - this.codecRegistry() - .codecFor(innerThisType) - .decode(this.values[i], this.protocolVersion()); - if (thisValue != null) { - result = 31 * result + thisValue.hashCode(); - } - } - return result; - } - - /** - * @serialData The type of the tuple, followed by an array of byte arrays representing the values - * (null values are represented by {@code null}). 
- */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private final UserDefinedType type; - private final byte[][] values; - - SerializationProxy(DefaultUdtValue udt) { - this.type = udt.type; - this.values = new byte[udt.values.length][]; - for (int i = 0; i < udt.values.length; i++) { - ByteBuffer buffer = udt.values[i]; - this.values[i] = (buffer == null) ? null : Bytes.getArray(buffer); - } - } - - private Object readResolve() { - ByteBuffer[] buffers = new ByteBuffer[this.values.length]; - for (int i = 0; i < this.values.length; i++) { - byte[] value = this.values[i]; - buffers[i] = (value == null) ? null : ByteBuffer.wrap(value); - } - return new DefaultUdtValue(this.type, buffers); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java deleted file mode 100644 index d35c164eb84..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.AccessibleByName; -import com.datastax.oss.driver.api.core.data.GettableById; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import net.jcip.annotations.Immutable; - -/** - * Indexes an ordered list of identifiers. 
- * - * @see GettableByName - * @see GettableById - */ -@Immutable -public class IdentifierIndex { - - private final ListMultimap byId; - private final ListMultimap byCaseSensitiveName; - private final ListMultimap byCaseInsensitiveName; - - public IdentifierIndex(List ids) { - ImmutableListMultimap.Builder byId = ImmutableListMultimap.builder(); - ImmutableListMultimap.Builder byCaseSensitiveName = - ImmutableListMultimap.builder(); - ImmutableListMultimap.Builder byCaseInsensitiveName = - ImmutableListMultimap.builder(); - - int i = 0; - for (CqlIdentifier id : ids) { - byId.put(id, i); - byCaseSensitiveName.put(id.asInternal(), i); - byCaseInsensitiveName.put(id.asInternal().toLowerCase(Locale.ROOT), i); - i += 1; - } - - this.byId = byId.build(); - this.byCaseSensitiveName = byCaseSensitiveName.build(); - this.byCaseInsensitiveName = byCaseInsensitiveName.build(); - } - - /** - * Returns all occurrences of a given name, given the matching rules described in {@link - * AccessibleByName}. - */ - public List allIndicesOf(String name) { - return Strings.isDoubleQuoted(name) - ? byCaseSensitiveName.get(Strings.unDoubleQuote(name)) - : byCaseInsensitiveName.get(name.toLowerCase(Locale.ROOT)); - } - - /** - * Returns the first occurrence of a given name, given the matching rules described in {@link - * AccessibleByName}, or -1 if it's not in the list. - */ - public int firstIndexOf(String name) { - Iterator indices = allIndicesOf(name).iterator(); - return indices.hasNext() ? indices.next() : -1; - } - - /** Returns all occurrences of a given identifier. */ - public List allIndicesOf(CqlIdentifier id) { - return byId.get(id); - } - - /** Returns the first occurrence of a given identifier, or -1 if it's not in the list. */ - public int firstIndexOf(CqlIdentifier id) { - Iterator indices = allIndicesOf(id).iterator(); - return indices.hasNext() ? 
indices.next() : -1; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java deleted file mode 100644 index 24490ca2509..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import java.nio.ByteBuffer; -import java.util.List; - -public class ValuesHelper { - - public static ByteBuffer[] encodeValues( - Object[] values, - List fieldTypes, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion) { - Preconditions.checkArgument( - values.length <= fieldTypes.size(), - "Too many values (expected %s, got %s)", - fieldTypes.size(), - values.length); - - ByteBuffer[] encodedValues = new ByteBuffer[fieldTypes.size()]; - for (int i = 0; i < values.length; i++) { - Object value = values[i]; - ByteBuffer encodedValue; - if (value instanceof Token) { - if (value instanceof Murmur3Token) { - encodedValue = - TypeCodecs.BIGINT.encode(((Murmur3Token) value).getValue(), protocolVersion); - } else if (value instanceof ByteOrderedToken) { - encodedValue = - TypeCodecs.BLOB.encode(((ByteOrderedToken) value).getValue(), protocolVersion); - } else if (value instanceof RandomToken) { - encodedValue = - TypeCodecs.VARINT.encode(((RandomToken) value).getValue(), protocolVersion); - } else { - throw new IllegalArgumentException("Unsupported token type " + value.getClass()); - } - } else { - TypeCodec 
codec = - (value == null) - ? codecRegistry.codecFor(fieldTypes.get(i)) - : codecRegistry.codecFor(fieldTypes.get(i), value); - encodedValue = codec.encode(value, protocolVersion); - } - encodedValues[i] = encodedValue; - } - return encodedValues; - } - - public static ByteBuffer[] encodePreparedValues( - Object[] values, - ColumnDefinitions variableDefinitions, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion) { - - // Almost same as encodeValues, but we can't reuse because of variableDefinitions. Rebuilding a - // list of datatypes is not worth it, so duplicate the code. - - Preconditions.checkArgument( - values.length <= variableDefinitions.size(), - "Too many variables (expected %s, got %s)", - variableDefinitions.size(), - values.length); - - ByteBuffer[] encodedValues = new ByteBuffer[variableDefinitions.size()]; - int i; - for (i = 0; i < values.length; i++) { - Object value = values[i]; - ByteBuffer encodedValue; - if (value instanceof Token) { - if (value instanceof Murmur3Token) { - encodedValue = - TypeCodecs.BIGINT.encode(((Murmur3Token) value).getValue(), protocolVersion); - } else if (value instanceof ByteOrderedToken) { - encodedValue = - TypeCodecs.BLOB.encode(((ByteOrderedToken) value).getValue(), protocolVersion); - } else if (value instanceof RandomToken) { - encodedValue = - TypeCodecs.VARINT.encode(((RandomToken) value).getValue(), protocolVersion); - } else { - throw new IllegalArgumentException("Unsupported token type " + value.getClass()); - } - } else { - TypeCodec codec = - (value == null) - ? 
codecRegistry.codecFor(variableDefinitions.get(i).getType()) - : codecRegistry.codecFor(variableDefinitions.get(i).getType(), value); - encodedValue = codec.encode(value, protocolVersion); - } - encodedValues[i] = encodedValue; - } - for (; i < encodedValues.length; i++) { - encodedValues[i] = ProtocolConstants.UNSET_VALUE; - } - return encodedValues; - } - - public static ByteBuffer encodeToDefaultCqlMapping( - Object value, CodecRegistry codecRegistry, ProtocolVersion protocolVersion) { - if (value instanceof Token) { - if (value instanceof Murmur3Token) { - return TypeCodecs.BIGINT.encode(((Murmur3Token) value).getValue(), protocolVersion); - } else if (value instanceof ByteOrderedToken) { - return TypeCodecs.BLOB.encode(((ByteOrderedToken) value).getValue(), protocolVersion); - } else if (value instanceof RandomToken) { - return TypeCodecs.VARINT.encode(((RandomToken) value).getValue(), protocolVersion); - } else { - throw new IllegalArgumentException("Unsupported token type " + value.getClass()); - } - } else { - return codecRegistry.codecFor(value).encode(value, protocolVersion); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java deleted file mode 100644 index a02a5eb3148..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java +++ /dev/null @@ -1,498 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.DefaultNodeDistanceEvaluatorHelper; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.OptionalLocalDcHelper; -import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.DcAgnosticNodeSet; -import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.MultiDcNodeSet; -import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.NodeSet; -import 
com.datastax.oss.driver.internal.core.loadbalancing.nodeset.SingleDcNodeSet; -import com.datastax.oss.driver.internal.core.util.ArrayUtils; -import com.datastax.oss.driver.internal.core.util.collection.CompositeQueryPlan; -import com.datastax.oss.driver.internal.core.util.collection.LazyQueryPlan; -import com.datastax.oss.driver.internal.core.util.collection.QueryPlan; -import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.driver.shaded.guava.common.base.Predicates; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.IntUnaryOperator; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A basic implementation of {@link LoadBalancingPolicy} that can serve as a building block for more - * advanced use cases. - * - *

To activate this policy, modify the {@code basic.load-balancing-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   basic.load-balancing-policy {
- *     class = BasicLoadBalancingPolicy
- *     local-datacenter = datacenter1 # optional
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *

Local datacenter: This implementation will only define a local datacenter if it is - * explicitly set either through configuration or programmatically; if the local datacenter is - * unspecified, this implementation will effectively act as a datacenter-agnostic load balancing - * policy and will consider all nodes in the cluster when creating query plans, regardless of their - * datacenter. - * - *

Query plan: This implementation prioritizes replica nodes over non-replica ones; if - * more than one replica is available, the replicas will be shuffled. Non-replica nodes will be - * included in a round-robin fashion. If the local datacenter is defined (see above), query plans - * will only include local nodes, never remote ones; if it is unspecified however, query plans may - * contain nodes from different datacenters. - * - *

This class is not recommended for normal users who should always prefer {@link - * DefaultLoadBalancingPolicy}. - */ -@ThreadSafe -public class BasicLoadBalancingPolicy implements LoadBalancingPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(BasicLoadBalancingPolicy.class); - - protected static final IntUnaryOperator INCREMENT = i -> (i == Integer.MAX_VALUE) ? 0 : i + 1; - private static final Object[] EMPTY_NODES = new Object[0]; - - @NonNull protected final InternalDriverContext context; - @NonNull protected final DriverExecutionProfile profile; - @NonNull protected final String logPrefix; - - protected final AtomicInteger roundRobinAmount = new AtomicInteger(); - - private final int maxNodesPerRemoteDc; - private final boolean allowDcFailoverForLocalCl; - private final ConsistencyLevel defaultConsistencyLevel; - - // private because they should be set in init() and never be modified after - private volatile DistanceReporter distanceReporter; - private volatile NodeDistanceEvaluator nodeDistanceEvaluator; - private volatile String localDc; - private volatile NodeSet liveNodes; - private final LinkedHashSet preferredRemoteDcs; - - public BasicLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) { - this.context = (InternalDriverContext) context; - profile = context.getConfig().getProfile(profileName); - logPrefix = context.getSessionName() + "|" + profileName; - maxNodesPerRemoteDc = - profile.getInt(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC); - allowDcFailoverForLocalCl = - profile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS); - defaultConsistencyLevel = - this.context - .getConsistencyLevelRegistry() - .nameToLevel(profile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)); - - preferredRemoteDcs = - new LinkedHashSet<>( - profile.getStringList( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS)); - } - - /** - * 
Returns the local datacenter name, if known; empty otherwise. - * - *

When this method returns null, then datacenter awareness is completely disabled. All - * non-ignored nodes will be considered "local" regardless of their actual datacenters, and will - * have equal chances of being selected for query plans. - * - *

After the policy is {@linkplain #init(Map, DistanceReporter) initialized} this method will - * return the local datacenter that was discovered by calling {@link #discoverLocalDc(Map)}. - * Before initialization, this method always returns null. - */ - @Nullable - public String getLocalDatacenter() { - return localDc; - } - - @NonNull - @Override - public Map getStartupConfiguration() { - ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); - if (localDc != null) { - builder.put("localDc", localDc); - } else { - // Local data center may not be discovered prior to connection pool initialization. - // In such scenario, return configured local data center name. - // Note that when using DC inferring load balancing policy, startup configuration - // may not show local DC name, because it will be discovered only once control connection - // is established and datacenter of contact points known. - Optional configuredDc = - new OptionalLocalDcHelper(context, profile, logPrefix).configuredLocalDc(); - configuredDc.ifPresent(d -> builder.put("localDc", d)); - } - if (!preferredRemoteDcs.isEmpty()) { - builder.put("preferredRemoteDcs", preferredRemoteDcs); - } - if (allowDcFailoverForLocalCl) { - builder.put("allowDcFailoverForLocalCl", allowDcFailoverForLocalCl); - } - if (maxNodesPerRemoteDc > 0) { - builder.put("maxNodesPerRemoteDc", maxNodesPerRemoteDc); - } - return ImmutableMap.of(BasicLoadBalancingPolicy.class.getSimpleName(), builder.build()); - } - - /** @return The nodes currently considered as live. */ - protected NodeSet getLiveNodes() { - return liveNodes; - } - - @Override - public void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter) { - this.distanceReporter = distanceReporter; - localDc = discoverLocalDc(nodes).orElse(null); - nodeDistanceEvaluator = createNodeDistanceEvaluator(localDc, nodes); - liveNodes = - localDc == null - ? new DcAgnosticNodeSet() - : maxNodesPerRemoteDc <= 0 ? 
new SingleDcNodeSet(localDc) : new MultiDcNodeSet(); - for (Node node : nodes.values()) { - NodeDistance distance = computeNodeDistance(node); - distanceReporter.setDistance(node, distance); - if (distance != NodeDistance.IGNORED && node.getState() != NodeState.DOWN) { - // This includes state == UNKNOWN. If the node turns out to be unreachable, this will be - // detected when we try to open a pool to it, it will get marked down and this will be - // signaled back to this policy, which will then remove it from the live set. - liveNodes.add(node); - } - } - } - - /** - * Returns the local datacenter, if it can be discovered, or returns {@link Optional#empty empty} - * otherwise. - * - *

This method is called only once, during {@linkplain LoadBalancingPolicy#init(Map, - * LoadBalancingPolicy.DistanceReporter) initialization}. - * - *

Implementors may choose to throw {@link IllegalStateException} instead of returning {@link - * Optional#empty empty}, if they require a local datacenter to be defined in order to operate - * properly. - * - *

If this method returns empty, then datacenter awareness will be completely disabled. All - * non-ignored nodes will be considered "local" regardless of their actual datacenters, and will - * have equal chances of being selected for query plans. - * - * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) - * when the load balancing policy was initialized. This argument is provided in case - * implementors need to inspect the cluster topology to discover the local datacenter. - * @return The local datacenter, or {@link Optional#empty empty} if none found. - * @throws IllegalStateException if the local datacenter could not be discovered, and this policy - * cannot operate without it. - */ - @NonNull - protected Optional discoverLocalDc(@NonNull Map nodes) { - return new OptionalLocalDcHelper(context, profile, logPrefix).discoverLocalDc(nodes); - } - - /** - * Creates a new node distance evaluator to use with this policy. - * - *

This method is called only once, during {@linkplain LoadBalancingPolicy#init(Map, - * LoadBalancingPolicy.DistanceReporter) initialization}, and only after local datacenter - * discovery has been attempted. - * - * @param localDc The local datacenter that was just discovered, or null if none found. - * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) - * when the load balancing policy was initialized. This argument is provided in case - * implementors need to inspect the cluster topology to create the evaluator. - * @return the distance evaluator to use. - */ - @NonNull - protected NodeDistanceEvaluator createNodeDistanceEvaluator( - @Nullable String localDc, @NonNull Map nodes) { - return new DefaultNodeDistanceEvaluatorHelper(context, profile, logPrefix) - .createNodeDistanceEvaluator(localDc, nodes); - } - - @NonNull - @Override - public Queue newQueryPlan(@Nullable Request request, @Nullable Session session) { - // Take a snapshot since the set is concurrent: - Object[] currentNodes = liveNodes.dc(localDc).toArray(); - - Set allReplicas = getReplicas(request, session); - int replicaCount = 0; // in currentNodes - - if (!allReplicas.isEmpty()) { - // Move replicas to the beginning - for (int i = 0; i < currentNodes.length; i++) { - Node node = (Node) currentNodes[i]; - if (allReplicas.contains(node)) { - ArrayUtils.bubbleUp(currentNodes, i, replicaCount); - replicaCount += 1; - } - } - - if (replicaCount > 1) { - shuffleHead(currentNodes, replicaCount); - } - } - - LOG.trace("[{}] Prioritizing {} local replicas", logPrefix, replicaCount); - - // Round-robin the remaining nodes - ArrayUtils.rotate( - currentNodes, - replicaCount, - currentNodes.length - replicaCount, - roundRobinAmount.getAndUpdate(INCREMENT)); - - QueryPlan plan = currentNodes.length == 0 ? 
QueryPlan.EMPTY : new SimpleQueryPlan(currentNodes); - return maybeAddDcFailover(request, plan); - } - - @NonNull - protected Set getReplicas(@Nullable Request request, @Nullable Session session) { - if (request == null || session == null) { - return Collections.emptySet(); - } - - Optional maybeTokenMap = context.getMetadataManager().getMetadata().getTokenMap(); - if (!maybeTokenMap.isPresent()) { - return Collections.emptySet(); - } - - // Note: we're on the hot path and the getXxx methods are potentially more than simple getters, - // so we only call each method when strictly necessary (which is why the code below looks a bit - // weird). - CqlIdentifier keyspace; - Token token; - ByteBuffer key; - try { - keyspace = request.getKeyspace(); - if (keyspace == null) { - keyspace = request.getRoutingKeyspace(); - } - if (keyspace == null && session.getKeyspace().isPresent()) { - keyspace = session.getKeyspace().get(); - } - if (keyspace == null) { - return Collections.emptySet(); - } - - token = request.getRoutingToken(); - key = (token == null) ? request.getRoutingKey() : null; - if (token == null && key == null) { - return Collections.emptySet(); - } - } catch (Exception e) { - // Protect against poorly-implemented Request instances - LOG.error("Unexpected error while trying to compute query plan", e); - return Collections.emptySet(); - } - - TokenMap tokenMap = maybeTokenMap.get(); - return token != null - ? 
tokenMap.getReplicas(keyspace, token) - : tokenMap.getReplicas(keyspace, key); - } - - @NonNull - protected Queue maybeAddDcFailover(@Nullable Request request, @NonNull Queue local) { - if (maxNodesPerRemoteDc <= 0 || localDc == null) { - return local; - } - if (!allowDcFailoverForLocalCl && request instanceof Statement) { - Statement statement = (Statement) request; - ConsistencyLevel consistency = statement.getConsistencyLevel(); - if (consistency == null) { - consistency = defaultConsistencyLevel; - } - if (consistency.isDcLocal()) { - return local; - } - } - if (preferredRemoteDcs.isEmpty()) { - return new CompositeQueryPlan(local, buildRemoteQueryPlanAll()); - } - return new CompositeQueryPlan(local, buildRemoteQueryPlanPreferred()); - } - - private QueryPlan buildRemoteQueryPlanAll() { - - return new LazyQueryPlan() { - @Override - protected Object[] computeNodes() { - - Object[] remoteNodes = - liveNodes.dcs().stream() - .filter(Predicates.not(Predicates.equalTo(localDc))) - .flatMap(dc -> liveNodes.dc(dc).stream().limit(maxNodesPerRemoteDc)) - .toArray(); - if (remoteNodes.length == 0) { - return EMPTY_NODES; - } - shuffleHead(remoteNodes, remoteNodes.length); - return remoteNodes; - } - }; - } - - private QueryPlan buildRemoteQueryPlanPreferred() { - - Set dcs = liveNodes.dcs(); - List orderedDcs = Lists.newArrayListWithCapacity(dcs.size()); - orderedDcs.addAll(preferredRemoteDcs); - orderedDcs.addAll(Sets.difference(dcs, preferredRemoteDcs)); - - QueryPlan[] queryPlans = - orderedDcs.stream() - .filter(Predicates.not(Predicates.equalTo(localDc))) - .map( - (dc) -> { - return new LazyQueryPlan() { - @Override - protected Object[] computeNodes() { - Object[] rv = liveNodes.dc(dc).stream().limit(maxNodesPerRemoteDc).toArray(); - if (rv.length == 0) { - return EMPTY_NODES; - } - shuffleHead(rv, rv.length); - return rv; - } - }; - }) - .toArray(QueryPlan[]::new); - - return new CompositeQueryPlan(queryPlans); - } - - /** Exposed as a protected method so that 
it can be accessed by tests */ - protected void shuffleHead(Object[] currentNodes, int headLength) { - ArrayUtils.shuffleHead(currentNodes, headLength); - } - - @Override - public void onAdd(@NonNull Node node) { - NodeDistance distance = computeNodeDistance(node); - // Setting to a non-ignored distance triggers the session to open a pool, which will in turn - // set the node UP when the first channel gets opened, then #onUp will be called, and the - // node will be eventually added to the live set. - distanceReporter.setDistance(node, distance); - LOG.debug("[{}] {} was added, setting distance to {}", logPrefix, node, distance); - } - - @Override - public void onUp(@NonNull Node node) { - NodeDistance distance = computeNodeDistance(node); - if (node.getDistance() != distance) { - distanceReporter.setDistance(node, distance); - } - if (distance != NodeDistance.IGNORED && liveNodes.add(node)) { - LOG.debug("[{}] {} came back UP, added to live set", logPrefix, node); - } - } - - @Override - public void onDown(@NonNull Node node) { - if (liveNodes.remove(node)) { - LOG.debug("[{}] {} went DOWN, removed from live set", logPrefix, node); - } - } - - @Override - public void onRemove(@NonNull Node node) { - if (liveNodes.remove(node)) { - LOG.debug("[{}] {} was removed, removed from live set", logPrefix, node); - } - } - - /** - * Computes the distance of the given node. - * - *

This method is called during {@linkplain #init(Map, DistanceReporter) initialization}, when - * a node {@linkplain #onAdd(Node) is added}, and when a node {@linkplain #onUp(Node) is back UP}. - */ - protected NodeDistance computeNodeDistance(@NonNull Node node) { - // We interrogate the custom evaluator every time since it could be dynamic - // and change its verdict between two invocations of this method. - NodeDistance distance = nodeDistanceEvaluator.evaluateDistance(node, localDc); - if (distance != null) { - return distance; - } - // no local DC defined: all nodes are considered LOCAL. - if (localDc == null) { - return NodeDistance.LOCAL; - } - // otherwise, the node is LOCAL if its datacenter is the local datacenter. - if (Objects.equals(node.getDatacenter(), localDc)) { - return NodeDistance.LOCAL; - } - // otherwise, the node will be either REMOTE or IGNORED, depending - // on how many remote nodes we accept per DC. - if (maxNodesPerRemoteDc > 0) { - Object[] remoteNodes = liveNodes.dc(node.getDatacenter()).toArray(); - for (int i = 0; i < maxNodesPerRemoteDc; i++) { - if (i == remoteNodes.length) { - // there is still room for one more REMOTE node in this DC - return NodeDistance.REMOTE; - } else if (remoteNodes[i] == node) { - return NodeDistance.REMOTE; - } - } - } - return NodeDistance.IGNORED; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicy.java deleted file mode 100644 index 1d978091c9d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicy.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.InferringLocalDcHelper; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -/** - * An implementation of {@link LoadBalancingPolicy} that infers the local datacenter from the - * contact points, if no datacenter was provided neither through configuration nor programmatically. - * - *

To activate this policy, modify the {@code basic.load-balancing-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   basic.load-balancing-policy {
- *     class = DcInferringLoadBalancingPolicy
- *     local-datacenter = datacenter1 # optional
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *

Local datacenter: This implementation requires a local datacenter to be defined, - * otherwise it will throw an {@link IllegalStateException}. A local datacenter can be supplied - * either: - * - *

    - *
  1. Programmatically with {@link - * com.datastax.oss.driver.api.core.session.SessionBuilder#withLocalDatacenter(String) - * SessionBuilder#withLocalDatacenter(String)}; - *
  2. Through configuration, by defining the option {@link - * DefaultDriverOption#LOAD_BALANCING_LOCAL_DATACENTER - * basic.load-balancing-policy.local-datacenter}; - *
  3. Or implicitly: in this case this implementation will infer the local datacenter from the - * provided contact points, if and only if they are all located in the same datacenter. - *
- * - *

Query plan: see {@link DefaultLoadBalancingPolicy} for details on the computation of - * query plans. - * - *

This class is not recommended for normal users who should always prefer {@link - * DefaultLoadBalancingPolicy}. - */ -@ThreadSafe -public class DcInferringLoadBalancingPolicy extends DefaultLoadBalancingPolicy { - - public DcInferringLoadBalancingPolicy( - @NonNull DriverContext context, @NonNull String profileName) { - super(context, profileName); - } - - @NonNull - @Override - protected Optional discoverLocalDc(@NonNull Map nodes) { - return new InferringLocalDcHelper(context, profile, logPrefix).discoverLocalDc(nodes); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java deleted file mode 100644 index 8e1c1fe5039..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java +++ /dev/null @@ -1,363 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.MINUTES; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.MandatoryLocalDcHelper; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.ArrayUtils; -import com.datastax.oss.driver.internal.core.util.collection.QueryPlan; -import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.MapMaker; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.BitSet; -import java.util.Map; -import java.util.Optional; -import java.util.OptionalLong; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.atomic.AtomicLongArray; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default load balancing policy implementation. - * - *

To activate this policy, modify the {@code basic.load-balancing-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   basic.load-balancing-policy {
- *     class = DefaultLoadBalancingPolicy
- *     local-datacenter = datacenter1
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *

Local datacenter: This implementation requires a local datacenter to be defined, - * otherwise it will throw an {@link IllegalStateException}. A local datacenter can be supplied - * either: - * - *

    - *
  1. Programmatically with {@link - * com.datastax.oss.driver.api.core.session.SessionBuilder#withLocalDatacenter(String) - * SessionBuilder#withLocalDatacenter(String)}; - *
  2. Through configuration, by defining the option {@link - * DefaultDriverOption#LOAD_BALANCING_LOCAL_DATACENTER - * basic.load-balancing-policy.local-datacenter}; - *
  3. Or implicitly, if and only if no explicit contact points were provided: in this case this - * implementation will infer the local datacenter from the implicit contact point (localhost). - *
- * - *

Query plan: This implementation prioritizes replica nodes over non-replica ones; if - * more than one replica is available, the replicas will be shuffled; if more than 2 replicas are - * available, they will be ordered from most healthy to least healthy ("Power of 2 choices" or busy - * node avoidance algorithm). Non-replica nodes will be included in a round-robin fashion. If the - * local datacenter is defined (see above), query plans will only include local nodes, never remote - * ones; if it is unspecified however, query plans may contain nodes from different datacenters. - */ -@ThreadSafe -public class DefaultLoadBalancingPolicy extends BasicLoadBalancingPolicy implements RequestTracker { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultLoadBalancingPolicy.class); - - private static final long NEWLY_UP_INTERVAL_NANOS = MINUTES.toNanos(1); - private static final int MAX_IN_FLIGHT_THRESHOLD = 10; - private static final long RESPONSE_COUNT_RESET_INTERVAL_NANOS = MILLISECONDS.toNanos(200); - - protected final ConcurrentMap responseTimes; - protected final Map upTimes = new ConcurrentHashMap<>(); - private final boolean avoidSlowReplicas; - - public DefaultLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) { - super(context, profileName); - this.avoidSlowReplicas = - profile.getBoolean(DefaultDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, true); - this.responseTimes = new MapMaker().weakKeys().makeMap(); - } - - @NonNull - @Override - public Optional getRequestTracker() { - if (avoidSlowReplicas) { - return Optional.of(this); - } else { - return Optional.empty(); - } - } - - @NonNull - @Override - protected Optional discoverLocalDc(@NonNull Map nodes) { - return new MandatoryLocalDcHelper(context, profile, logPrefix).discoverLocalDc(nodes); - } - - @NonNull - @Override - public Queue newQueryPlan(@Nullable Request request, @Nullable Session session) { - if (!avoidSlowReplicas) { - return 
super.newQueryPlan(request, session); - } - - // Take a snapshot since the set is concurrent: - Object[] currentNodes = getLiveNodes().dc(getLocalDatacenter()).toArray(); - - Set allReplicas = getReplicas(request, session); - int replicaCount = 0; // in currentNodes - - if (!allReplicas.isEmpty()) { - - // Move replicas to the beginning of the plan - for (int i = 0; i < currentNodes.length; i++) { - Node node = (Node) currentNodes[i]; - if (allReplicas.contains(node)) { - ArrayUtils.bubbleUp(currentNodes, i, replicaCount); - replicaCount++; - } - } - - if (replicaCount > 1) { - - shuffleHead(currentNodes, replicaCount); - - if (replicaCount > 2) { - - assert session != null; - - // Test replicas health - Node newestUpReplica = null; - BitSet unhealthyReplicas = null; // bit mask storing indices of unhealthy replicas - long mostRecentUpTimeNanos = -1; - long now = nanoTime(); - for (int i = 0; i < replicaCount; i++) { - Node node = (Node) currentNodes[i]; - assert node != null; - Long upTimeNanos = upTimes.get(node); - if (upTimeNanos != null - && now - upTimeNanos - NEWLY_UP_INTERVAL_NANOS < 0 - && upTimeNanos - mostRecentUpTimeNanos > 0) { - newestUpReplica = node; - mostRecentUpTimeNanos = upTimeNanos; - } - if (newestUpReplica == null && isUnhealthy(node, session, now)) { - if (unhealthyReplicas == null) { - unhealthyReplicas = new BitSet(replicaCount); - } - unhealthyReplicas.set(i); - } - } - - // When: - // - there isn't any newly UP replica and - // - there is one or more unhealthy replicas and - // - there is a majority of healthy replicas - int unhealthyReplicasCount = - unhealthyReplicas == null ? 0 : unhealthyReplicas.cardinality(); - if (newestUpReplica == null - && unhealthyReplicasCount > 0 - && unhealthyReplicasCount < (replicaCount / 2.0)) { - - // Reorder the unhealthy replicas to the back of the list - // Start from the back of the replicas, then move backwards; - // stop once all unhealthy replicas are moved to the back. 
- int counter = 0; - for (int i = replicaCount - 1; i >= 0 && counter < unhealthyReplicasCount; i--) { - if (unhealthyReplicas.get(i)) { - ArrayUtils.bubbleDown(currentNodes, i, replicaCount - 1 - counter); - counter++; - } - } - } - - // When: - // - there is a newly UP replica and - // - the replica in first or second position is the most recent replica marked as UP and - // - dice roll 1d4 != 1 - else if ((newestUpReplica == currentNodes[0] || newestUpReplica == currentNodes[1]) - && diceRoll1d4() != 1) { - - // Send it to the back of the replicas - ArrayUtils.bubbleDown( - currentNodes, newestUpReplica == currentNodes[0] ? 0 : 1, replicaCount - 1); - } - - // Reorder the first two replicas in the shuffled list based on the number of - // in-flight requests - if (getInFlight((Node) currentNodes[0], session) - > getInFlight((Node) currentNodes[1], session)) { - ArrayUtils.swap(currentNodes, 0, 1); - } - } - } - } - - LOG.trace("[{}] Prioritizing {} local replicas", logPrefix, replicaCount); - - // Round-robin the remaining nodes - ArrayUtils.rotate( - currentNodes, - replicaCount, - currentNodes.length - replicaCount, - roundRobinAmount.getAndUpdate(INCREMENT)); - - QueryPlan plan = currentNodes.length == 0 ? 
QueryPlan.EMPTY : new SimpleQueryPlan(currentNodes); - return maybeAddDcFailover(request, plan); - } - - @Override - public void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - updateResponseTimes(node); - } - - @Override - public void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - updateResponseTimes(node); - } - - /** Exposed as a protected method so that it can be accessed by tests */ - protected long nanoTime() { - return System.nanoTime(); - } - - /** Exposed as a protected method so that it can be accessed by tests */ - protected int diceRoll1d4() { - return ThreadLocalRandom.current().nextInt(4); - } - - protected boolean isUnhealthy(@NonNull Node node, @NonNull Session session, long now) { - return isBusy(node, session) && isResponseRateInsufficient(node, now); - } - - protected boolean isBusy(@NonNull Node node, @NonNull Session session) { - return getInFlight(node, session) >= MAX_IN_FLIGHT_THRESHOLD; - } - - protected boolean isResponseRateInsufficient(@NonNull Node node, long now) { - NodeResponseRateSample sample = responseTimes.get(node); - return !(sample == null || sample.hasSufficientResponses(now)); - } - - /** - * Synchronously updates the response times for the given node. It is synchronous because the - * {@link #DefaultLoadBalancingPolicy(com.datastax.oss.driver.api.core.context.DriverContext, - * java.lang.String) CacheLoader.load} assigned is synchronous. - * - * @param node The node to update. - */ - protected void updateResponseTimes(@NonNull Node node) { - this.responseTimes.compute(node, (k, v) -> v == null ? 
new NodeResponseRateSample() : v.next()); - } - - protected int getInFlight(@NonNull Node node, @NonNull Session session) { - // The cast will always succeed because there's no way to replace the internal session impl - ChannelPool pool = ((DefaultSession) session).getPools().get(node); - // Note: getInFlight() includes orphaned ids, which is what we want as we need to account - // for requests that were cancelled or timed out (since the node is likely to still be - // processing them). - return (pool == null) ? 0 : pool.getInFlight(); - } - - protected class NodeResponseRateSample { - - @VisibleForTesting protected final long oldest; - @VisibleForTesting protected final OptionalLong newest; - - private NodeResponseRateSample() { - long now = nanoTime(); - this.oldest = now; - this.newest = OptionalLong.empty(); - } - - private NodeResponseRateSample(long oldestSample) { - this(oldestSample, nanoTime()); - } - - private NodeResponseRateSample(long oldestSample, long newestSample) { - this.oldest = oldestSample; - this.newest = OptionalLong.of(newestSample); - } - - @VisibleForTesting - protected NodeResponseRateSample(AtomicLongArray times) { - assert times.length() >= 1; - this.oldest = times.get(0); - this.newest = (times.length() > 1) ? OptionalLong.of(times.get(1)) : OptionalLong.empty(); - } - - // Our newest sample becomes the oldest in the next generation - private NodeResponseRateSample next() { - return new NodeResponseRateSample(this.getNewestValidSample(), nanoTime()); - } - - // If we have a pair of values return the newest, otherwise we have just one value... so just - // return it - private long getNewestValidSample() { - return this.newest.orElse(this.oldest); - } - - // response rate is considered insufficient when less than 2 responses were obtained in - // the past interval delimited by RESPONSE_COUNT_RESET_INTERVAL_NANOS. 
- private boolean hasSufficientResponses(long now) { - // If we only have one sample it's an automatic failure - if (!this.newest.isPresent()) return true; - long threshold = now - RESPONSE_COUNT_RESET_INTERVAL_NANOS; - return this.oldest - threshold >= 0; - } - } - - @NonNull - @Override - public Map getStartupConfiguration() { - Map parent = super.getStartupConfiguration(); - return ImmutableMap.of( - DefaultLoadBalancingPolicy.class.getSimpleName(), - parent.get(BasicLoadBalancingPolicy.class.getSimpleName())); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/DefaultNodeDistanceEvaluatorHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/DefaultNodeDistanceEvaluatorHelper.java deleted file mode 100644 index 537497b83c8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/DefaultNodeDistanceEvaluatorHelper.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.Reflection; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import java.util.UUID; -import java.util.function.Predicate; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A {@link NodeDistanceEvaluatorHelper} implementation that fetches the user-supplied evaluator, if - * any, from the programmatic configuration API, or else, from the driver configuration. If no - * user-supplied evaluator can be retrieved, a dummy evaluator will be used which always evaluates - * null distances. 
- */ -@ThreadSafe -public class DefaultNodeDistanceEvaluatorHelper implements NodeDistanceEvaluatorHelper { - - private static final Logger LOG = - LoggerFactory.getLogger(DefaultNodeDistanceEvaluatorHelper.class); - - @NonNull protected final InternalDriverContext context; - @NonNull protected final DriverExecutionProfile profile; - @NonNull protected final String logPrefix; - - public DefaultNodeDistanceEvaluatorHelper( - @NonNull InternalDriverContext context, - @NonNull DriverExecutionProfile profile, - @NonNull String logPrefix) { - this.context = context; - this.profile = profile; - this.logPrefix = logPrefix; - } - - @NonNull - @Override - public NodeDistanceEvaluator createNodeDistanceEvaluator( - @Nullable String localDc, @NonNull Map nodes) { - NodeDistanceEvaluator nodeDistanceEvaluatorFromConfig = nodeDistanceEvaluatorFromConfig(); - return (node, dc) -> { - NodeDistance distance = nodeDistanceEvaluatorFromConfig.evaluateDistance(node, dc); - if (distance != null) { - LOG.debug("[{}] Evaluator assigned distance {} to node {}", logPrefix, distance, node); - } else { - LOG.debug("[{}] Evaluator did not assign a distance to node {}", logPrefix, node); - } - return distance; - }; - } - - @NonNull - protected NodeDistanceEvaluator nodeDistanceEvaluatorFromConfig() { - NodeDistanceEvaluator evaluator = context.getNodeDistanceEvaluator(profile.getName()); - if (evaluator != null) { - LOG.debug("[{}] Node distance evaluator set programmatically", logPrefix); - } else { - evaluator = - Reflection.buildFromConfig( - context, - profile.getName(), - DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS, - NodeDistanceEvaluator.class) - .orElse(null); - if (evaluator != null) { - LOG.debug("[{}] Node distance evaluator set from configuration", logPrefix); - } else { - @SuppressWarnings({"unchecked", "deprecation"}) - Predicate nodeFilterFromConfig = - Reflection.buildFromConfig( - context, - profile.getName(), - DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS, 
- Predicate.class) - .orElse(null); - if (nodeFilterFromConfig != null) { - evaluator = new NodeFilterToDistanceEvaluatorAdapter(nodeFilterFromConfig); - LOG.debug( - "[{}] Node distance evaluator set from deprecated node filter configuration", - logPrefix); - } - } - } - if (evaluator == null) { - evaluator = PASS_THROUGH_DISTANCE_EVALUATOR; - } - return evaluator; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/InferringLocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/InferringLocalDcHelper.java deleted file mode 100644 index 8608b855e8d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/InferringLocalDcHelper.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import static com.datastax.oss.driver.internal.core.time.Clock.LOG; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.HashSet; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -/** - * An implementation of {@link LocalDcHelper} that fetches the user-supplied datacenter, if any, - * from the programmatic configuration API, or else, from the driver configuration. If no local - * datacenter is explicitly defined, this implementation infers the local datacenter from the - * contact points: if all contact points share the same datacenter, that datacenter is returned. If - * the contact points are from different datacenters, or if no contact points reported any - * datacenter, an {@link IllegalStateException} is thrown. - */ -@ThreadSafe -public class InferringLocalDcHelper extends OptionalLocalDcHelper { - - public InferringLocalDcHelper( - @NonNull InternalDriverContext context, - @NonNull DriverExecutionProfile profile, - @NonNull String logPrefix) { - super(context, profile, logPrefix); - } - - /** @return The local datacenter; always present. 
*/ - @NonNull - @Override - public Optional discoverLocalDc(@NonNull Map nodes) { - Optional optionalLocalDc = super.discoverLocalDc(nodes); - if (optionalLocalDc.isPresent()) { - return optionalLocalDc; - } - Set datacenters = new HashSet<>(); - Set contactPoints = context.getMetadataManager().getContactPoints(); - for (Node node : contactPoints) { - String datacenter = node.getDatacenter(); - if (datacenter != null) { - datacenters.add(datacenter); - } - } - if (datacenters.size() == 1) { - String localDc = datacenters.iterator().next(); - LOG.info("[{}] Inferred local DC from contact points: {}", logPrefix, localDc); - return Optional.of(localDc); - } - if (datacenters.isEmpty()) { - throw new IllegalStateException( - "The local DC could not be inferred from contact points, please set it explicitly (see " - + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() - + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter)"); - } - throw new IllegalStateException( - String.format( - "No local DC was provided, but the contact points are from different DCs: %s; " - + "please set the local DC explicitly (see " - + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() - + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter)", - formatNodesAndDcs(contactPoints))); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/LocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/LocalDcHelper.java deleted file mode 100644 index 183c7f90dec..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/LocalDcHelper.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -@FunctionalInterface -@ThreadSafe -public interface LocalDcHelper { - - /** - * Returns the local datacenter, if it can be discovered, or returns {@link Optional#empty empty} - * otherwise. - * - *

Implementors may choose to throw {@link IllegalStateException} instead of returning {@link - * Optional#empty empty}, if they require a local datacenter to be defined in order to operate - * properly. - * - * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) - * when the load balancing policy was {@linkplain LoadBalancingPolicy#init(Map, - * LoadBalancingPolicy.DistanceReporter) initialized}. This argument is provided in case - * implementors need to inspect the cluster topology to discover the local datacenter. - * @return The local datacenter, or {@link Optional#empty empty} if none found. - * @throws IllegalStateException if the local datacenter could not be discovered, and this policy - * cannot operate without it. - */ - @NonNull - Optional discoverLocalDc(@NonNull Map nodes); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/MandatoryLocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/MandatoryLocalDcHelper.java deleted file mode 100644 index 9a0e9a2d4ce..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/MandatoryLocalDcHelper.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * An implementation of {@link LocalDcHelper} that fetches the user-supplied datacenter, if any, - * from the programmatic configuration API, or else, from the driver configuration. If no local - * datacenter is explicitly defined, this implementation will consider two distinct situations: - * - *

    - *
  1. If no explicit contact points were provided, this implementation will infer the local - * datacenter from the implicit contact point (localhost). - *
  2. If explicit contact points were provided however, this implementation will throw {@link - * IllegalStateException}. - *
- */ -@ThreadSafe -public class MandatoryLocalDcHelper extends OptionalLocalDcHelper { - - private static final Logger LOG = LoggerFactory.getLogger(MandatoryLocalDcHelper.class); - - public MandatoryLocalDcHelper( - @NonNull InternalDriverContext context, - @NonNull DriverExecutionProfile profile, - @NonNull String logPrefix) { - super(context, profile, logPrefix); - } - - /** @return The local datacenter; always present. */ - @NonNull - @Override - public Optional discoverLocalDc(@NonNull Map nodes) { - Optional optionalLocalDc = super.discoverLocalDc(nodes); - if (optionalLocalDc.isPresent()) { - return optionalLocalDc; - } - Set contactPoints = context.getMetadataManager().getContactPoints(); - if (context.getMetadataManager().wasImplicitContactPoint()) { - // We only allow automatic inference of the local DC in this specific case - assert contactPoints.size() == 1; - Node contactPoint = contactPoints.iterator().next(); - String localDc = contactPoint.getDatacenter(); - if (localDc != null) { - LOG.debug( - "[{}] Local DC set from implicit contact point {}: {}", - logPrefix, - contactPoint, - localDc); - return Optional.of(localDc); - } else { - throw new IllegalStateException( - "The local DC could not be inferred from implicit contact point, please set it explicitly (see " - + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() - + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter)"); - } - } else { - throw new IllegalStateException( - "Since you provided explicit contact points, the local DC must be explicitly set (see " - + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() - + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter). " - + "Current contact points are: " - + formatNodesAndDcs(contactPoints) - + ". 
Current DCs in this cluster are: " - + formatDcs(nodes.values())); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeDistanceEvaluatorHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeDistanceEvaluatorHelper.java deleted file mode 100644 index 61e094b318a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeDistanceEvaluatorHelper.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -@FunctionalInterface -@ThreadSafe -public interface NodeDistanceEvaluatorHelper { - - NodeDistanceEvaluator PASS_THROUGH_DISTANCE_EVALUATOR = (node, localDc) -> null; - - /** - * Creates a new node distance evaluator. - * - * @param localDc The local datacenter, or null if none defined. - * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) - * when the load balancing policy was {@linkplain LoadBalancingPolicy#init(Map, - * LoadBalancingPolicy.DistanceReporter) initialized}. This argument is provided in case - * implementors need to inspect the cluster topology to create the node distance evaluator. - * @return the node distance evaluator to use. - */ - @NonNull - NodeDistanceEvaluator createNodeDistanceEvaluator( - @Nullable String localDc, @NonNull Map nodes); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeFilterToDistanceEvaluatorAdapter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeFilterToDistanceEvaluatorAdapter.java deleted file mode 100644 index 902018fb7d4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeFilterToDistanceEvaluatorAdapter.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.function.Predicate; - -public class NodeFilterToDistanceEvaluatorAdapter implements NodeDistanceEvaluator { - - private final Predicate nodeFilter; - - public NodeFilterToDistanceEvaluatorAdapter(@NonNull Predicate nodeFilter) { - this.nodeFilter = nodeFilter; - } - - @Nullable - @Override - public NodeDistance evaluateDistance(@NonNull Node node, @Nullable String localDc) { - return nodeFilter.test(node) ? null : NodeDistance.IGNORED; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/OptionalLocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/OptionalLocalDcHelper.java deleted file mode 100644 index c6143f3fa16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/OptionalLocalDcHelper.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * An implementation of {@link LocalDcHelper} that fetches the local datacenter from the - * programmatic configuration API, or else, from the driver configuration. If no user-supplied - * datacenter can be retrieved, it returns {@link Optional#empty empty}. 
- */ -@ThreadSafe -public class OptionalLocalDcHelper implements LocalDcHelper { - - private static final Logger LOG = LoggerFactory.getLogger(OptionalLocalDcHelper.class); - - @NonNull protected final InternalDriverContext context; - @NonNull protected final DriverExecutionProfile profile; - @NonNull protected final String logPrefix; - - public OptionalLocalDcHelper( - @NonNull InternalDriverContext context, - @NonNull DriverExecutionProfile profile, - @NonNull String logPrefix) { - this.context = context; - this.profile = profile; - this.logPrefix = logPrefix; - } - - /** - * @return The local datacenter from the programmatic configuration API, or from the driver - * configuration; {@link Optional#empty empty} if none found. - */ - @Override - @NonNull - public Optional discoverLocalDc(@NonNull Map nodes) { - Optional localDc = configuredLocalDc(); - if (localDc.isPresent()) { - checkLocalDatacenterCompatibility( - localDc.get(), context.getMetadataManager().getContactPoints()); - } else { - LOG.debug("[{}] Local DC not set, DC awareness will be disabled", logPrefix); - } - return localDc; - } - - /** - * Checks if the contact points are compatible with the local datacenter specified either through - * configuration, or programmatically. - * - *

The default implementation logs a warning when a contact point reports a datacenter - * different from the local one, and only for the default profile. - * - * @param localDc The local datacenter, as specified in the config, or programmatically. - * @param contactPoints The contact points provided when creating the session. - */ - protected void checkLocalDatacenterCompatibility( - @NonNull String localDc, Set contactPoints) { - if (profile.getName().equals(DriverExecutionProfile.DEFAULT_NAME)) { - Set badContactPoints = new LinkedHashSet<>(); - for (Node node : contactPoints) { - if (!Objects.equals(localDc, node.getDatacenter())) { - badContactPoints.add(node); - } - } - if (!badContactPoints.isEmpty()) { - LOG.warn( - "[{}] You specified {} as the local DC, but some contact points are from a different DC: {}; " - + "please provide the correct local DC, or check your contact points", - logPrefix, - localDc, - formatNodesAndDcs(badContactPoints)); - } - } - } - - /** - * Formats the given nodes as a string detailing each contact point and its datacenter, for - * informational purposes. - */ - @NonNull - protected String formatNodesAndDcs(Iterable nodes) { - List l = new ArrayList<>(); - for (Node node : nodes) { - l.add(node + "=" + node.getDatacenter()); - } - return String.join(", ", l); - } - - /** - * Formats the given nodes as a string detailing each distinct datacenter, for informational - * purposes. - */ - @NonNull - protected String formatDcs(Iterable nodes) { - List l = new ArrayList<>(); - for (Node node : nodes) { - if (node.getDatacenter() != null) { - l.add(node.getDatacenter()); - } - } - return String.join(", ", new TreeSet<>(l)); - } - - /** @return Local data center set programmatically or from configuration file. 
*/ - @NonNull - public Optional configuredLocalDc() { - String localDc = context.getLocalDatacenter(profile.getName()); - if (localDc != null) { - LOG.debug("[{}] Local DC set programmatically: {}", logPrefix, localDc); - return Optional.of(localDc); - } else if (profile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) { - localDc = profile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER); - LOG.debug("[{}] Local DC set from configuration: {}", logPrefix, localDc); - return Optional.of(localDc); - } - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSet.java deleted file mode 100644 index 2a6e79023de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSet.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DcAgnosticNodeSet implements NodeSet { - - private final Set nodes = new CopyOnWriteArraySet<>(); - - @Override - public boolean add(@NonNull Node node) { - return nodes.add(node); - } - - @Override - public boolean remove(@NonNull Node node) { - return nodes.remove(node); - } - - @Override - @NonNull - public Set dc(@Nullable String dc) { - return nodes; - } - - @Override - public Set dcs() { - return Collections.emptySet(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSet.java deleted file mode 100644 index 37f02bec878..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSet.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class MultiDcNodeSet implements NodeSet { - - private static final String UNKNOWN_DC = ""; - - private final Map> nodes = new ConcurrentHashMap<>(); - - @Override - public boolean add(@NonNull Node node) { - AtomicBoolean added = new AtomicBoolean(); - nodes.compute( - getMapKey(node), - (key, current) -> { - if (current == null) { - // We use CopyOnWriteArraySet because we need - // 1) to preserve insertion order, and - // 2) a "snapshot"-style toArray() implementation - current = new CopyOnWriteArraySet<>(); - } - if (current.add(node)) { - added.set(true); - } - return current; - }); - return added.get(); - } - - @Override - public boolean remove(@NonNull Node node) { - AtomicBoolean removed = new AtomicBoolean(); - nodes.compute( - getMapKey(node), - (key, current) -> { - if (current != null) { - if (current.remove(node)) { - removed.set(true); - } - } - return current; - }); - return removed.get(); - } - - @Override - @NonNull - public Set dc(@Nullable String dc) { - return nodes.getOrDefault(getMapKey(dc), Collections.emptySet()); - } - - @Override - public Set dcs() { - return nodes.keySet(); - } - - @NonNull - private String getMapKey(@NonNull Node node) { - return getMapKey(node.getDatacenter()); - } - - @NonNull - private String getMapKey(@Nullable String dc) { - return dc == null ? 
UNKNOWN_DC : dc; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/NodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/NodeSet.java deleted file mode 100644 index 66460e16a7c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/NodeSet.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -/** - * A thread-safe abstraction around a map of nodes per datacenter, to facilitate node management by - * load balancing policies. - */ -@ThreadSafe -public interface NodeSet { - - /** - * Adds the given node to this set. - * - *

If this set was initialized with datacenter awareness, the node will be added to its - * datacenter's specific set; otherwise, the node is added to a general set containing all nodes - * in the cluster. - * - * @param node The node to add. - * @return true if the node was added, false otherwise (because it was already present). - */ - boolean add(@NonNull Node node); - - /** - * Removes the node from the set. - * - * @param node The node to remove. - * @return true if the node was removed, false otherwise (because it was not present). - */ - boolean remove(@NonNull Node node); - - /** - * Returns the current nodes in the given datacenter. - * - *

If this set was initialized with datacenter awareness, the returned set will contain only - * nodes pertaining to the given datacenter; otherwise, the given datacenter name is ignored and - * the returned set will contain all nodes in the cluster. - * - * @param dc The datacenter name, or null if the datacenter name is not known, or irrelevant. - * @return the current nodes in the given datacenter. - */ - @NonNull - Set dc(@Nullable String dc); - - /** - * Returns the current datacenter names known to this set. If datacenter awareness has been - * disabled, this method returns an empty set. - */ - Set dcs(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSet.java deleted file mode 100644 index 21c89d46927..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSet.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class SingleDcNodeSet implements NodeSet { - - private final Set nodes = new CopyOnWriteArraySet<>(); - - private final String dc; - private final Set dcs; - - public SingleDcNodeSet(@NonNull String dc) { - this.dc = dc; - dcs = ImmutableSet.of(dc); - } - - @Override - public boolean add(@NonNull Node node) { - if (Objects.equals(node.getDatacenter(), dc)) { - return nodes.add(node); - } - return false; - } - - @Override - public boolean remove(@NonNull Node node) { - if (Objects.equals(node.getDatacenter(), dc)) { - return nodes.remove(node); - } - return false; - } - - @Override - @NonNull - public Set dc(@Nullable String dc) { - if (Objects.equals(this.dc, dc)) { - return nodes; - } - return Collections.emptySet(); - } - - @Override - public Set dcs() { - return dcs; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java deleted file mode 100644 index ac68b92fef2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class AddNodeRefresh extends NodesRefresh { - - @VisibleForTesting final NodeInfo newNodeInfo; - - AddNodeRefresh(NodeInfo newNodeInfo) { - this.newNodeInfo = newNodeInfo; - } - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - Map oldNodes = oldMetadata.getNodes(); - Node existing = oldNodes.get(newNodeInfo.getHostId()); - if (existing == null) { - DefaultNode newNode = new DefaultNode(newNodeInfo.getEndPoint(), context); - copyInfos(newNodeInfo, newNode, context); - Map newNodes = - ImmutableMap.builder() - .putAll(oldNodes) - .put(newNode.getHostId(), newNode) - .build(); - return new Result( - oldMetadata.withNodes(newNodes, tokenMapEnabled, false, null, context), - ImmutableList.of(NodeStateEvent.added(newNode))); - } else { - // If a node is restarted after changing its 
broadcast RPC address, Cassandra considers that - // an addition, even though the host_id hasn't changed :( - // Update the existing instance and emit an UP event to trigger a pool reconnection. - if (!existing.getEndPoint().equals(newNodeInfo.getEndPoint())) { - copyInfos(newNodeInfo, ((DefaultNode) existing), context); - assert newNodeInfo.getBroadcastRpcAddress().isPresent(); // always for peer nodes - return new Result( - oldMetadata, - ImmutableList.of(TopologyEvent.suggestUp(newNodeInfo.getBroadcastRpcAddress().get()))); - } else { - return new Result(oldMetadata); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/CloudTopologyMonitor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/CloudTopologyMonitor.java deleted file mode 100644 index 021824a9b16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/CloudTopologyMonitor.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.Objects; -import java.util.UUID; - -public class CloudTopologyMonitor extends DefaultTopologyMonitor { - - private final InetSocketAddress cloudProxyAddress; - - public CloudTopologyMonitor(InternalDriverContext context, InetSocketAddress cloudProxyAddress) { - super(context); - this.cloudProxyAddress = cloudProxyAddress; - } - - @NonNull - @Override - protected EndPoint buildNodeEndPoint( - @NonNull AdminRow row, - @Nullable InetSocketAddress broadcastRpcAddress, - @NonNull EndPoint localEndPoint) { - UUID hostId = Objects.requireNonNull(row.getUuid("host_id")); - return new SniEndPoint(cloudProxyAddress, hostId.toString()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java deleted file mode 100644 index 7ffbee8e4bb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.net.InetSocketAddress; -import java.util.Objects; - -public class DefaultEndPoint implements EndPoint, Serializable { - - private static final long serialVersionUID = 1; - - private final InetSocketAddress address; - private final String metricPrefix; - - public DefaultEndPoint(InetSocketAddress address) { - this.address = Objects.requireNonNull(address, "address can't be null"); - this.metricPrefix = buildMetricPrefix(address); - } - - @NonNull - @Override - public InetSocketAddress resolve() { - return address; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DefaultEndPoint) { - InetSocketAddress thisAddress = this.address; - InetSocketAddress thatAddress = ((DefaultEndPoint) other).address; - // If only one of the addresses is unresolved, resolve the other. Otherwise (both resolved or - // both unresolved), compare as-is. 
- if (thisAddress.isUnresolved() && !thatAddress.isUnresolved()) { - thisAddress = new InetSocketAddress(thisAddress.getHostName(), thisAddress.getPort()); - } else if (thatAddress.isUnresolved() && !thisAddress.isUnresolved()) { - thatAddress = new InetSocketAddress(thatAddress.getHostName(), thatAddress.getPort()); - } - return thisAddress.equals(thatAddress); - } else { - return false; - } - } - - @Override - public int hashCode() { - return address.hashCode(); - } - - @Override - public String toString() { - return address.toString(); - } - - @NonNull - @Override - public String asMetricPrefix() { - return metricPrefix; - } - - private static String buildMetricPrefix(InetSocketAddress address) { - String hostString = address.getHostString(); - if (hostString == null) { - throw new IllegalArgumentException( - "Could not extract a host string from provided address " + address); - } - // Append the port since Cassandra 4 supports nodes with different ports - return hostString.replace('.', '_') + ':' + address.getPort(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java deleted file mode 100644 index 38f7e4a093e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenMap; -import com.datastax.oss.driver.internal.core.metadata.token.ReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is immutable, so that metadata changes are atomic for the client. Every mutation - * operation must return a new instance, that will replace the existing one in {@link - * MetadataManager}'s volatile field. 
- */ -@Immutable -public class DefaultMetadata implements Metadata { - private static final Logger LOG = LoggerFactory.getLogger(DefaultMetadata.class); - public static DefaultMetadata EMPTY = - new DefaultMetadata(Collections.emptyMap(), Collections.emptyMap(), null, null); - - protected final Map nodes; - protected final Map keyspaces; - protected final TokenMap tokenMap; - protected final String clusterName; - - protected DefaultMetadata( - Map nodes, - Map keyspaces, - TokenMap tokenMap, - String clusterName) { - this.nodes = nodes; - this.keyspaces = keyspaces; - this.tokenMap = tokenMap; - this.clusterName = clusterName; - } - - @NonNull - @Override - public Map getNodes() { - return nodes; - } - - @NonNull - @Override - public Map getKeyspaces() { - return keyspaces; - } - - @NonNull - @Override - public Optional getTokenMap() { - return Optional.ofNullable(tokenMap); - } - - @NonNull - @Override - public Optional getClusterName() { - return Optional.ofNullable(clusterName); - } - - /** - * Refreshes the current metadata with the given list of nodes. - * - * @param tokenMapEnabled whether to rebuild the token map or not; if this is {@code false} the - * current token map will be copied into the new metadata without being recomputed. - * @param tokensChanged whether we observed a change of tokens for at least one node. This will - * require a full rebuild of the token map. - * @param tokenFactory only needed for the initial refresh, afterwards the existing one in the - * token map is used. - * @return the new metadata. - */ - public DefaultMetadata withNodes( - Map newNodes, - boolean tokenMapEnabled, - boolean tokensChanged, - TokenFactory tokenFactory, - InternalDriverContext context) { - - // Force a rebuild if at least one node has different tokens, or there are new or removed nodes. 
- boolean forceFullRebuild = tokensChanged || !newNodes.equals(nodes); - - return new DefaultMetadata( - ImmutableMap.copyOf(newNodes), - this.keyspaces, - rebuildTokenMap( - newNodes, keyspaces, tokenMapEnabled, forceFullRebuild, tokenFactory, context), - context.getChannelFactory().getClusterName()); - } - - public DefaultMetadata withSchema( - Map newKeyspaces, - boolean tokenMapEnabled, - InternalDriverContext context) { - return new DefaultMetadata( - this.nodes, - ImmutableMap.copyOf(newKeyspaces), - rebuildTokenMap(nodes, newKeyspaces, tokenMapEnabled, false, null, context), - context.getChannelFactory().getClusterName()); - } - - @Nullable - protected TokenMap rebuildTokenMap( - Map newNodes, - Map newKeyspaces, - boolean tokenMapEnabled, - boolean forceFullRebuild, - TokenFactory tokenFactory, - InternalDriverContext context) { - - String logPrefix = context.getSessionName(); - ReplicationStrategyFactory replicationStrategyFactory = context.getReplicationStrategyFactory(); - - if (!tokenMapEnabled) { - LOG.debug("[{}] Token map is disabled, skipping", logPrefix); - return this.tokenMap; - } - long start = System.nanoTime(); - try { - DefaultTokenMap oldTokenMap = (DefaultTokenMap) this.tokenMap; - if (oldTokenMap == null) { - // Initial build, we need the token factory - if (tokenFactory == null) { - LOG.debug( - "[{}] Building initial token map but the token factory is missing, skipping", - logPrefix); - return null; - } else { - LOG.debug("[{}] Building initial token map", logPrefix); - return DefaultTokenMap.build( - newNodes.values(), - newKeyspaces.values(), - tokenFactory, - replicationStrategyFactory, - logPrefix); - } - } else if (forceFullRebuild) { - LOG.debug( - "[{}] Updating token map but some nodes/tokens have changed, full rebuild", logPrefix); - return DefaultTokenMap.build( - newNodes.values(), - newKeyspaces.values(), - oldTokenMap.getTokenFactory(), - replicationStrategyFactory, - logPrefix); - } else { - LOG.debug("[{}] Refreshing token 
map (only schema has changed)", logPrefix); - return oldTokenMap.refresh( - newNodes.values(), newKeyspaces.values(), replicationStrategyFactory); - } - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing token map, keeping previous version", - logPrefix, - t); - return this.tokenMap; - } finally { - LOG.debug("[{}] Rebuilding token map took {}", logPrefix, NanoTime.formatTimeSince(start)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java deleted file mode 100644 index 28f9e2de81c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -/** - * Implementation note: all the mutable state in this class is read concurrently, but only mutated - * from {@link MetadataManager}'s admin thread. 
- */ -@ThreadSafe -public class DefaultNode implements Node, Serializable { - - private static final long serialVersionUID = 1; - - private volatile EndPoint endPoint; - // A deserialized node is not attached to a session anymore, so we don't need to retain this - private transient volatile NodeMetricUpdater metricUpdater; - - volatile InetSocketAddress broadcastRpcAddress; - volatile InetSocketAddress broadcastAddress; - volatile InetSocketAddress listenAddress; - volatile String datacenter; - volatile String rack; - volatile Version cassandraVersion; - // Keep a copy of the raw tokens, to detect if they have changed when we refresh the node - volatile Set rawTokens; - volatile Map extras; - volatile UUID hostId; - volatile UUID schemaVersion; - - // These 4 fields are read concurrently, but only mutated on NodeStateManager's admin thread - volatile NodeState state; - volatile int openConnections; - volatile int reconnections; - volatile long upSinceMillis; - - volatile NodeDistance distance; - - public DefaultNode(EndPoint endPoint, InternalDriverContext context) { - this.endPoint = endPoint; - this.state = NodeState.UNKNOWN; - this.distance = NodeDistance.IGNORED; - this.rawTokens = Collections.emptySet(); - this.extras = Collections.emptyMap(); - // We leak a reference to a partially constructed object (this), but in practice this won't be a - // problem because the node updater only needs the connect address to initialize. 
- this.metricUpdater = context.getMetricsFactory().newNodeUpdater(this); - this.upSinceMillis = -1; - } - - @NonNull - @Override - public EndPoint getEndPoint() { - return endPoint; - } - - public void setEndPoint(@NonNull EndPoint newEndPoint, @NonNull InternalDriverContext context) { - if (!newEndPoint.equals(endPoint)) { - endPoint = newEndPoint; - - // The endpoint is also used to build metric names, so make sure they get updated - NodeMetricUpdater previousMetricUpdater = metricUpdater; - if (!(previousMetricUpdater instanceof NoopNodeMetricUpdater)) { - metricUpdater = context.getMetricsFactory().newNodeUpdater(this); - } - } - } - - @NonNull - @Override - public Optional getBroadcastRpcAddress() { - return Optional.ofNullable(broadcastRpcAddress); - } - - @NonNull - @Override - public Optional getBroadcastAddress() { - return Optional.ofNullable(broadcastAddress); - } - - @NonNull - @Override - public Optional getListenAddress() { - return Optional.ofNullable(listenAddress); - } - - @Nullable - @Override - public String getDatacenter() { - return datacenter; - } - - @Nullable - @Override - public String getRack() { - return rack; - } - - @Nullable - @Override - public Version getCassandraVersion() { - return cassandraVersion; - } - - @Nullable - @Override - public UUID getHostId() { - return hostId; - } - - @Nullable - @Override - public UUID getSchemaVersion() { - return schemaVersion; - } - - @NonNull - @Override - public Map getExtras() { - return extras; - } - - @NonNull - @Override - public NodeState getState() { - return state; - } - - @Override - public long getUpSinceMillis() { - return upSinceMillis; - } - - @Override - public int getOpenConnections() { - return openConnections; - } - - @Override - public boolean isReconnecting() { - return reconnections > 0; - } - - @NonNull - @Override - public NodeDistance getDistance() { - return distance; - } - - public NodeMetricUpdater getMetricUpdater() { - return metricUpdater; - } - - @Override - public 
String toString() { - // Include the hash code because this class uses reference equality - return String.format( - "Node(endPoint=%s, hostId=%s, hashCode=%x)", getEndPoint(), getHostId(), hashCode()); - } - - /** Note: deliberately not exposed by the public interface. */ - public Set getRawTokens() { - return rawTokens; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java deleted file mode 100644 index 8908f0be078..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.Immutable; -import net.jcip.annotations.NotThreadSafe; - -@Immutable -public class DefaultNodeInfo implements NodeInfo { - public static Builder builder() { - return new Builder(); - } - - private final EndPoint endPoint; - private final InetSocketAddress broadcastRpcAddress; - private final InetSocketAddress broadcastAddress; - private final InetSocketAddress listenAddress; - private final String datacenter; - private final String rack; - private final String cassandraVersion; - private final String partitioner; - private final Set tokens; - private final Map extras; - private final UUID hostId; - private final UUID schemaVersion; - - private DefaultNodeInfo(Builder builder) { - this.endPoint = builder.endPoint; - this.broadcastRpcAddress = builder.broadcastRpcAddress; - this.broadcastAddress = builder.broadcastAddress; - this.listenAddress = builder.listenAddress; - this.datacenter = builder.datacenter; - this.rack = builder.rack; - this.cassandraVersion = builder.cassandraVersion; - this.partitioner = builder.partitioner; - this.tokens = (builder.tokens == null) ? Collections.emptySet() : builder.tokens; - this.hostId = builder.hostId; - this.schemaVersion = builder.schemaVersion; - this.extras = (builder.extras == null) ? 
Collections.emptyMap() : builder.extras; - } - - @NonNull - @Override - public EndPoint getEndPoint() { - return endPoint; - } - - @NonNull - @Override - public Optional getBroadcastRpcAddress() { - return Optional.ofNullable(broadcastRpcAddress); - } - - @NonNull - @Override - public Optional getBroadcastAddress() { - return Optional.ofNullable(broadcastAddress); - } - - @NonNull - @Override - public Optional getListenAddress() { - return Optional.ofNullable(listenAddress); - } - - @Override - public String getDatacenter() { - return datacenter; - } - - @Override - public String getRack() { - return rack; - } - - @Override - public String getCassandraVersion() { - return cassandraVersion; - } - - @Override - public String getPartitioner() { - return partitioner; - } - - @Override - public Set getTokens() { - return tokens; - } - - @Override - public Map getExtras() { - return extras; - } - - @NonNull - @Override - public UUID getHostId() { - return hostId; - } - - @Override - public UUID getSchemaVersion() { - return schemaVersion; - } - - @NotThreadSafe - public static class Builder { - private EndPoint endPoint; - private InetSocketAddress broadcastRpcAddress; - private InetSocketAddress broadcastAddress; - private InetSocketAddress listenAddress; - private String datacenter; - private String rack; - private String cassandraVersion; - private String partitioner; - private Set tokens; - private Map extras; - private UUID hostId; - private UUID schemaVersion; - - public Builder withEndPoint(@NonNull EndPoint endPoint) { - this.endPoint = endPoint; - return this; - } - - public Builder withBroadcastRpcAddress(@Nullable InetSocketAddress address) { - this.broadcastRpcAddress = address; - return this; - } - - public Builder withBroadcastAddress(@Nullable InetSocketAddress address) { - this.broadcastAddress = address; - return this; - } - - public Builder withListenAddress(@Nullable InetSocketAddress address) { - this.listenAddress = address; - return this; - } - - 
public Builder withDatacenter(@Nullable String datacenter) { - this.datacenter = datacenter; - return this; - } - - public Builder withRack(@Nullable String rack) { - this.rack = rack; - return this; - } - - public Builder withCassandraVersion(@Nullable String cassandraVersion) { - this.cassandraVersion = cassandraVersion; - return this; - } - - public Builder withPartitioner(@Nullable String partitioner) { - this.partitioner = partitioner; - return this; - } - - public Builder withTokens(@Nullable Set tokens) { - this.tokens = tokens; - return this; - } - - public Builder withHostId(@NonNull UUID hostId) { - this.hostId = hostId; - return this; - } - - public Builder withSchemaVersion(@Nullable UUID schemaVersion) { - this.schemaVersion = schemaVersion; - return this; - } - - public Builder withExtra(@NonNull String key, @Nullable Object value) { - if (value != null) { - if (this.extras == null) { - this.extras = new HashMap<>(); - } - this.extras.put(key, value); - } - return this; - } - - public DefaultNodeInfo build() { - return new DefaultNodeInfo(this); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitor.java deleted file mode 100644 index f3dc988cfbc..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitor.java +++ /dev/null @@ -1,598 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; 
-import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default topology monitor, based on {@link ControlConnection}. - * - *

Note that event processing is implemented directly in the control connection, not here. - */ -@ThreadSafe -public class DefaultTopologyMonitor implements TopologyMonitor { - private static final Logger LOG = LoggerFactory.getLogger(DefaultTopologyMonitor.class); - - // Assume topology queries never need paging - private static final int INFINITE_PAGE_SIZE = -1; - - // A few system.peers columns which get special handling below - private static final String NATIVE_PORT = "native_port"; - private static final String NATIVE_TRANSPORT_PORT = "native_transport_port"; - - private final String logPrefix; - private final InternalDriverContext context; - private final ControlConnection controlConnection; - private final Duration timeout; - private final boolean reconnectOnInit; - private final CompletableFuture closeFuture; - - @VisibleForTesting volatile boolean isSchemaV2; - @VisibleForTesting volatile int port = -1; - - public DefaultTopologyMonitor(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - this.context = context; - this.controlConnection = context.getControlConnection(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.timeout = config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT); - this.reconnectOnInit = config.getBoolean(DefaultDriverOption.RECONNECT_ON_INIT); - this.closeFuture = new CompletableFuture<>(); - // Set this to true initially, after the first refreshNodes is called this will either stay true - // or be set to false; - this.isSchemaV2 = true; - } - - @Override - public CompletionStage init() { - if (closeFuture.isDone()) { - return CompletableFutures.failedFuture(new IllegalStateException("closed")); - } - return controlConnection.init(true, reconnectOnInit, true); - } - - @Override - public CompletionStage initFuture() { - return controlConnection.initFuture(); - } - - @Override - public CompletionStage> refreshNode(Node node) { - if (closeFuture.isDone()) { - return 
CompletableFutures.failedFuture(new IllegalStateException("closed")); - } - LOG.debug("[{}] Refreshing info for {}", logPrefix, node); - DriverChannel channel = controlConnection.channel(); - EndPoint localEndPoint = channel.getEndPoint(); - if (node.getEndPoint().equals(channel.getEndPoint())) { - // refreshNode is called for nodes that just came up. If the control node just came up, it - // means the control connection just reconnected, which means we did a full node refresh. So - // we don't need to process this call. - LOG.debug("[{}] Ignoring refresh of control node", logPrefix); - return CompletableFuture.completedFuture(Optional.empty()); - } else if (node.getBroadcastAddress().isPresent()) { - CompletionStage query; - if (isSchemaV2) { - query = - query( - channel, - "SELECT * FROM " - + getPeerTableName() - + " WHERE peer = :address and peer_port = :port", - ImmutableMap.of( - "address", - node.getBroadcastAddress().get().getAddress(), - "port", - node.getBroadcastAddress().get().getPort())); - } else { - query = - query( - channel, - "SELECT * FROM " + getPeerTableName() + " WHERE peer = :address", - ImmutableMap.of("address", node.getBroadcastAddress().get().getAddress())); - } - return query.thenApply(result -> firstPeerRowAsNodeInfo(result, localEndPoint)); - } else { - return query(channel, "SELECT * FROM " + getPeerTableName()) - .thenApply(result -> findInPeers(result, node.getHostId(), localEndPoint)); - } - } - - @Override - public CompletionStage> getNewNodeInfo(InetSocketAddress broadcastRpcAddress) { - if (closeFuture.isDone()) { - return CompletableFutures.failedFuture(new IllegalStateException("closed")); - } - LOG.debug("[{}] Fetching info for new node {}", logPrefix, broadcastRpcAddress); - DriverChannel channel = controlConnection.channel(); - EndPoint localEndPoint = channel.getEndPoint(); - return query(channel, "SELECT * FROM " + getPeerTableName()) - .thenApply(result -> findInPeers(result, broadcastRpcAddress, localEndPoint)); - } - - 
@Override - public CompletionStage> refreshNodeList() { - if (closeFuture.isDone()) { - return CompletableFutures.failedFuture(new IllegalStateException("closed")); - } - LOG.debug("[{}] Refreshing node list", logPrefix); - DriverChannel channel = controlConnection.channel(); - EndPoint localEndPoint = channel.getEndPoint(); - - savePort(channel); - - CompletionStage localQuery = query(channel, "SELECT * FROM system.local"); - CompletionStage peersV2Query = query(channel, "SELECT * FROM system.peers_v2"); - CompletableFuture peersQuery = new CompletableFuture<>(); - - peersV2Query.whenComplete( - (r, t) -> { - if (t != null) { - // If system.peers_v2 does not exist, downgrade to system.peers - if (t instanceof UnexpectedResponseException - && ((UnexpectedResponseException) t).message instanceof Error) { - Error error = (Error) ((UnexpectedResponseException) t).message; - if (error.code == ProtocolConstants.ErrorCode.INVALID - // Also downgrade on server error with a specific error message (DSE 6.0.0 to - // 6.0.2 with search enabled) - || (error.code == ProtocolConstants.ErrorCode.SERVER_ERROR - && error.message.contains("Unknown keyspace/cf pair (system.peers_v2)"))) { - this.isSchemaV2 = false; // We should not attempt this query in the future. 
- CompletableFutures.completeFrom( - query(channel, "SELECT * FROM system.peers"), peersQuery); - return; - } - } - peersQuery.completeExceptionally(t); - } else { - peersQuery.complete(r); - } - }); - - return localQuery.thenCombine( - peersQuery, - (controlNodeResult, peersResult) -> { - List nodeInfos = new ArrayList<>(); - AdminRow localRow = controlNodeResult.iterator().next(); - InetSocketAddress localBroadcastRpcAddress = - getBroadcastRpcAddress(localRow, localEndPoint); - nodeInfos.add(nodeInfoBuilder(localRow, localBroadcastRpcAddress, localEndPoint).build()); - for (AdminRow peerRow : peersResult) { - if (isPeerValid(peerRow)) { - InetSocketAddress peerBroadcastRpcAddress = - getBroadcastRpcAddress(peerRow, localEndPoint); - if (peerBroadcastRpcAddress != null) { - NodeInfo nodeInfo = - nodeInfoBuilder(peerRow, peerBroadcastRpcAddress, localEndPoint).build(); - nodeInfos.add(nodeInfo); - } - } - } - return nodeInfos; - }); - } - - @Override - public CompletionStage checkSchemaAgreement() { - if (closeFuture.isDone()) { - return CompletableFuture.completedFuture(true); - } - DriverChannel channel = controlConnection.channel(); - return new SchemaAgreementChecker(channel, context, logPrefix).run(); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - closeFuture.complete(null); - return closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return closeAsync(); - } - - @VisibleForTesting - protected CompletionStage query( - DriverChannel channel, String queryString, Map parameters) { - AdminRequestHandler handler; - try { - handler = - AdminRequestHandler.query( - channel, queryString, parameters, timeout, INFINITE_PAGE_SIZE, logPrefix); - } catch (Exception e) { - return CompletableFutures.failedFuture(e); - } - return handler.start(); - } - - private CompletionStage query(DriverChannel channel, String 
queryString) { - return query(channel, queryString, Collections.emptyMap()); - } - - private String getPeerTableName() { - return isSchemaV2 ? "system.peers_v2" : "system.peers"; - } - - private Optional firstPeerRowAsNodeInfo(AdminResult result, EndPoint localEndPoint) { - Iterator iterator = result.iterator(); - if (iterator.hasNext()) { - AdminRow row = iterator.next(); - if (isPeerValid(row)) { - return Optional.ofNullable(getBroadcastRpcAddress(row, localEndPoint)) - .map( - broadcastRpcAddress -> - nodeInfoBuilder(row, broadcastRpcAddress, localEndPoint).build()); - } - } - return Optional.empty(); - } - - /** - * Creates a {@link DefaultNodeInfo.Builder} instance from the given row. - * - * @param broadcastRpcAddress this is a parameter only because we already have it when we come - * from {@link #findInPeers(AdminResult, InetSocketAddress, EndPoint)}. Callers that don't - * already have it can use {@link #getBroadcastRpcAddress}. For the control host, this can be - * null; if this node is a peer however, this cannot be null, since we use that address to - * create the node's endpoint. Callers can use {@link #isPeerValid(AdminRow)} to check that - * before calling this method. - * @param localEndPoint the control node endpoint that was used to query the node's system tables. - * This is a parameter because it would be racy to call {@code - * controlConnection.channel().getEndPoint()} from within this method, as the control - * connection may have changed its channel since. So this parameter must be provided by the - * caller. 
- */ - @NonNull - protected DefaultNodeInfo.Builder nodeInfoBuilder( - @NonNull AdminRow row, - @Nullable InetSocketAddress broadcastRpcAddress, - @NonNull EndPoint localEndPoint) { - - EndPoint endPoint = buildNodeEndPoint(row, broadcastRpcAddress, localEndPoint); - - // in system.local - InetAddress broadcastInetAddress = row.getInetAddress("broadcast_address"); - if (broadcastInetAddress == null) { - // in system.peers or system.peers_v2 - broadcastInetAddress = row.getInetAddress("peer"); - } - - Integer broadcastPort = 0; - if (row.contains("broadcast_port")) { - // system.local for Cassandra >= 4.0 - broadcastPort = row.getInteger("broadcast_port"); - } else if (row.contains("peer_port")) { - // system.peers_v2 - broadcastPort = row.getInteger("peer_port"); - } - - InetSocketAddress broadcastAddress = null; - if (broadcastInetAddress != null && broadcastPort != null) { - broadcastAddress = new InetSocketAddress(broadcastInetAddress, broadcastPort); - } - - // in system.local only, and only for Cassandra versions >= 2.0.17, 2.1.8, 2.2.0 rc2; - // not present in system.peers nor system.peers_v2 - InetAddress listenInetAddress = row.getInetAddress("listen_address"); - - // in system.local only, and only for Cassandra >= 4.0 - Integer listenPort = 0; - if (row.contains("listen_port")) { - listenPort = row.getInteger("listen_port"); - } - - InetSocketAddress listenAddress = null; - if (listenInetAddress != null && listenPort != null) { - listenAddress = new InetSocketAddress(listenInetAddress, listenPort); - } - - DefaultNodeInfo.Builder builder = - DefaultNodeInfo.builder() - .withEndPoint(endPoint) - .withBroadcastRpcAddress(broadcastRpcAddress) - .withBroadcastAddress(broadcastAddress) - .withListenAddress(listenAddress) - .withDatacenter(row.getString("data_center")) - .withRack(row.getString("rack")) - .withCassandraVersion(row.getString("release_version")) - .withTokens(row.getSetOfString("tokens")) - .withPartitioner(row.getString("partitioner")) - 
.withHostId(Objects.requireNonNull(row.getUuid("host_id"))) - .withSchemaVersion(row.getUuid("schema_version")); - - // Handle DSE-specific columns, if present - String rawVersion = row.getString("dse_version"); - if (rawVersion != null) { - builder.withExtra(DseNodeProperties.DSE_VERSION, Version.parse(rawVersion)); - } - - ImmutableSet.Builder workloadsBuilder = ImmutableSet.builder(); - Boolean legacyGraph = row.getBoolean("graph"); // DSE 5.0 - if (legacyGraph != null && legacyGraph) { - workloadsBuilder.add("Graph"); - } - String legacyWorkload = row.getString("workload"); // DSE 5.0 (other than graph) - if (legacyWorkload != null) { - workloadsBuilder.add(legacyWorkload); - } - Set modernWorkloads = row.getSetOfString("workloads"); // DSE 5.1+ - if (modernWorkloads != null) { - workloadsBuilder.addAll(modernWorkloads); - } - ImmutableSet workloads = workloadsBuilder.build(); - if (!workloads.isEmpty()) { - builder.withExtra(DseNodeProperties.DSE_WORKLOADS, workloads); - } - - // Note: withExtra discards null values - builder - .withExtra(DseNodeProperties.SERVER_ID, row.getString("server_id")) - .withExtra(DseNodeProperties.NATIVE_TRANSPORT_PORT, row.getInteger("native_transport_port")) - .withExtra( - DseNodeProperties.NATIVE_TRANSPORT_PORT_SSL, - row.getInteger("native_transport_port_ssl")) - .withExtra(DseNodeProperties.STORAGE_PORT, row.getInteger("storage_port")) - .withExtra(DseNodeProperties.STORAGE_PORT_SSL, row.getInteger("storage_port_ssl")) - .withExtra(DseNodeProperties.JMX_PORT, row.getInteger("jmx_port")); - - return builder; - } - - /** - * Builds the node's endpoint from the given row. - * - * @param broadcastRpcAddress this is a parameter only because we already have it when we come - * from {@link #findInPeers(AdminResult, InetSocketAddress, EndPoint)}. Callers that don't - * already have it can use {@link #getBroadcastRpcAddress}. 
For the control host, this can be - * null; if this node is a peer however, this cannot be null, since we use that address to - * create the node's endpoint. Callers can use {@link #isPeerValid(AdminRow)} to check that - * before calling this method. - * @param localEndPoint the control node endpoint that was used to query the node's system tables. - * This is a parameter because it would be racy to call {@code - * controlConnection.channel().getEndPoint()} from within this method, as the control - * connection may have changed its channel since. So this parameter must be provided by the - * caller. - */ - @NonNull - protected EndPoint buildNodeEndPoint( - @NonNull AdminRow row, - @Nullable InetSocketAddress broadcastRpcAddress, - @NonNull EndPoint localEndPoint) { - boolean peer = row.contains("peer"); - if (peer) { - // If this node is a peer, its broadcast RPC address must be present. - Objects.requireNonNull( - broadcastRpcAddress, "broadcastRpcAddress cannot be null for a peer row"); - // Deployments that use a custom EndPoint implementation will need their own TopologyMonitor. - // One simple approach is to extend this class and override this method. - return new DefaultEndPoint(context.getAddressTranslator().translate(broadcastRpcAddress)); - } else { - // Don't rely on system.local.rpc_address for the control node, because it mistakenly - // reports the normal RPC address instead of the broadcast one (CASSANDRA-11181). We - // already know the endpoint anyway since we've just used it to query. - return localEndPoint; - } - } - - // Called when a new node is being added; the peers table is keyed by broadcast_address, - // but the received event only contains broadcast_rpc_address, so - // we have to traverse the whole table and check the rows one by one. 
- private Optional findInPeers( - AdminResult result, InetSocketAddress broadcastRpcAddressToFind, EndPoint localEndPoint) { - for (AdminRow row : result) { - InetSocketAddress broadcastRpcAddress = getBroadcastRpcAddress(row, localEndPoint); - if (broadcastRpcAddress != null - && broadcastRpcAddress.equals(broadcastRpcAddressToFind) - && isPeerValid(row)) { - return Optional.of(nodeInfoBuilder(row, broadcastRpcAddress, localEndPoint).build()); - } - } - LOG.debug("[{}] Could not find any peer row matching {}", logPrefix, broadcastRpcAddressToFind); - return Optional.empty(); - } - - // Called when refreshing an existing node, and we don't know its broadcast address; in this - // case we attempt a search by host id and have to traverse the whole table and check the rows one - // by one. - private Optional findInPeers( - AdminResult result, UUID hostIdToFind, EndPoint localEndPoint) { - for (AdminRow row : result) { - UUID hostId = row.getUuid("host_id"); - if (hostId != null && hostId.equals(hostIdToFind) && isPeerValid(row)) { - return Optional.ofNullable(getBroadcastRpcAddress(row, localEndPoint)) - .map( - broadcastRpcAddress -> - nodeInfoBuilder(row, broadcastRpcAddress, localEndPoint).build()); - } - } - LOG.debug("[{}] Could not find any peer row matching {}", logPrefix, hostIdToFind); - return Optional.empty(); - } - - // Current versions of Cassandra (3.11 at the time of writing), require the same port for all - // nodes. As a consequence, the port is not stored in system tables. - // We save it the first time we get a control connection channel. - private void savePort(DriverChannel channel) { - if (port < 0) { - SocketAddress address = channel.getEndPoint().resolve(); - if (address instanceof InetSocketAddress) { - port = ((InetSocketAddress) address).getPort(); - } - } - } - - /** - * Determines the broadcast RPC address of the node represented by the given row. 
- * - * @param row The row to inspect; can represent either a local (control) node or a peer node. - * @param localEndPoint the control node endpoint that was used to query the node's system tables. - * This is a parameter because it would be racy to call {@code - * controlConnection.channel().getEndPoint()} from within this method, as the control - * connection may have changed its channel since. So this parameter must be provided by the - * caller. - * @return the broadcast RPC address of the node, if it could be determined; or {@code null} - * otherwise. - */ - @Nullable - protected InetSocketAddress getBroadcastRpcAddress( - @NonNull AdminRow row, @NonNull EndPoint localEndPoint) { - - InetAddress broadcastRpcInetAddress = null; - Iterator addrCandidates = - Iterators.forArray( - // in system.peers_v2 (Cassandra >= 4.0) - "native_address", - // DSE 6.8 introduced native_transport_address and native_transport_port for the - // listen address. - "native_transport_address", - // in system.peers or system.local - "rpc_address"); - - while (broadcastRpcInetAddress == null && addrCandidates.hasNext()) - broadcastRpcInetAddress = row.getInetAddress(addrCandidates.next()); - // This could only happen if system tables are corrupted, but handle gracefully - if (broadcastRpcInetAddress == null) { - LOG.warn( - "[{}] Unable to determine broadcast RPC IP address, returning null. " - + "This is likely due to a misconfiguration or invalid system tables. " - + "Please validate the contents of system.local and/or {}.", - logPrefix, - getPeerTableName()); - return null; - } - - Integer broadcastRpcPort = null; - Iterator portCandidates = - Iterators.forArray( - // in system.peers_v2 (Cassandra >= 4.0) - NATIVE_PORT, - // DSE 6.8 introduced native_transport_address and native_transport_port for the - // listen address. 
- NATIVE_TRANSPORT_PORT, - // system.local for Cassandra >= 4.0 - "rpc_port"); - - while ((broadcastRpcPort == null || broadcastRpcPort == 0) && portCandidates.hasNext()) { - - String colName = portCandidates.next(); - broadcastRpcPort = row.getInteger(colName); - // Support override for SSL port (if enabled) in DSE - if (NATIVE_TRANSPORT_PORT.equals(colName) && context.getSslEngineFactory().isPresent()) { - - String sslColName = colName + "_ssl"; - broadcastRpcPort = row.getInteger(sslColName); - } - } - // use the default port if no port information was found in the row; - // note that in rare situations, the default port might not be known, in which case we - // report zero, as advertised in the javadocs of Node and NodeInfo. - if (broadcastRpcPort == null || broadcastRpcPort == 0) { - - LOG.warn( - "[{}] Unable to determine broadcast RPC port. " - + "Trying to fall back to port used by the control connection.", - logPrefix); - broadcastRpcPort = port == -1 ? 0 : port; - } - - InetSocketAddress broadcastRpcAddress = - new InetSocketAddress(broadcastRpcInetAddress, broadcastRpcPort); - if (row.contains("peer") && broadcastRpcAddress.equals(localEndPoint.resolve())) { - // JAVA-2303: if the peer is actually the control node, ignore that peer as it is likely - // a misconfiguration problem. - LOG.warn( - "[{}] Control node {} has an entry for itself in {}: this entry will be ignored. " - + "This is likely due to a misconfiguration; please verify your rpc_address " - + "configuration in cassandra.yaml on all nodes in your cluster.", - logPrefix, - localEndPoint, - getPeerTableName()); - return null; - } - - return broadcastRpcAddress; - } - - /** - * Returns {@code true} if the given peer row is valid, and {@code false} otherwise. - * - *

This method must at least ensure that the row contains enough information to extract the - * node's broadcast RPC address and host ID; otherwise the driver may not work properly. - */ - protected boolean isPeerValid(AdminRow peerRow) { - if (PeerRowValidator.isValid(peerRow)) { - return true; - } else { - LOG.warn( - "[{}] Found invalid row in {} for peer: {}. " - + "This is likely a gossip or snitch issue, this node will be ignored.", - logPrefix, - getPeerTableName(), - peerRow.getInetAddress("peer")); - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java deleted file mode 100644 index 5d58727484c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * Indicates that the load balancing policy has assigned a new distance to a host. - * - *

This is informational only: firing this event manually does not change the distance. - */ -@Immutable -public class DistanceEvent { - public final NodeDistance distance; - public final DefaultNode node; - - public DistanceEvent(NodeDistance distance, DefaultNode node) { - this.distance = distance; - this.node = node; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DistanceEvent) { - DistanceEvent that = (DistanceEvent) other; - return this.distance == that.distance && Objects.equals(this.node, that.node); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(this.distance, this.node); - } - - @Override - public String toString() { - return "DistanceEvent(" + distance + ", " + node + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefresh.java deleted file mode 100644 index 7388980c230..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefresh.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenMap; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -class FullNodeListRefresh extends NodesRefresh { - - private static final Logger LOG = LoggerFactory.getLogger(FullNodeListRefresh.class); - - @VisibleForTesting final Iterable nodeInfos; - - FullNodeListRefresh(Iterable nodeInfos) { - this.nodeInfos = nodeInfos; - } - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - - String logPrefix = context.getSessionName(); - TokenFactoryRegistry tokenFactoryRegistry = context.getTokenFactoryRegistry(); - - Map oldNodes = oldMetadata.getNodes(); - - Map added = new HashMap<>(); - Set seen = new HashSet<>(); - - TokenFactory tokenFactory = - oldMetadata.getTokenMap().map(m -> ((DefaultTokenMap) m).getTokenFactory()).orElse(null); - boolean tokensChanged = false; - - for (NodeInfo nodeInfo : nodeInfos) { - UUID id = nodeInfo.getHostId(); - if (seen.contains(id)) { - LOG.warn( - "[{}] Found 
duplicate entries with host_id {} in system.peers, " - + "keeping only the first one", - logPrefix, - id); - } else { - seen.add(id); - DefaultNode node = (DefaultNode) oldNodes.get(id); - if (node == null) { - node = new DefaultNode(nodeInfo.getEndPoint(), context); - LOG.debug("[{}] Adding new node {}", logPrefix, node); - added.put(id, node); - } - if (tokenFactory == null && nodeInfo.getPartitioner() != null) { - tokenFactory = tokenFactoryRegistry.tokenFactoryFor(nodeInfo.getPartitioner()); - } - tokensChanged |= copyInfos(nodeInfo, node, context); - } - } - - Set removed = Sets.difference(oldNodes.keySet(), seen); - - if (added.isEmpty() && removed.isEmpty()) { // The list didn't change - if (!oldMetadata.getTokenMap().isPresent() && tokenFactory != null) { - // First time we found out what the partitioner is => set the token factory and trigger a - // token map rebuild: - return new Result( - oldMetadata.withNodes( - oldMetadata.getNodes(), tokenMapEnabled, true, tokenFactory, context)); - } else { - // No need to create a new metadata instance - return new Result(oldMetadata); - } - } else { - ImmutableMap.Builder newNodesBuilder = ImmutableMap.builder(); - ImmutableList.Builder eventsBuilder = ImmutableList.builder(); - - newNodesBuilder.putAll(added); - for (Map.Entry entry : oldNodes.entrySet()) { - if (!removed.contains(entry.getKey())) { - newNodesBuilder.put(entry.getKey(), entry.getValue()); - } - } - - for (Node node : added.values()) { - eventsBuilder.add(NodeStateEvent.added((DefaultNode) node)); - } - for (UUID id : removed) { - Node node = oldNodes.get(id); - eventsBuilder.add(NodeStateEvent.removed((DefaultNode) node)); - } - - return new Result( - oldMetadata.withNodes( - newNodesBuilder.build(), tokenMapEnabled, tokensChanged, tokenFactory, context), - eventsBuilder.build()); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefresh.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefresh.java deleted file mode 100644 index 517bfca27fa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefresh.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The first node list refresh: contact points are not in the metadata yet, we need to copy them - * over. - */ -@ThreadSafe -class InitialNodeListRefresh extends NodesRefresh { - - private static final Logger LOG = LoggerFactory.getLogger(InitialNodeListRefresh.class); - - @VisibleForTesting final Iterable nodeInfos; - @VisibleForTesting final Set contactPoints; - - InitialNodeListRefresh(Iterable nodeInfos, Set contactPoints) { - this.nodeInfos = nodeInfos; - this.contactPoints = contactPoints; - } - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - - String logPrefix = context.getSessionName(); - TokenFactoryRegistry tokenFactoryRegistry = context.getTokenFactoryRegistry(); - - // Since this is the first refresh, and we've stored contact points separately until now, the - // metadata is empty. 
- assert oldMetadata == DefaultMetadata.EMPTY; - TokenFactory tokenFactory = null; - - Map newNodes = new HashMap<>(); - // Contact point nodes don't have host ID as well as other info yet, so we fill them with node - // info found on first match by endpoint - Set matchedContactPoints = new HashSet<>(); - List addedNodes = new ArrayList<>(); - - for (NodeInfo nodeInfo : nodeInfos) { - UUID hostId = nodeInfo.getHostId(); - if (newNodes.containsKey(hostId)) { - LOG.warn( - "[{}] Found duplicate entries with host_id {} in system.peers, " - + "keeping only the first one {}", - logPrefix, - hostId, - newNodes.get(hostId)); - } else { - EndPoint endPoint = nodeInfo.getEndPoint(); - DefaultNode contactPointNode = findContactPointNode(endPoint); - DefaultNode node; - if (contactPointNode == null || matchedContactPoints.contains(endPoint)) { - node = new DefaultNode(endPoint, context); - addedNodes.add(node); - LOG.debug("[{}] Adding new node {}", logPrefix, node); - } else { - matchedContactPoints.add(contactPointNode.getEndPoint()); - node = contactPointNode; - LOG.debug("[{}] Copying contact point {}", logPrefix, node); - } - if (tokenMapEnabled && tokenFactory == null && nodeInfo.getPartitioner() != null) { - tokenFactory = tokenFactoryRegistry.tokenFactoryFor(nodeInfo.getPartitioner()); - } - copyInfos(nodeInfo, node, context); - newNodes.put(hostId, node); - } - } - - ImmutableList.Builder eventsBuilder = ImmutableList.builder(); - for (DefaultNode addedNode : addedNodes) { - eventsBuilder.add(NodeStateEvent.added(addedNode)); - } - for (DefaultNode contactPoint : contactPoints) { - if (!matchedContactPoints.contains(contactPoint.getEndPoint())) { - eventsBuilder.add(NodeStateEvent.removed(contactPoint)); - } - } - - return new Result( - oldMetadata.withNodes( - ImmutableMap.copyOf(newNodes), tokenMapEnabled, true, tokenFactory, context), - eventsBuilder.build()); - } - - private DefaultNode findContactPointNode(EndPoint endPoint) { - for (DefaultNode node : 
contactPoints) { - if (node.getEndPoint().equals(endPoint)) { - return node; - } - } - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java deleted file mode 100644 index 5c8473a3b67..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.WeakHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Wraps the user-provided LBPs for internal use. This serves multiple purposes: - * - *
    - *
  • help enforce the guarantee that init is called exactly once, and before any other method. - *
  • handle the early stages of initialization (before first actual connect), where the LBPs are - * not ready yet. - *
  • handle incoming node state events from the outside world and propagate them to the - * policies. - *
  • process distance decisions from the policies and propagate them to the outside world. - *
- */ -@ThreadSafe -public class LoadBalancingPolicyWrapper implements AutoCloseable { - - private static final Logger LOG = LoggerFactory.getLogger(LoadBalancingPolicyWrapper.class); - - private enum State { - BEFORE_INIT, - DURING_INIT, - RUNNING, - CLOSING - } - - private final InternalDriverContext context; - private final Set policies; - private final Map policiesPerProfile; - private final Map reporters; - - private final Lock distancesLock = new ReentrantLock(); - - // Remember which distance each policy reported for each node. We assume that distance events will - // be rare, so don't try to be too clever, a global lock should suffice. - @GuardedBy("distancesLock") - private final Map> distances; - - private final String logPrefix; - private final ReplayingEventFilter eventFilter = - new ReplayingEventFilter<>(this::processNodeStateEvent); - private final AtomicReference stateRef = new AtomicReference<>(State.BEFORE_INIT); - - public LoadBalancingPolicyWrapper( - @NonNull InternalDriverContext context, - @NonNull Map policiesPerProfile) { - this.context = context; - - this.policiesPerProfile = policiesPerProfile; - ImmutableMap.Builder reportersBuilder = - ImmutableMap.builder(); - // ImmutableMap.values does not remove duplicates, do it now so that we won't invoke a policy - // more than once if it's associated with multiple profiles - for (LoadBalancingPolicy policy : ImmutableSet.copyOf(policiesPerProfile.values())) { - reportersBuilder.put(policy, new SinglePolicyDistanceReporter(policy)); - } - this.reporters = reportersBuilder.build(); - // Just an alias to make the rest of the code more readable - this.policies = reporters.keySet(); - - this.distances = new WeakHashMap<>(); - - this.logPrefix = context.getSessionName(); - context.getEventBus().register(NodeStateEvent.class, this::onNodeStateEvent); - } - - public void init() { - if (stateRef.compareAndSet(State.BEFORE_INIT, State.DURING_INIT)) { - LOG.debug("[{}] Initializing policies", logPrefix); - 
// State events can happen concurrently with init, so we must record them and replay once the - // policy is initialized. - eventFilter.start(); - MetadataManager metadataManager = context.getMetadataManager(); - Metadata metadata = metadataManager.getMetadata(); - for (LoadBalancingPolicy policy : policies) { - policy.init(metadata.getNodes(), reporters.get(policy)); - } - if (stateRef.compareAndSet(State.DURING_INIT, State.RUNNING)) { - eventFilter.markReady(); - } else { // closed during init - assert stateRef.get() == State.CLOSING; - for (LoadBalancingPolicy policy : policies) { - policy.close(); - } - } - } - } - - /** - * Note: we could infer the profile name from the request again in this method, but since that's - * already done in request processors, pass the value directly. - * - * @see LoadBalancingPolicy#newQueryPlan(Request, Session) - */ - @NonNull - public Queue newQueryPlan( - @Nullable Request request, @NonNull String executionProfileName, @Nullable Session session) { - switch (stateRef.get()) { - case BEFORE_INIT: - case DURING_INIT: - // The contact points are not stored in the metadata yet: - List nodes = new ArrayList<>(context.getMetadataManager().getContactPoints()); - Collections.shuffle(nodes); - return new ConcurrentLinkedQueue<>(nodes); - case RUNNING: - LoadBalancingPolicy policy = policiesPerProfile.get(executionProfileName); - if (policy == null) { - policy = policiesPerProfile.get(DriverExecutionProfile.DEFAULT_NAME); - } - return policy.newQueryPlan(request, session); - default: - return new ConcurrentLinkedQueue<>(); - } - } - - @NonNull - public Queue newQueryPlan() { - return newQueryPlan(null, DriverExecutionProfile.DEFAULT_NAME, null); - } - - // when it comes in from the outside - private void onNodeStateEvent(NodeStateEvent event) { - eventFilter.accept(event); - } - - // once it has gone through the filter - private void processNodeStateEvent(NodeStateEvent event) { - DefaultNode node = event.node; - switch (stateRef.get()) 
{ - case BEFORE_INIT: - case DURING_INIT: - throw new AssertionError("Filter should not be marked ready until LBP init"); - case CLOSING: - return; // ignore - case RUNNING: - for (LoadBalancingPolicy policy : policies) { - if (event.newState == NodeState.UP) { - policy.onUp(node); - } else if (event.newState == NodeState.DOWN || event.newState == NodeState.FORCED_DOWN) { - policy.onDown(node); - } else if (event.newState == NodeState.UNKNOWN) { - policy.onAdd(node); - } else if (event.newState == null) { - policy.onRemove(node); - } else { - LOG.warn("[{}] Unsupported event: {}", logPrefix, event); - } - } - break; - } - } - - @Override - public void close() { - State old; - while (true) { - old = stateRef.get(); - if (old == State.CLOSING) { - return; // already closed - } else if (stateRef.compareAndSet(old, State.CLOSING)) { - break; - } - } - // If BEFORE_INIT, no need to close because they were never initialized - // If DURING_INIT, this will be handled in init() - if (old == State.RUNNING) { - for (LoadBalancingPolicy policy : policies) { - policy.close(); - } - } - } - - // An individual distance reporter for one of the policies. The results are aggregated across all - // policies, the smallest distance for each node is used. 
- private class SinglePolicyDistanceReporter implements LoadBalancingPolicy.DistanceReporter { - - private final LoadBalancingPolicy policy; - - private SinglePolicyDistanceReporter(LoadBalancingPolicy policy) { - this.policy = policy; - } - - @Override - public void setDistance(@NonNull Node node, @NonNull NodeDistance suggestedDistance) { - LOG.debug( - "[{}] {} suggested {} to {}, checking what other policies said", - logPrefix, - policy, - node, - suggestedDistance); - distancesLock.lock(); - try { - Map distancesForNode = - distances.computeIfAbsent(node, (n) -> new HashMap<>()); - distancesForNode.put(policy, suggestedDistance); - NodeDistance newDistance = aggregate(distancesForNode); - LOG.debug("[{}] Shortest distance across all policies is {}", logPrefix, newDistance); - - // There is a small race condition here (check-then-act on a volatile field). However this - // would only happen if external code changes the distance, which is unlikely (and - // dangerous). - // The driver internals only ever set the distance here, and we're protected by the lock. 
- NodeDistance oldDistance = node.getDistance(); - if (!oldDistance.equals(newDistance)) { - LOG.debug("[{}] {} was {}, changing to {}", logPrefix, node, oldDistance, newDistance); - DefaultNode defaultNode = (DefaultNode) node; - defaultNode.distance = newDistance; - context.getEventBus().fire(new DistanceEvent(newDistance, defaultNode)); - } else { - LOG.debug("[{}] {} was already {}, ignoring", logPrefix, node, oldDistance); - } - } finally { - distancesLock.unlock(); - } - } - - private NodeDistance aggregate(Map distances) { - NodeDistance minimum = NodeDistance.IGNORED; - for (NodeDistance candidate : distances.values()) { - if (candidate.compareTo(minimum) < 0) { - minimum = candidate; - } - } - return minimum; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java deleted file mode 100644 index efb04bde5e1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java +++ /dev/null @@ -1,530 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.KeyspaceFilter; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.Debouncer; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import 
org.slf4j.LoggerFactory; - -/** Holds the immutable instance of the {@link Metadata}, and handles requests to update it. */ -@ThreadSafe -public class MetadataManager implements AsyncAutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(MetadataManager.class); - - static final EndPoint DEFAULT_CONTACT_POINT = - new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042)); - - private final InternalDriverContext context; - private final String logPrefix; - private final EventExecutor adminExecutor; - private final DriverExecutionProfile config; - private final SingleThreaded singleThreaded; - private final ControlConnection controlConnection; - - private volatile DefaultMetadata metadata; // only updated from adminExecutor - private volatile boolean schemaEnabledInConfig; - private volatile List refreshedKeyspaces; - private volatile KeyspaceFilter keyspaceFilter; - private volatile Boolean schemaEnabledProgrammatically; - private volatile boolean tokenMapEnabled; - private volatile Set contactPoints; - private volatile boolean wasImplicitContactPoint; - - public MetadataManager(InternalDriverContext context) { - this(context, DefaultMetadata.EMPTY); - } - - protected MetadataManager(InternalDriverContext context, DefaultMetadata initialMetadata) { - this.context = context; - this.metadata = initialMetadata; - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.config = context.getConfig().getDefaultProfile(); - this.singleThreaded = new SingleThreaded(context, config); - this.controlConnection = context.getControlConnection(); - this.schemaEnabledInConfig = config.getBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED); - this.refreshedKeyspaces = - config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList()); - this.keyspaceFilter = KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces); - this.tokenMapEnabled = 
config.getBoolean(DefaultDriverOption.METADATA_TOKEN_MAP_ENABLED); - - context.getEventBus().register(ConfigChangeEvent.class, this::onConfigChanged); - } - - private void onConfigChanged(@SuppressWarnings("unused") ConfigChangeEvent event) { - boolean schemaEnabledBefore = isSchemaEnabled(); - boolean tokenMapEnabledBefore = tokenMapEnabled; - List keyspacesBefore = this.refreshedKeyspaces; - - this.schemaEnabledInConfig = config.getBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED); - this.refreshedKeyspaces = - config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList()); - this.keyspaceFilter = KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces); - this.tokenMapEnabled = config.getBoolean(DefaultDriverOption.METADATA_TOKEN_MAP_ENABLED); - - if ((!schemaEnabledBefore - || !keyspacesBefore.equals(refreshedKeyspaces) - || (!tokenMapEnabledBefore && tokenMapEnabled)) - && isSchemaEnabled()) { - refreshSchema(null, false, true) - .whenComplete( - (metadata, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema after it was re-enabled " - + "in the configuration, keeping previous version", - logPrefix, - error); - } - }); - } - } - - public Metadata getMetadata() { - return this.metadata; - } - - public void addContactPoints(Set providedContactPoints) { - // Convert the EndPoints to Nodes, but we can't put them into the Metadata yet, because we - // don't know their host_id. So store them in a volatile field instead, they will get copied - // during the first node refresh. 
- ImmutableSet.Builder contactPointsBuilder = ImmutableSet.builder(); - if (providedContactPoints == null || providedContactPoints.isEmpty()) { - LOG.info( - "[{}] No contact points provided, defaulting to {}", logPrefix, DEFAULT_CONTACT_POINT); - this.wasImplicitContactPoint = true; - contactPointsBuilder.add(new DefaultNode(DEFAULT_CONTACT_POINT, context)); - } else { - for (EndPoint endPoint : providedContactPoints) { - contactPointsBuilder.add(new DefaultNode(endPoint, context)); - } - } - this.contactPoints = contactPointsBuilder.build(); - LOG.debug("[{}] Adding initial contact points {}", logPrefix, contactPoints); - } - - /** - * The contact points that were used by the driver to initialize. If none were provided - * explicitly, this will be the default (127.0.0.1:9042). - * - * @see #wasImplicitContactPoint() - */ - public Set getContactPoints() { - return contactPoints; - } - - /** Whether the default contact point was used (because none were provided explicitly). */ - public boolean wasImplicitContactPoint() { - return wasImplicitContactPoint; - } - - public CompletionStage refreshNodes() { - return context - .getTopologyMonitor() - .refreshNodeList() - .thenApplyAsync(singleThreaded::refreshNodes, adminExecutor); - } - - public CompletionStage refreshNode(Node node) { - return context - .getTopologyMonitor() - .refreshNode(node) - .thenApplyAsync( - maybeInfo -> { - if (maybeInfo.isPresent()) { - boolean tokensChanged = - NodesRefresh.copyInfos(maybeInfo.get(), (DefaultNode) node, context); - if (tokensChanged) { - apply(new TokensChangedRefresh()); - } - } else { - LOG.debug( - "[{}] Topology monitor did not return any info for the refresh of {}, skipping", - logPrefix, - node); - } - return null; - }, - adminExecutor); - } - - public void addNode(InetSocketAddress broadcastRpcAddress) { - context - .getTopologyMonitor() - .getNewNodeInfo(broadcastRpcAddress) - .whenCompleteAsync( - (info, error) -> { - if (error != null) { - LOG.debug( - "[{}] Error 
refreshing node info for {}, " - + "this will be retried on the next full refresh", - logPrefix, - broadcastRpcAddress, - error); - } else { - singleThreaded.addNode(broadcastRpcAddress, info.orElse(null)); - } - }, - adminExecutor); - } - - public void removeNode(InetSocketAddress broadcastRpcAddress) { - RunOrSchedule.on(adminExecutor, () -> singleThreaded.removeNode(broadcastRpcAddress)); - } - - /** - * @param keyspace if this refresh was triggered by an event, that event's keyspace, otherwise - * null (this is only used to discard the event if it targets a keyspace that we're ignoring) - * @param evenIfDisabled force the refresh even if schema is currently disabled (used for user - * request) - * @param flushNow bypass the debouncer and force an immediate refresh (used to avoid a delay at - * startup) - */ - public CompletionStage refreshSchema( - String keyspace, boolean evenIfDisabled, boolean flushNow) { - CompletableFuture future = new CompletableFuture<>(); - RunOrSchedule.on( - adminExecutor, - () -> singleThreaded.refreshSchema(keyspace, evenIfDisabled, flushNow, future)); - return future; - } - - public static class RefreshSchemaResult { - private final Metadata metadata; - private final boolean isSchemaInAgreement; - - public RefreshSchemaResult(Metadata metadata, boolean isSchemaInAgreement) { - this.metadata = metadata; - this.isSchemaInAgreement = isSchemaInAgreement; - } - - public RefreshSchemaResult(Metadata metadata) { - this( - metadata, - // This constructor is used in corner cases where agreement doesn't matter - true); - } - - public Metadata getMetadata() { - return metadata; - } - - public boolean isSchemaInAgreement() { - return isSchemaInAgreement; - } - } - - public boolean isSchemaEnabled() { - return (schemaEnabledProgrammatically != null) - ? 
schemaEnabledProgrammatically - : schemaEnabledInConfig; - } - - public CompletionStage setSchemaEnabled(Boolean newValue) { - boolean wasEnabledBefore = isSchemaEnabled(); - schemaEnabledProgrammatically = newValue; - if (!wasEnabledBefore && isSchemaEnabled()) { - return refreshSchema(null, false, true).thenApply(RefreshSchemaResult::getMetadata); - } else { - return CompletableFuture.completedFuture(metadata); - } - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::close); - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return this.closeAsync(); - } - - private class SingleThreaded { - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean closeWasCalled; - private final CompletableFuture firstSchemaRefreshFuture = new CompletableFuture<>(); - private final Debouncer< - CompletableFuture, CompletableFuture> - schemaRefreshDebouncer; - private final SchemaQueriesFactory schemaQueriesFactory; - private final SchemaParserFactory schemaParserFactory; - - // We don't allow concurrent schema refreshes. If one is already running, the next one is queued - // (and the ones after that are merged with the queued one). 
- private CompletableFuture currentSchemaRefresh; - private CompletableFuture queuedSchemaRefresh; - - private boolean didFirstNodeListRefresh; - - private SingleThreaded(InternalDriverContext context, DriverExecutionProfile config) { - this.schemaRefreshDebouncer = - new Debouncer<>( - logPrefix + "|metadata debouncer", - adminExecutor, - this::coalesceSchemaRequests, - this::startSchemaRequest, - config.getDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW), - config.getInt(DefaultDriverOption.METADATA_SCHEMA_MAX_EVENTS)); - this.schemaQueriesFactory = context.getSchemaQueriesFactory(); - this.schemaParserFactory = context.getSchemaParserFactory(); - } - - private Void refreshNodes(Iterable nodeInfos) { - MetadataRefresh refresh = - didFirstNodeListRefresh - ? new FullNodeListRefresh(nodeInfos) - : new InitialNodeListRefresh(nodeInfos, contactPoints); - didFirstNodeListRefresh = true; - return apply(refresh); - } - - private void addNode(InetSocketAddress address, NodeInfo info) { - try { - if (info != null) { - if (!address.equals(info.getBroadcastRpcAddress().orElse(null))) { - // This would be a bug in the TopologyMonitor, protect against it - LOG.warn( - "[{}] Received a request to add a node for broadcast RPC address {}, " - + "but the provided info reports {}, ignoring it", - logPrefix, - address, - info.getBroadcastAddress()); - } else { - apply(new AddNodeRefresh(info)); - } - } else { - LOG.debug( - "[{}] Ignoring node addition for {} because the " - + "topology monitor didn't return any information", - logPrefix, - address); - } - } catch (Throwable t) { - LOG.warn("[" + logPrefix + "] Unexpected exception while handling added node", logPrefix); - } - } - - private void removeNode(InetSocketAddress broadcastRpcAddress) { - apply(new RemoveNodeRefresh(broadcastRpcAddress)); - } - - private void refreshSchema( - String keyspace, - boolean evenIfDisabled, - boolean flushNow, - CompletableFuture future) { - - if (!didFirstNodeListRefresh) { - // This 
happen if the control connection receives a schema event during init. We can't - // refresh yet because we don't know the nodes' versions, simply ignore. - future.complete(new RefreshSchemaResult(metadata)); - return; - } - - // If this is an event, make sure it's not targeting a keyspace that we're ignoring. - boolean isRefreshedKeyspace = keyspace == null || keyspaceFilter.includes(keyspace); - - if (isRefreshedKeyspace && (evenIfDisabled || isSchemaEnabled())) { - acceptSchemaRequest(future, flushNow); - } else { - future.complete(new RefreshSchemaResult(metadata)); - singleThreaded.firstSchemaRefreshFuture.complete(null); - } - } - - // An external component has requested a schema refresh, feed it to the debouncer. - private void acceptSchemaRequest( - CompletableFuture future, boolean flushNow) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - future.complete(new RefreshSchemaResult(metadata)); - } else { - schemaRefreshDebouncer.receive(future); - if (flushNow) { - schemaRefreshDebouncer.flushNow(); - } - } - } - - // Multiple requests have arrived within the debouncer window, coalesce them. - private CompletableFuture coalesceSchemaRequests( - List> futures) { - assert adminExecutor.inEventLoop(); - assert !futures.isEmpty(); - // Keep only one, but ensure that the discarded ones will still be completed when we're done - CompletableFuture result = null; - for (CompletableFuture future : futures) { - if (result == null) { - result = future; - } else { - CompletableFutures.completeFrom(result, future); - } - } - return result; - } - - // The debouncer has flushed, start the actual work. 
- private void startSchemaRequest(CompletableFuture refreshFuture) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - refreshFuture.complete(new RefreshSchemaResult(metadata)); - return; - } - if (currentSchemaRefresh == null) { - currentSchemaRefresh = refreshFuture; - LOG.debug("[{}] Starting schema refresh", logPrefix); - initControlConnectionForSchema() - .thenCompose(v -> context.getTopologyMonitor().checkSchemaAgreement()) - .whenComplete( - (schemaInAgreement, agreementError) -> { - if (agreementError != null) { - refreshFuture.completeExceptionally(agreementError); - } else { - try { - schemaQueriesFactory - .newInstance() - .execute() - .thenApplyAsync(this::parseAndApplySchemaRows, adminExecutor) - .whenComplete( - (newMetadata, metadataError) -> { - if (metadataError != null) { - refreshFuture.completeExceptionally(metadataError); - } else { - refreshFuture.complete( - new RefreshSchemaResult(newMetadata, schemaInAgreement)); - } - - firstSchemaRefreshFuture.complete(null); - - currentSchemaRefresh = null; - // If another refresh was enqueued during this one, run it now - if (queuedSchemaRefresh != null) { - CompletableFuture tmp = - this.queuedSchemaRefresh; - this.queuedSchemaRefresh = null; - startSchemaRequest(tmp); - } - }); - } catch (Throwable t) { - LOG.debug("[{}] Exception getting new metadata", logPrefix, t); - refreshFuture.completeExceptionally(t); - } - } - }); - } else if (queuedSchemaRefresh == null) { - queuedSchemaRefresh = refreshFuture; // wait for our turn - } else { - CompletableFutures.completeFrom( - queuedSchemaRefresh, refreshFuture); // join the queued request - } - } - - // To query schema tables, we need the control connection. - // Normally that the topology monitor has already initialized it to query node tables. But if a - // custom topology monitor is in place, it might not use the control connection at all. 
- private CompletionStage initControlConnectionForSchema() { - if (firstSchemaRefreshFuture.isDone()) { - // We tried to refresh the schema before, so we know we called init already. Don't call it - // again since that is cheaper. - return firstSchemaRefreshFuture; - } else { - // Trigger init (a no-op if the topology monitor already done so) - return controlConnection.init(false, true, false); - } - } - - private Metadata parseAndApplySchemaRows(SchemaRows schemaRows) { - assert adminExecutor.inEventLoop(); - SchemaRefresh schemaRefresh = schemaParserFactory.newInstance(schemaRows).parse(); - long start = System.nanoTime(); - apply(schemaRefresh); - LOG.debug("[{}] Applying schema refresh took {}", logPrefix, NanoTime.formatTimeSince(start)); - return metadata; - } - - private void close() { - if (closeWasCalled) { - return; - } - closeWasCalled = true; - LOG.debug("[{}] Closing", logPrefix); - // The current schema refresh should fail when its channel gets closed. - if (queuedSchemaRefresh != null) { - queuedSchemaRefresh.completeExceptionally(new IllegalStateException("Cluster is closed")); - } - closeFuture.complete(null); - } - } - - @VisibleForTesting - Void apply(MetadataRefresh refresh) { - assert adminExecutor.inEventLoop(); - MetadataRefresh.Result result = refresh.compute(metadata, tokenMapEnabled, context); - metadata = result.newMetadata; - boolean isFirstSchemaRefresh = - refresh instanceof SchemaRefresh && !singleThreaded.firstSchemaRefreshFuture.isDone(); - if (!singleThreaded.closeWasCalled && !isFirstSchemaRefresh) { - for (Object event : result.events) { - context.getEventBus().fire(event); - } - } - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java deleted file mode 100644 index fc31f317622..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.Collections; -import java.util.List; - -/** - * Any update to the driver's metadata. It produces a new metadata instance, and may also trigger - * events. - * - *

This is modelled as a separate type for modularity, and because we can't send the events while - * we are doing the refresh (by contract, the new copy of the metadata needs to be visible before - * the events are sent). This also makes unit testing very easy. - * - *

This is only instantiated and called from {@link MetadataManager}'s admin thread, therefore - * implementations don't need to be thread-safe. - * - * @see Session#getMetadata() - */ -public interface MetadataRefresh { - - Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context); - - class Result { - public final DefaultMetadata newMetadata; - public final List events; - - public Result(DefaultMetadata newMetadata, List events) { - this.newMetadata = newMetadata; - this.events = events; - } - - public Result(DefaultMetadata newMetadata) { - this(newMetadata, Collections.emptyList()); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListener.java deleted file mode 100644 index 8ee6d04bbae..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListener.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.function.Consumer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Combines multiple node state listeners into a single one. - * - *

Any exception thrown by a child listener is caught and logged. - */ -@ThreadSafe -public class MultiplexingNodeStateListener implements NodeStateListener { - - private static final Logger LOG = LoggerFactory.getLogger(MultiplexingNodeStateListener.class); - - private final List listeners = new CopyOnWriteArrayList<>(); - - public MultiplexingNodeStateListener() {} - - public MultiplexingNodeStateListener(NodeStateListener... listeners) { - this(Arrays.asList(listeners)); - } - - public MultiplexingNodeStateListener(Collection listeners) { - addListeners(listeners); - } - - private void addListeners(Collection source) { - for (NodeStateListener listener : source) { - addListener(listener); - } - } - - private void addListener(NodeStateListener toAdd) { - Objects.requireNonNull(toAdd, "listener cannot be null"); - if (toAdd instanceof MultiplexingNodeStateListener) { - addListeners(((MultiplexingNodeStateListener) toAdd).listeners); - } else { - listeners.add(toAdd); - } - } - - public void register(@NonNull NodeStateListener listener) { - addListener(listener); - } - - @Override - public void onAdd(@NonNull Node node) { - invokeListeners(listener -> listener.onAdd(node), "onAdd"); - } - - @Override - public void onUp(@NonNull Node node) { - invokeListeners(listener -> listener.onUp(node), "onUp"); - } - - @Override - public void onDown(@NonNull Node node) { - invokeListeners(listener -> listener.onDown(node), "onDown"); - } - - @Override - public void onRemove(@NonNull Node node) { - invokeListeners(listener -> listener.onRemove(node), "onRemove"); - } - - @Override - public void onSessionReady(@NonNull Session session) { - invokeListeners(listener -> listener.onSessionReady(session), "onSessionReady"); - } - - @Override - public void close() throws Exception { - for (NodeStateListener listener : listeners) { - try { - listener.close(); - } catch (Exception e) { - Loggers.warnWithException( - LOG, "Unexpected error while closing node state listener {}.", listener, 
e); - } - } - } - - private void invokeListeners(@NonNull Consumer action, String event) { - for (NodeStateListener listener : listeners) { - try { - action.accept(listener); - } catch (Exception e) { - Loggers.warnWithException( - LOG, - "Unexpected error while notifying node state listener {} of an {} event.", - listener, - event, - e); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java deleted file mode 100644 index 6a9651d8376..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; - -/** - * Information about a node, returned by the {@link TopologyMonitor}. - * - *

This information will be copied to the corresponding {@link Node} in the metadata. - */ -public interface NodeInfo { - - /** - * The endpoint that the driver will use to connect to the node. - * - *

This information is required; the driver will not function properly if this method returns - * {@code null}. - */ - @NonNull - EndPoint getEndPoint(); - - /** - * The node's broadcast RPC address and port. That is, the address that clients are supposed to - * use to communicate with that node. - * - *

This is currently only used to match broadcast RPC addresses received in status events - * coming in on the control connection. The driver does not use this value to actually connect to - * the node, but rather uses {@link #getEndPoint()}. - * - * @see Node#getBroadcastRpcAddress() - */ - @NonNull - Optional getBroadcastRpcAddress(); - - /** - * The node's broadcast address and port. That is, the address that other nodes use to communicate - * with that node. - * - *

This is only used by the default topology monitor, so if you are writing a custom one and - * don't need this information, you can leave it empty. - */ - @NonNull - Optional getBroadcastAddress(); - - /** - * The node's listen address and port. That is, the address that the Cassandra process binds to. - * - *

This is currently not used anywhere in the driver. If you write a custom topology monitor - * and don't need this information, you can leave it empty. - */ - @NonNull - Optional getListenAddress(); - - /** - * The data center that this node belongs to, according to the Cassandra snitch. - * - *

This is used by some {@link LoadBalancingPolicy} implementations to compute the {@link - * NodeDistance}. - */ - @Nullable - String getDatacenter(); - - /** - * The rack that this node belongs to, according to the Cassandra snitch. - * - *

This is used by some {@link LoadBalancingPolicy} implementations to compute the {@link - * NodeDistance}. - */ - @Nullable - String getRack(); - - /** - * The Cassandra version that this node runs. - * - *

This is used when parsing the schema (schema tables sometimes change from one version to the - * next, even if the protocol version stays the same). If this is null, schema parsing will use - * the lowest version for the current protocol version, which might lead to inaccuracies. - */ - @Nullable - String getCassandraVersion(); - - /** - * The fully-qualifier name of the partitioner class that distributes data across the nodes, as it - * appears in {@code system.local.partitioner}. - * - *

This is used to compute the driver-side token metadata (in particular, token-aware routing - * relies on this information). It is only really needed for the first node of the initial node - * list refresh (but it doesn't hurt to always include it if possible). If it is absent, {@link - * Metadata#getTokenMap()} will remain empty. - */ - @Nullable - String getPartitioner(); - - /** - * The tokens that this node owns on the ring. - * - *

This is used to compute the driver-side token metadata (in particular, token-aware routing - * relies on this information). If you're not using token metadata in any way, you may return an - * empty set here. - */ - @Nullable - Set getTokens(); - - /** - * An additional map of free-form properties, that can be used by custom implementations. They - * will be copied as-is into {@link Node#getExtras()}. - * - *

This is not required; if you don't have anything specific to report here, it can be null or - * empty. - */ - @Nullable - Map getExtras(); - - /** - * The host ID that is assigned to this host by cassandra. The driver uses this to uniquely - * identify a node. - * - *

This information is required; the driver will not function properly if this method returns - * {@code null}. - */ - @NonNull - UUID getHostId(); - - /** - * The current version that is associated with the node's schema. - * - *

This is not required; the driver reports it in {@link Node#getSchemaVersion()}, but for - * informational purposes only. It is not used anywhere internally (schema agreement is checked - * with {@link TopologyMonitor#checkSchemaAgreement()}, which by default queries system tables - * directly, not this field). - */ - @Nullable - UUID getSchemaVersion(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java deleted file mode 100644 index 2f5c3c1d230..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * The transition of a node from one {@linkplain NodeState state} to another. - * - *

For simplicity, this is also used to represent a node addition ({@code oldState=null, - * newState=UNKNOWN}) or removal ({@code oldState=newState=null}). - */ -@Immutable -public class NodeStateEvent { - public static NodeStateEvent changed(NodeState oldState, NodeState newState, DefaultNode node) { - Preconditions.checkNotNull(oldState); - Preconditions.checkNotNull(newState); - return new NodeStateEvent(oldState, newState, node); - } - - public static NodeStateEvent added(DefaultNode node) { - return new NodeStateEvent(null, NodeState.UNKNOWN, node); - } - - public static NodeStateEvent removed(DefaultNode node) { - return new NodeStateEvent(null, null, node); - } - - /** The state before the change, or {@code null} if this is an addition or a removal. */ - public final NodeState oldState; - - /** - * The state after the change ({@link NodeState#UNKNOWN} if the node was just added), or {@code - * null} if this is a removal. - */ - public final NodeState newState; - - public final DefaultNode node; - - private NodeStateEvent(NodeState oldState, NodeState newState, DefaultNode node) { - this.node = node; - this.oldState = oldState; - this.newState = newState; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof NodeStateEvent) { - NodeStateEvent that = (NodeStateEvent) other; - return this.oldState == that.oldState - && this.newState == that.newState - && Objects.equals(this.node, that.node); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(oldState, newState, node); - } - - @Override - public String toString() { - return "NodeStateEvent(" + oldState + "=>" + newState + ", " + node + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java deleted file mode 100644 index c8a52e4fa00..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java +++ /dev/null @@ -1,352 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.Debouncer; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import 
java.net.InetSocketAddress; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Maintains the state of the Cassandra nodes, based on the events received from other components of - * the driver. - * - *

See {@link NodeState} and {@link TopologyEvent} for a description of the state change rules. - */ -@ThreadSafe -public class NodeStateManager implements AsyncAutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(NodeStateManager.class); - - private final EventExecutor adminExecutor; - private final SingleThreaded singleThreaded; - private final String logPrefix; - - public NodeStateManager(InternalDriverContext context) { - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.singleThreaded = new SingleThreaded(context); - this.logPrefix = context.getSessionName(); - } - - /** - * Indicates when the driver initialization is complete (that is, we have performed the first node - * list refresh and are about to initialize the load balancing policy). - */ - public void markInitialized() { - RunOrSchedule.on(adminExecutor, singleThreaded::markInitialized); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::close); - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return closeAsync(); - } - - private class SingleThreaded { - - private final MetadataManager metadataManager; - private final EventBus eventBus; - private final Debouncer> topologyEventDebouncer; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean isInitialized = false; - private boolean closeWasCalled; - - private SingleThreaded(InternalDriverContext context) { - this.metadataManager = context.getMetadataManager(); - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.topologyEventDebouncer = - new Debouncer<>( - logPrefix + "|topology debouncer", - adminExecutor, - this::coalesceTopologyEvents, - this::flushTopologyEvents, - 
config.getDuration(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW), - config.getInt(DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS)); - - this.eventBus = context.getEventBus(); - this.eventBus.register( - ChannelEvent.class, RunOrSchedule.on(adminExecutor, this::onChannelEvent)); - this.eventBus.register( - TopologyEvent.class, RunOrSchedule.on(adminExecutor, this::onTopologyEvent)); - // Note: this component exists for the whole life of the driver instance, so don't worry about - // unregistering the listeners. - } - - private void markInitialized() { - assert adminExecutor.inEventLoop(); - isInitialized = true; - } - - // Updates to DefaultNode's volatile fields are confined to the admin thread - @SuppressWarnings({"NonAtomicVolatileUpdate", "NonAtomicOperationOnVolatileField"}) - private void onChannelEvent(ChannelEvent event) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - LOG.debug("[{}] Processing {}", logPrefix, event); - DefaultNode node = (DefaultNode) event.node; - assert node != null; - switch (event.type) { - case OPENED: - node.openConnections += 1; - if (node.state == NodeState.DOWN || node.state == NodeState.UNKNOWN) { - setState(node, NodeState.UP, "a new connection was opened to it"); - } - break; - case CLOSED: - node.openConnections -= 1; - if (node.openConnections == 0 && node.reconnections > 0) { - setState(node, NodeState.DOWN, "it was reconnecting and lost its last connection"); - } - break; - case RECONNECTION_STARTED: - node.reconnections += 1; - if (node.openConnections == 0) { - setState(node, NodeState.DOWN, "it has no connections and started reconnecting"); - } - break; - case RECONNECTION_STOPPED: - node.reconnections -= 1; - break; - case CONTROL_CONNECTION_FAILED: - // Special case for init, where this means that a contact point is down. In other - // situations that information is not really useful, we rely on - // openConnections/reconnections instead. 
- if (!isInitialized) { - setState(node, NodeState.DOWN, "it was tried as a contact point but failed"); - } - break; - } - } - - private void onDebouncedTopologyEvent(TopologyEvent event) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - LOG.debug("[{}] Processing {}", logPrefix, event); - Optional maybeNode = metadataManager.getMetadata().findNode(event.broadcastRpcAddress); - switch (event.type) { - case SUGGEST_UP: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - if (node.state == NodeState.FORCED_DOWN) { - LOG.debug("[{}] Not setting {} UP because it is FORCED_DOWN", logPrefix, node); - } else if (node.distance == NodeDistance.IGNORED) { - setState(node, NodeState.UP, "it is IGNORED and an UP topology event was received"); - } - } else { - LOG.debug( - "[{}] Received UP event for unknown node {}, refreshing node list", - logPrefix, - event.broadcastRpcAddress); - metadataManager.refreshNodes(); - } - break; - case SUGGEST_DOWN: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - if (node.openConnections > 0) { - LOG.debug( - "[{}] Not setting {} DOWN because it still has active connections", - logPrefix, - node); - } else if (node.state == NodeState.FORCED_DOWN) { - LOG.debug("[{}] Not setting {} DOWN because it is FORCED_DOWN", logPrefix, node); - } else if (node.distance == NodeDistance.IGNORED) { - setState( - node, NodeState.DOWN, "it is IGNORED and a DOWN topology event was received"); - } - } else { - LOG.debug( - "[{}] Received DOWN event for unknown node {}, ignoring it", - logPrefix, - event.broadcastRpcAddress); - } - break; - case FORCE_UP: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - setState(node, NodeState.UP, "a FORCE_UP topology event was received"); - } else { - LOG.debug( - "[{}] Received FORCE_UP event for unknown node {}, adding it", - logPrefix, - event.broadcastRpcAddress); - 
metadataManager.addNode(event.broadcastRpcAddress); - } - break; - case FORCE_DOWN: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - setState(node, NodeState.FORCED_DOWN, "a FORCE_DOWN topology event was received"); - } else { - LOG.debug( - "[{}] Received FORCE_DOWN event for unknown node {}, ignoring it", - logPrefix, - event.broadcastRpcAddress); - } - break; - case SUGGEST_ADDED: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - LOG.debug( - "[{}] Received ADDED event for {} but it is already in our metadata, ignoring", - logPrefix, - node); - } else { - metadataManager.addNode(event.broadcastRpcAddress); - } - break; - case SUGGEST_REMOVED: - if (maybeNode.isPresent()) { - metadataManager.removeNode(event.broadcastRpcAddress); - } else { - LOG.debug( - "[{}] Received REMOVED event for {} but it is not in our metadata, ignoring", - logPrefix, - event.broadcastRpcAddress); - } - break; - } - } - - // Called by the event bus, needs debouncing - private void onTopologyEvent(TopologyEvent event) { - assert adminExecutor.inEventLoop(); - topologyEventDebouncer.receive(event); - } - - // Called to process debounced events before flushing - private Collection coalesceTopologyEvents(List events) { - assert adminExecutor.inEventLoop(); - Collection result; - if (events.size() == 1) { - result = events; - } else { - // Keep the last FORCE* event for each node, or if there is none the last normal event - Map last = Maps.newHashMapWithExpectedSize(events.size()); - for (TopologyEvent event : events) { - if (event.isForceEvent() - || !last.containsKey(event.broadcastRpcAddress) - || !last.get(event.broadcastRpcAddress).isForceEvent()) { - last.put(event.broadcastRpcAddress, event); - } - } - result = last.values(); - } - LOG.debug("[{}] Coalesced topology events: {} => {}", logPrefix, events, result); - return result; - } - - // Called when the debouncer flushes - private void 
flushTopologyEvents(Collection events) { - assert adminExecutor.inEventLoop(); - for (TopologyEvent event : events) { - onDebouncedTopologyEvent(event); - } - } - - private void close() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - topologyEventDebouncer.stop(); - closeFuture.complete(null); - } - - private void setState(DefaultNode node, NodeState newState, String reason) { - NodeState oldState = node.state; - if (oldState != newState) { - LOG.debug( - "[{}] Transitioning {} {}=>{} (because {})", - logPrefix, - node, - oldState, - newState, - reason); - node.state = newState; - if (newState == NodeState.UP) { - node.upSinceMillis = System.currentTimeMillis(); - } else { - node.upSinceMillis = -1; - } - // Fire the state change event, either immediately, or after a refresh if the node just came - // back up. - // If oldState == UNKNOWN, the node was just added, we already refreshed while processing - // the addition. - if (oldState == NodeState.UNKNOWN || newState != NodeState.UP) { - eventBus.fire(NodeStateEvent.changed(oldState, newState, node)); - } else { - metadataManager - .refreshNode(node) - .whenComplete( - (success, error) -> { - try { - if (error != null) { - Loggers.warnWithException( - LOG, "[{}] Error while refreshing info for {}", logPrefix, node, error); - } - // Fire the event whether the refresh succeeded or not - eventBus.fire(NodeStateEvent.changed(oldState, newState, node)); - } catch (Throwable t) { - Loggers.warnWithException(LOG, "[{}] Unexpected exception", logPrefix, t); - } - }); - } - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java deleted file mode 100644 index befb55e3740..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -abstract class NodesRefresh implements MetadataRefresh { - - private static final Logger LOG = LoggerFactory.getLogger(NodesRefresh.class); - - /** - * @return whether the node's token have changed as a result of this operation (unfortunately we - * mutate the tokens in-place, so there is no way to check this after the fact). 
- */ - protected static boolean copyInfos( - NodeInfo nodeInfo, DefaultNode node, InternalDriverContext context) { - - node.setEndPoint(nodeInfo.getEndPoint(), context); - node.broadcastRpcAddress = nodeInfo.getBroadcastRpcAddress().orElse(null); - node.broadcastAddress = nodeInfo.getBroadcastAddress().orElse(null); - node.listenAddress = nodeInfo.getListenAddress().orElse(null); - node.datacenter = nodeInfo.getDatacenter(); - node.rack = nodeInfo.getRack(); - node.hostId = Objects.requireNonNull(nodeInfo.getHostId()); - node.schemaVersion = nodeInfo.getSchemaVersion(); - String versionString = nodeInfo.getCassandraVersion(); - try { - node.cassandraVersion = Version.parse(versionString); - } catch (IllegalArgumentException e) { - LOG.warn( - "[{}] Error converting Cassandra version '{}' for {}", - context.getSessionName(), - versionString, - node.getEndPoint()); - } - boolean tokensChanged = !node.rawTokens.equals(nodeInfo.getTokens()); - if (tokensChanged) { - node.rawTokens = nodeInfo.getTokens(); - } - node.extras = - (nodeInfo.getExtras() == null) - ? Collections.emptyMap() - : ImmutableMap.copyOf(nodeInfo.getExtras()); - return tokensChanged; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java deleted file mode 100644 index b879e1f2104..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.NodeStateListenerBase; -import net.jcip.annotations.ThreadSafe; - -/** - * Default node state listener implementation with empty methods. This implementation is used when - * no listeners were registered, neither programmatically nor through the configuration. - */ -@ThreadSafe -public class NoopNodeStateListener extends NodeStateListenerBase { - - public NoopNodeStateListener(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidator.java deleted file mode 100644 index 4782d72abbb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidator.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class PeerRowValidator { - - /** Returns {@code true} if the given peer row is valid, and {@code false} otherwise. */ - public static boolean isValid(@NonNull AdminRow peerRow) { - - boolean hasPeersRpcAddress = !peerRow.isNull("rpc_address"); - boolean hasPeersV2RpcAddress = - !peerRow.isNull("native_address") && !peerRow.isNull("native_port"); - boolean hasRpcAddress = hasPeersRpcAddress || hasPeersV2RpcAddress; - - return hasRpcAddress - && !peerRow.isNull("host_id") - && !peerRow.isNull("data_center") - && !peerRow.isNull("rack") - && !peerRow.isNull("tokens") - && !peerRow.isNull("schema_version"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java deleted file mode 100644 index 46de1989278..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class RemoveNodeRefresh extends NodesRefresh { - - private static final Logger LOG = LoggerFactory.getLogger(RemoveNodeRefresh.class); - - @VisibleForTesting final InetSocketAddress broadcastRpcAddressToRemove; - - RemoveNodeRefresh(InetSocketAddress broadcastRpcAddressToRemove) { - this.broadcastRpcAddressToRemove = broadcastRpcAddressToRemove; - } - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - - String logPrefix = context.getSessionName(); - - Map oldNodes = oldMetadata.getNodes(); - - ImmutableMap.Builder newNodesBuilder = ImmutableMap.builder(); - Node removedNode = null; - for (Node node : oldNodes.values()) { - if (node.getBroadcastRpcAddress().isPresent() - && 
node.getBroadcastRpcAddress().get().equals(broadcastRpcAddressToRemove)) { - removedNode = node; - } else { - assert node.getHostId() != null; // nodes in metadata.getNodes() always have their id set - newNodesBuilder.put(node.getHostId(), node); - } - } - - if (removedNode == null) { - // This should never happen because we already check the event in NodeStateManager, but handle - // just in case. - LOG.debug("[{}] Couldn't find node {} to remove", logPrefix, broadcastRpcAddressToRemove); - return new Result(oldMetadata); - } else { - LOG.debug("[{}] Removing node {}", logPrefix, removedNode); - return new Result( - oldMetadata.withNodes(newNodesBuilder.build(), tokenMapEnabled, false, null, context), - ImmutableList.of(NodeStateEvent.removed((DefaultNode) removedNode))); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java deleted file mode 100644 index c5935dba4bb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.time.Duration; -import java.util.Iterator; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -class SchemaAgreementChecker { - - private static final Logger LOG = LoggerFactory.getLogger(SchemaAgreementChecker.class); - private static final int INFINITE_PAGE_SIZE = -1; - @VisibleForTesting static final InetAddress BIND_ALL_ADDRESS; - - static { - try { - BIND_ALL_ADDRESS = InetAddress.getByAddress(new byte[4]); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } - } - - private final DriverChannel channel; - private final InternalDriverContext context; - private final String logPrefix; - private final Duration queryTimeout; - private final long intervalNs; - private final long 
timeoutNs; - private final boolean warnOnFailure; - private final long start; - private final CompletableFuture result = new CompletableFuture<>(); - - SchemaAgreementChecker(DriverChannel channel, InternalDriverContext context, String logPrefix) { - this.channel = channel; - this.context = context; - this.logPrefix = logPrefix; - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.queryTimeout = config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT); - this.intervalNs = - config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL).toNanos(); - this.timeoutNs = - config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT).toNanos(); - this.warnOnFailure = config.getBoolean(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN); - this.start = System.nanoTime(); - } - - public CompletionStage run() { - LOG.debug("[{}] Checking schema agreement", logPrefix); - if (timeoutNs == 0) { - result.complete(false); - } else { - sendQueries(); - } - return result; - } - - private void sendQueries() { - long elapsedNs = System.nanoTime() - start; - if (elapsedNs > timeoutNs) { - String message = - String.format( - "[%s] Schema agreement not reached after %s", logPrefix, NanoTime.format(elapsedNs)); - if (warnOnFailure) { - LOG.warn(message); - } else { - LOG.debug(message); - } - result.complete(false); - } else { - CompletionStage localQuery = - query("SELECT schema_version FROM system.local WHERE key='local'"); - CompletionStage peersQuery = query("SELECT * FROM system.peers"); - - localQuery - .thenCombine(peersQuery, this::extractSchemaVersions) - .whenComplete(this::completeOrReschedule); - } - } - - private Set extractSchemaVersions(AdminResult controlNodeResult, AdminResult peersResult) { - // Gather the versions of all the nodes that are UP - ImmutableSet.Builder schemaVersions = ImmutableSet.builder(); - - // Control node (implicitly UP, we've just queried it) - Iterator iterator = 
controlNodeResult.iterator(); - if (iterator.hasNext()) { - AdminRow localRow = iterator.next(); - UUID schemaVersion = localRow.getUuid("schema_version"); - if (schemaVersion == null) { - LOG.warn( - "[{}] Missing schema_version for control node {}, " - + "excluding from schema agreement check", - logPrefix, - channel.getEndPoint()); - } else { - schemaVersions.add(schemaVersion); - } - } else { - LOG.warn( - "[{}] Missing system.local row for control node {}, " - + "excluding from schema agreement check", - logPrefix, - channel.getEndPoint()); - } - - Map nodes = context.getMetadataManager().getMetadata().getNodes(); - for (AdminRow peerRow : peersResult) { - if (isPeerValid(peerRow, nodes)) { - UUID schemaVersion = Objects.requireNonNull(peerRow.getUuid("schema_version")); - schemaVersions.add(schemaVersion); - } - } - return schemaVersions.build(); - } - - private void completeOrReschedule(Set uuids, Throwable error) { - if (error != null) { - LOG.debug( - "[{}] Error while checking schema agreement, completing now (false)", logPrefix, error); - result.complete(false); - } else if (uuids.size() == 1) { - LOG.debug( - "[{}] Schema agreement reached ({}), completing", logPrefix, uuids.iterator().next()); - result.complete(true); - } else { - LOG.debug( - "[{}] Schema agreement not reached yet ({}), rescheduling in {}", - logPrefix, - uuids, - NanoTime.format(intervalNs)); - channel - .eventLoop() - .schedule(this::sendQueries, intervalNs, TimeUnit.NANOSECONDS) - .addListener( - f -> { - if (!f.isSuccess()) { - LOG.debug( - "[{}] Error while rescheduling schema agreement, completing now (false)", - logPrefix, - f.cause()); - } - }); - } - } - - @VisibleForTesting - protected CompletionStage query(String queryString) { - return AdminRequestHandler.query( - channel, queryString, queryTimeout, INFINITE_PAGE_SIZE, logPrefix) - .start(); - } - - protected boolean isPeerValid(AdminRow peerRow, Map nodes) { - if (PeerRowValidator.isValid(peerRow)) { - UUID hostId = 
peerRow.getUuid("host_id"); - Node node = nodes.get(hostId); - if (node == null) { - LOG.warn("[{}] Unknown peer {}, excluding from schema agreement check", logPrefix, hostId); - return false; - } else if (node.getState() != NodeState.UP) { - LOG.debug("[{}] Peer {} is down, excluding from schema agreement check", logPrefix, hostId); - return false; - } - return true; - } else { - LOG.warn( - "[{}] Found invalid system.peers row for peer: {}, excluding from schema agreement check.", - logPrefix, - peerRow.getInetAddress("peer")); - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SniEndPoint.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SniEndPoint.java deleted file mode 100644 index d1ab8eec98d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SniEndPoint.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.shaded.guava.common.primitives.UnsignedBytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Arrays; -import java.util.Comparator; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; - -public class SniEndPoint implements EndPoint { - private static final AtomicInteger OFFSET = new AtomicInteger(); - - private final InetSocketAddress proxyAddress; - private final String serverName; - - /** - * @param proxyAddress the address of the proxy. If it is {@linkplain - * InetSocketAddress#isUnresolved() unresolved}, each call to {@link #resolve()} will - * re-resolve it, fetch all of its A-records, and if there are more than 1 pick one in a - * round-robin fashion. - * @param serverName the SNI server name. In the context of Cloud, this is the string - * representation of the host id. - */ - public SniEndPoint(InetSocketAddress proxyAddress, String serverName) { - this.proxyAddress = Objects.requireNonNull(proxyAddress, "SNI address cannot be null"); - this.serverName = Objects.requireNonNull(serverName, "SNI Server name cannot be null"); - } - - public String getServerName() { - return serverName; - } - - @NonNull - @Override - public InetSocketAddress resolve() { - try { - InetAddress[] aRecords = InetAddress.getAllByName(proxyAddress.getHostName()); - if (aRecords.length == 0) { - // Probably never happens, but the JDK docs don't explicitly say so - throw new IllegalArgumentException( - "Could not resolve proxy address " + proxyAddress.getHostName()); - } - // The order of the returned address is unspecified. Sort by IP to make sure we get a true - // round-robin - Arrays.sort(aRecords, IP_COMPARATOR); - int index = - (aRecords.length == 1) - ? 
0 - : OFFSET.getAndUpdate(x -> x == Integer.MAX_VALUE ? 0 : x + 1) % aRecords.length; - return new InetSocketAddress(aRecords[index], proxyAddress.getPort()); - } catch (UnknownHostException e) { - throw new IllegalArgumentException( - "Could not resolve proxy address " + proxyAddress.getHostName(), e); - } - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof SniEndPoint) { - SniEndPoint that = (SniEndPoint) other; - return this.proxyAddress.equals(that.proxyAddress) && this.serverName.equals(that.serverName); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(proxyAddress, serverName); - } - - @Override - public String toString() { - // Note that this uses the original proxy address, so if there are multiple A-records it won't - // show which one was selected. If that turns out to be a problem for debugging, we might need - // to store the result of resolve() in Connection and log that instead of the endpoint. 
- return proxyAddress.toString() + ":" + serverName; - } - - @NonNull - @Override - public String asMetricPrefix() { - String hostString = proxyAddress.getHostString(); - if (hostString == null) { - throw new IllegalArgumentException( - "Could not extract a host string from provided proxy address " + proxyAddress); - } - return hostString.replace('.', '_') + ':' + proxyAddress.getPort() + '_' + serverName; - } - - @SuppressWarnings("UnnecessaryLambda") - private static final Comparator IP_COMPARATOR = - (InetAddress address1, InetAddress address2) -> - UnsignedBytes.lexicographicalComparator() - .compare(address1.getAddress(), address2.getAddress()); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java deleted file mode 100644 index 6f60e9a790b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class TokensChangedRefresh implements MetadataRefresh { - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - return new Result( - oldMetadata.withNodes(oldMetadata.getNodes(), tokenMapEnabled, true, null, context)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java deleted file mode 100644 index c7ea8c93088..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.Node; -import java.net.InetSocketAddress; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * An event emitted from the {@link TopologyMonitor}, indicating a change in the topology of the - * Cassandra cluster. - * - *

Internally, the driver uses this to handle {@code TOPOLOGY_CHANGE} and {@code STATUS_CHANGE} - * events received on the control connection; for historical reasons, those protocol events identify - * nodes by their (untranslated) {@linkplain Node#getBroadcastRpcAddress() broadcast RPC address}. - * - *

As shown by the names, most of these events are mere suggestions, that the driver might choose - * to ignore if they contradict other information it has about the nodes; see the documentation of - * each factory method for detailed explanations. - */ -@Immutable -public class TopologyEvent { - - public enum Type { - SUGGEST_UP, - SUGGEST_DOWN, - FORCE_UP, - FORCE_DOWN, - SUGGEST_ADDED, - SUGGEST_REMOVED, - } - - /** - * Suggests that a node is up. - * - *

    - *
  • if the node is currently ignored by the driver's load balancing policy, this is reflected - * in the driver metadata's corresponding {@link Node}, for information purposes only. - *
  • otherwise: - *
      - *
    • if the driver already had active connections to that node, this has no effect. - *
    • if the driver was currently reconnecting to the node, this causes the current - * {@link - * com.datastax.oss.driver.api.core.connection.ReconnectionPolicy.ReconnectionSchedule} - * to be reset, and the next reconnection attempt to happen immediately. - *
    - *
- */ - public static TopologyEvent suggestUp(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.SUGGEST_UP, broadcastRpcAddress); - } - - /** - * Suggests that a node is down. - * - *
    - *
  • if the node is currently ignored by the driver's load balancing policy, this is reflected - * in the driver metadata's corresponding {@link Node}, for information purposes only. - *
  • otherwise, if the driver still has at least one active connection to that node, this is - * ignored. In other words, a functioning connection is considered a more reliable - * indication than a topology event. - *

    If you want to bypass that behavior and force the node down, use {@link - * #forceDown(InetSocketAddress)}. - *

- */ - public static TopologyEvent suggestDown(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.SUGGEST_DOWN, broadcastRpcAddress); - } - - /** - * Forces the driver to set a node down. - * - *
    - *
  • if the node is currently ignored by the driver's load balancing policy, this is reflected - * in the driver metadata, for information purposes only. - *
  • otherwise, all active connections to the node are closed, and any active reconnection is - * cancelled. - *
- * - * In all cases, the driver will never try to reconnect to the node again. If you decide to - * reconnect to it later, use {@link #forceUp(InetSocketAddress)}. - * - *

This is intended for deployments that use a custom {@link TopologyMonitor} (for example if - * you do some kind of maintenance on a live node). This is also used internally by the driver - * when it detects an unrecoverable error, such as a node that does not support the current - * protocol version. - */ - public static TopologyEvent forceDown(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.FORCE_DOWN, broadcastRpcAddress); - } - - /** - * Cancels a previous {@link #forceDown(InetSocketAddress)} event for the node. - * - *

The node will be set back UP. If it is not ignored by the load balancing policy, a - * connection pool will be reopened. - */ - public static TopologyEvent forceUp(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.FORCE_UP, broadcastRpcAddress); - } - - /** - * Suggests that a new node was added in the cluster. - * - *

The driver will ignore this event if the node is already present in its metadata, or if - * information about the node can't be refreshed (i.e. {@link - * TopologyMonitor#getNewNodeInfo(InetSocketAddress)} fails). - */ - public static TopologyEvent suggestAdded(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.SUGGEST_ADDED, broadcastRpcAddress); - } - - /** - * Suggests that a node was removed from the cluster. - * - *

The driver ignore this event if the node does not exist in its metadata. - */ - public static TopologyEvent suggestRemoved(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.SUGGEST_REMOVED, broadcastRpcAddress); - } - - public final Type type; - - /** - * Note that this is the untranslated broadcast RPC address, as it was received in the - * protocol event. - * - * @see Node#getBroadcastRpcAddress() - */ - public final InetSocketAddress broadcastRpcAddress; - - /** Builds a new instance (the static methods in this class are a preferred alternative). */ - public TopologyEvent(Type type, InetSocketAddress broadcastRpcAddress) { - this.type = type; - this.broadcastRpcAddress = broadcastRpcAddress; - } - - public boolean isForceEvent() { - return type == Type.FORCE_DOWN || type == Type.FORCE_UP; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TopologyEvent) { - TopologyEvent that = (TopologyEvent) other; - return this.type == that.type - && Objects.equals(this.broadcastRpcAddress, that.broadcastRpcAddress); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(this.type, this.broadcastRpcAddress); - } - - @Override - public String toString() { - return "TopologyEvent(" + type + ", " + broadcastRpcAddress + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java deleted file mode 100644 index e7741f11196..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.net.InetSocketAddress; -import java.util.Optional; -import java.util.concurrent.CompletionStage; - -/** - * Monitors the state of the Cassandra cluster. - * - *

It can either push {@link TopologyEvent topology events} to the rest of the driver (to do - * that, retrieve the {@link EventBus}) from the {@link InternalDriverContext}), or receive requests - * to refresh data about the nodes. - * - *

The default implementation uses the control connection: {@code TOPOLOGY_CHANGE} and {@code - * STATUS_CHANGE} events on the connection are converted into {@code TopologyEvent}s, and node - * refreshes are done with queries to system tables. If you prefer to rely on an external monitoring - * tool, this can be completely overridden. - */ -public interface TopologyMonitor extends AsyncAutoCloseable { - - /** - * Triggers the initialization of the monitor. - * - *

The completion of the future returned by this method marks the point when the driver - * considers itself "connected" to the cluster, and proceeds with the rest of the initialization: - * refreshing the list of nodes and the metadata, opening connection pools, etc. By then, the - * topology monitor should be ready to accept calls to its other methods; in particular, {@link - * #refreshNodeList()} will be called shortly after the completion of the future, to load the - * initial list of nodes to connect to. - * - *

If {@code advanced.reconnect-on-init = true} in the configuration, this method is - * responsible for handling reconnection. That is, if the initial attempt to "connect" to the - * cluster fails, it must schedule reattempts, and only complete the returned future when - * connection eventually succeeds. If the user cancels the returned future, then the reconnection - * attempts should stop. - * - *

If this method is called multiple times, it should trigger initialization only once, and - * return the same future on subsequent invocations. - */ - CompletionStage init(); - - /** - * The future returned by {@link #init()}. - * - *

Note that this method may be called before {@link #init()}; at that stage, the future should - * already exist, but be incomplete. - */ - CompletionStage initFuture(); - - /** - * Invoked when the driver needs to refresh the information about an existing node. This is called - * when the node was back and comes back up. - * - *

This will be invoked directly from a driver's internal thread; if the refresh involves - * blocking I/O or heavy computations, it should be scheduled on a separate thread. - * - * @param node the node to refresh. - * @return a future that completes with the information. If the monitor can't fulfill the request - * at this time, it should reply with {@link Optional#empty()}, and the driver will carry on - * with its current information. - */ - CompletionStage> refreshNode(Node node); - - /** - * Invoked when the driver needs to get information about a newly discovered node. - * - *

This will be invoked directly from a driver's internal thread; if the refresh involves - * blocking I/O or heavy computations, it should be scheduled on a separate thread. - * - * @param broadcastRpcAddress the node's broadcast RPC address,. - * @return a future that completes with the information. If the monitor doesn't know any node with - * this address, it should reply with {@link Optional#empty()}; the new node will be ignored. - * @see Node#getBroadcastRpcAddress() - */ - CompletionStage> getNewNodeInfo(InetSocketAddress broadcastRpcAddress); - - /** - * Invoked when the driver needs to refresh information about all the nodes. - * - *

This will be invoked directly from a driver's internal thread; if the refresh involves - * blocking I/O or heavy computations, it should be scheduled on a separate thread. - * - *

The driver calls this at initialization, and uses the result to initialize the {@link - * LoadBalancingPolicy}; successful initialization of the {@link Session} object depends on that - * initial call succeeding. - * - * @return a future that completes with the information. We assume that the full node list will - * always be returned in a single message (no paging). - */ - CompletionStage> refreshNodeList(); - - /** - * Checks whether the nodes in the cluster agree on a common schema version. - * - *

This should typically be implemented with a few retries and a timeout, as the schema can - * take a while to replicate across nodes. - */ - CompletionStage checkSchemaAgreement(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java deleted file mode 100644 index 669f925af65..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Immutable -public class DefaultAggregateMetadata implements AggregateMetadata, Serializable { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultAggregateMetadata.class); - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final FunctionSignature signature; - @Nullable private final FunctionSignature finalFuncSignature; - @Nullable private final Object initCond; - @Nullable private final String formattedInitCond; - @NonNull private final DataType returnType; - @NonNull private final FunctionSignature stateFuncSignature; - @NonNull private final DataType stateType; - - public DefaultAggregateMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull FunctionSignature signature, - @Nullable FunctionSignature finalFuncSignature, - @Nullable Object initCond, - @NonNull DataType returnType, - @NonNull FunctionSignature stateFuncSignature, - @NonNull DataType stateType, - @NonNull TypeCodec stateTypeCodec) { - this.keyspace = keyspace; - this.signature = signature; - this.finalFuncSignature = finalFuncSignature; - this.initCond = initCond; - this.formattedInitCond = computeFormattedInitCond(initCond, stateTypeCodec); - this.returnType = returnType; - this.stateFuncSignature = stateFuncSignature; - this.stateType = 
stateType; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public FunctionSignature getSignature() { - return signature; - } - - @NonNull - @Override - public Optional getFinalFuncSignature() { - return Optional.ofNullable(finalFuncSignature); - } - - @NonNull - @Override - public Optional getInitCond() { - return Optional.ofNullable(initCond); - } - - @NonNull - @Override - public DataType getReturnType() { - return returnType; - } - - @NonNull - @Override - public FunctionSignature getStateFuncSignature() { - return stateFuncSignature; - } - - @NonNull - @Override - public DataType getStateType() { - return stateType; - } - - @NonNull - @Override - public Optional formatInitCond() { - return Optional.ofNullable(this.formattedInitCond); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof AggregateMetadata) { - AggregateMetadata that = (AggregateMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.signature, that.getSignature()) - && Objects.equals(this.finalFuncSignature, that.getFinalFuncSignature().orElse(null)) - && Objects.equals(this.initCond, that.getInitCond().orElse(null)) - && Objects.equals(this.returnType, that.getReturnType()) - && Objects.equals(this.stateFuncSignature, that.getStateFuncSignature()) - && Objects.equals(this.stateType, that.getStateType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, - signature, - finalFuncSignature, - initCond, - returnType, - stateFuncSignature, - stateType); - } - - @Override - public String toString() { - return "DefaultAggregateMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." 
- + signature - + ")"; - } - - @Nullable - private String computeFormattedInitCond( - @Nullable Object initCond, @NonNull TypeCodec stateTypeCodec) { - - if (initCond == null) { - return null; - } - try { - return stateTypeCodec.format(initCond); - } catch (Throwable t) { - LOG.warn( - String.format( - "Failed to format INITCOND for %s.%s, using toString instead", - keyspace.asInternal(), signature.getName().asInternal())); - return initCond.toString(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java deleted file mode 100644 index 3d0c6209880..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultColumnMetadata implements ColumnMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier parent; - @NonNull private final CqlIdentifier name; - @NonNull private final DataType dataType; - private final boolean isStatic; - - public DefaultColumnMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier parent, - @NonNull CqlIdentifier name, - @NonNull DataType dataType, - boolean isStatic) { - this.keyspace = keyspace; - this.parent = parent; - this.name = name; - this.dataType = dataType; - this.isStatic = isStatic; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getParent() { - return parent; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @NonNull - @Override - public DataType getType() { - return dataType; - } - - @Override - public boolean isStatic() { - return isStatic; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ColumnMetadata) { - ColumnMetadata that = (ColumnMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.parent, that.getParent()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(this.dataType, that.getType()) - && this.isStatic == that.isStatic(); - } else { - return false; - } - } - - @Override - public int hashCode() { - return 
Objects.hash(keyspace, parent, name, dataType, isStatic); - } - - @Override - public String toString() { - return "DefaultColumnMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." - + parent.asInternal() - + "." - + name.asInternal() - + " " - + dataType.asCql(true, false) - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java deleted file mode 100644 index 75b343d77b1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultFunctionMetadata implements FunctionMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final FunctionSignature signature; - @NonNull private final List parameterNames; - @NonNull private final String body; - private final boolean calledOnNullInput; - @NonNull private final String language; - @NonNull private final DataType returnType; - - public DefaultFunctionMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull FunctionSignature signature, - @NonNull List parameterNames, - @NonNull String body, - boolean calledOnNullInput, - @NonNull String language, - @NonNull DataType returnType) { - Preconditions.checkArgument( - signature.getParameterTypes().size() == parameterNames.size(), - "Number of parameter names should match number of types in the signature (got %s and %s)", - parameterNames.size(), - signature.getParameterTypes().size()); - this.keyspace = keyspace; - this.signature = signature; - this.parameterNames = parameterNames; - this.body = body; - this.calledOnNullInput = calledOnNullInput; - this.language = language; - this.returnType = returnType; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public FunctionSignature getSignature() { - return signature; - } - - @NonNull - @Override - public 
List getParameterNames() { - return parameterNames; - } - - @NonNull - @Override - public String getBody() { - return body; - } - - @Override - public boolean isCalledOnNullInput() { - return calledOnNullInput; - } - - @NonNull - @Override - public String getLanguage() { - return language; - } - - @NonNull - @Override - public DataType getReturnType() { - return returnType; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof FunctionMetadata) { - FunctionMetadata that = (FunctionMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.signature, that.getSignature()) - && Objects.equals(this.parameterNames, that.getParameterNames()) - && Objects.equals(this.body, that.getBody()) - && this.calledOnNullInput == that.isCalledOnNullInput() - && Objects.equals(this.language, that.getLanguage()) - && Objects.equals(this.returnType, that.getReturnType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, signature, parameterNames, body, calledOnNullInput, language, returnType); - } - - @Override - public String toString() { - return "DefaultFunctionMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." - + signature - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java deleted file mode 100644 index 8ff0263fcc8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Map; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultIndexMetadata implements IndexMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier table; - @NonNull private final CqlIdentifier name; - @NonNull private final IndexKind kind; - @NonNull private final String target; - @NonNull private final Map options; - - public DefaultIndexMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier table, - @NonNull CqlIdentifier name, - @NonNull IndexKind kind, - @NonNull String target, - @NonNull Map options) { - this.keyspace = keyspace; - this.table = table; - this.name = name; - this.kind = kind; - this.target = target; - this.options = options; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getTable() { - return table; - } - - @NonNull - @Override - public CqlIdentifier 
getName() { - return name; - } - - @NonNull - @Override - public IndexKind getKind() { - return kind; - } - - @NonNull - @Override - public String getTarget() { - return target; - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof IndexMetadata) { - IndexMetadata that = (IndexMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.table, that.getTable()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(this.kind, that.getKind()) - && Objects.equals(this.target, that.getTarget()) - && Objects.equals(this.options, that.getOptions()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(keyspace, table, name, kind, target, options); - } - - @Override - public String toString() { - return "DefaultIndexMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." - + table.asInternal() - + "." - + name.asInternal() - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java deleted file mode 100644 index 3d443dd8c16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Map; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultKeyspaceMetadata implements KeyspaceMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier name; - private final boolean durableWrites; - private final boolean virtual; - @NonNull private final Map replication; - @NonNull private final Map types; - @NonNull private final Map tables; - @NonNull private final Map views; - @NonNull private final Map functions; - @NonNull private final Map aggregates; - - public DefaultKeyspaceMetadata( - @NonNull CqlIdentifier name, - boolean durableWrites, - boolean virtual, - @NonNull Map replication, - @NonNull Map types, - @NonNull Map tables, - @NonNull Map views, - @NonNull Map functions, - @NonNull Map aggregates) { - this.name = 
name; - this.durableWrites = durableWrites; - this.virtual = virtual; - this.replication = replication; - this.types = types; - this.tables = tables; - this.views = views; - this.functions = functions; - this.aggregates = aggregates; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @Override - public boolean isDurableWrites() { - return durableWrites; - } - - @Override - public boolean isVirtual() { - return virtual; - } - - @NonNull - @Override - public Map getReplication() { - return replication; - } - - @NonNull - @Override - public Map getUserDefinedTypes() { - return types; - } - - @NonNull - @Override - public Map getTables() { - return tables; - } - - @NonNull - @Override - public Map getViews() { - return views; - } - - @NonNull - @Override - public Map getFunctions() { - return functions; - } - - @NonNull - @Override - public Map getAggregates() { - return aggregates; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof KeyspaceMetadata) { - KeyspaceMetadata that = (KeyspaceMetadata) other; - return Objects.equals(this.name, that.getName()) - && this.durableWrites == that.isDurableWrites() - && this.virtual == that.isVirtual() - && Objects.equals(this.replication, that.getReplication()) - && Objects.equals(this.types, that.getUserDefinedTypes()) - && Objects.equals(this.tables, that.getTables()) - && Objects.equals(this.views, that.getViews()) - && Objects.equals(this.functions, that.getFunctions()) - && Objects.equals(this.aggregates, that.getAggregates()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - name, durableWrites, virtual, replication, types, tables, views, functions, aggregates); - } - - @Override - public String toString() { - return "DefaultKeyspaceMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + name.asInternal() - + ")"; - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java deleted file mode 100644 index 4c339f89299..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultTableMetadata implements TableMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier name; - // null for virtual tables - @Nullable private final UUID id; - private final boolean compactStorage; - private final boolean virtual; - @NonNull private final List partitionKey; - @NonNull private final Map clusteringColumns; - @NonNull private final Map columns; - @NonNull private final Map options; - @NonNull private final Map indexes; - - public DefaultTableMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - @Nullable UUID id, - boolean compactStorage, - boolean virtual, - @NonNull List partitionKey, - @NonNull Map clusteringColumns, - @NonNull Map columns, - @NonNull Map options, - @NonNull Map indexes) { - this.keyspace = keyspace; - this.name = name; - this.id = id; - this.compactStorage = compactStorage; - this.virtual = virtual; - this.partitionKey = partitionKey; - this.clusteringColumns = clusteringColumns; - this.columns = columns; - this.options = options; - this.indexes = indexes; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - 
@Override - public CqlIdentifier getName() { - return name; - } - - @NonNull - @Override - public Optional getId() { - return Optional.ofNullable(id); - } - - @Override - public boolean isCompactStorage() { - return compactStorage; - } - - @Override - public boolean isVirtual() { - return virtual; - } - - @NonNull - @Override - public List getPartitionKey() { - return partitionKey; - } - - @NonNull - @Override - public Map getClusteringColumns() { - return clusteringColumns; - } - - @NonNull - @Override - public Map getColumns() { - return columns; - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @NonNull - @Override - public Map getIndexes() { - return indexes; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TableMetadata) { - TableMetadata that = (TableMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(Optional.ofNullable(this.id), that.getId()) - && this.compactStorage == that.isCompactStorage() - && this.virtual == that.isVirtual() - && Objects.equals(this.partitionKey, that.getPartitionKey()) - && Objects.equals(this.clusteringColumns, that.getClusteringColumns()) - && Objects.equals(this.columns, that.getColumns()) - && Objects.equals(this.indexes, that.getIndexes()) - && Objects.equals(this.options, that.getOptions()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, - name, - id, - compactStorage, - virtual, - partitionKey, - clusteringColumns, - columns, - indexes, - options); - } - - @Override - public String toString() { - return "DefaultTableMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." 
- + name.asInternal() - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java deleted file mode 100644 index 2c5e5a9603e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultViewMetadata implements ViewMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier name; - @NonNull private final CqlIdentifier baseTable; - private final boolean includesAllColumns; - @Nullable private final String whereClause; - @NonNull private final UUID id; - @NonNull private final ImmutableList partitionKey; - @NonNull private final ImmutableMap clusteringColumns; - @NonNull private final ImmutableMap columns; - @NonNull private final Map options; - - public DefaultViewMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - @NonNull CqlIdentifier baseTable, - boolean includesAllColumns, - @Nullable String whereClause, - @NonNull UUID id, - @NonNull ImmutableList partitionKey, - @NonNull ImmutableMap clusteringColumns, - @NonNull ImmutableMap columns, - @NonNull Map options) { - this.keyspace = keyspace; - this.name = name; - this.baseTable = baseTable; - this.includesAllColumns = includesAllColumns; - this.whereClause = whereClause; - this.id = id; - this.partitionKey = partitionKey; - this.clusteringColumns = 
clusteringColumns; - this.columns = columns; - this.options = options; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @NonNull - @Override - public Optional getId() { - return Optional.of(id); - } - - @NonNull - @Override - public CqlIdentifier getBaseTable() { - return baseTable; - } - - @Override - public boolean includesAllColumns() { - return includesAllColumns; - } - - @NonNull - @Override - public Optional getWhereClause() { - return Optional.ofNullable(whereClause); - } - - @NonNull - @Override - public List getPartitionKey() { - return partitionKey; - } - - @NonNull - @Override - public Map getClusteringColumns() { - return clusteringColumns; - } - - @NonNull - @Override - public Map getColumns() { - return columns; - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ViewMetadata) { - ViewMetadata that = (ViewMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(this.baseTable, that.getBaseTable()) - && this.includesAllColumns == that.includesAllColumns() - && Objects.equals(this.whereClause, that.getWhereClause().orElse(null)) - && Objects.equals(Optional.of(this.id), that.getId()) - && Objects.equals(this.partitionKey, that.getPartitionKey()) - && Objects.equals(this.clusteringColumns, that.getClusteringColumns()) - && Objects.equals(this.columns, that.getColumns()) - && Objects.equals(this.options, that.getOptions()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, - name, - baseTable, - includesAllColumns, - whereClause, - id, - partitionKey, - clusteringColumns, - columns, - options); - } - - @Override - public String toString() { - 
return "DefaultViewMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." - + name.asInternal() - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListener.java deleted file mode 100644 index eebe16364d1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListener.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.function.Consumer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Combines multiple schema change listeners into a single one. - * - *

Any exception thrown by a child listener is caught and logged. - */ -@ThreadSafe -public class MultiplexingSchemaChangeListener implements SchemaChangeListener { - - private static final Logger LOG = LoggerFactory.getLogger(MultiplexingSchemaChangeListener.class); - - private final List listeners = new CopyOnWriteArrayList<>(); - - public MultiplexingSchemaChangeListener() {} - - public MultiplexingSchemaChangeListener(SchemaChangeListener... listeners) { - this(Arrays.asList(listeners)); - } - - public MultiplexingSchemaChangeListener(Collection listeners) { - addListeners(listeners); - } - - private void addListeners(Collection source) { - for (SchemaChangeListener listener : source) { - addListener(listener); - } - } - - private void addListener(SchemaChangeListener toAdd) { - Objects.requireNonNull(toAdd, "listener cannot be null"); - if (toAdd instanceof MultiplexingSchemaChangeListener) { - addListeners(((MultiplexingSchemaChangeListener) toAdd).listeners); - } else { - listeners.add(toAdd); - } - } - - public void register(@NonNull SchemaChangeListener listener) { - addListener(listener); - } - - @Override - public void onKeyspaceCreated(@NonNull KeyspaceMetadata keyspace) { - invokeListeners(listener -> listener.onKeyspaceCreated(keyspace), "onKeyspaceCreated"); - } - - @Override - public void onKeyspaceDropped(@NonNull KeyspaceMetadata keyspace) { - invokeListeners(listener -> listener.onKeyspaceDropped(keyspace), "onKeyspaceDropped"); - } - - @Override - public void onKeyspaceUpdated( - @NonNull KeyspaceMetadata current, @NonNull KeyspaceMetadata previous) { - invokeListeners(listener -> listener.onKeyspaceUpdated(current, previous), "onKeyspaceUpdated"); - } - - @Override - public void onTableCreated(@NonNull TableMetadata table) { - invokeListeners(listener -> listener.onTableCreated(table), "onTableCreated"); - } - - @Override - public void onTableDropped(@NonNull TableMetadata table) { - invokeListeners(listener -> listener.onTableDropped(table), 
"onTableDropped"); - } - - @Override - public void onTableUpdated(@NonNull TableMetadata current, @NonNull TableMetadata previous) { - invokeListeners(listener -> listener.onTableUpdated(current, previous), "onTableUpdated"); - } - - @Override - public void onUserDefinedTypeCreated(@NonNull UserDefinedType type) { - invokeListeners( - listener -> listener.onUserDefinedTypeCreated(type), "onUserDefinedTypeCreated"); - } - - @Override - public void onUserDefinedTypeDropped(@NonNull UserDefinedType type) { - invokeListeners( - listener -> listener.onUserDefinedTypeDropped(type), "onUserDefinedTypeDropped"); - } - - @Override - public void onUserDefinedTypeUpdated( - @NonNull UserDefinedType current, @NonNull UserDefinedType previous) { - invokeListeners( - listener -> listener.onUserDefinedTypeUpdated(current, previous), - "onUserDefinedTypeUpdated"); - } - - @Override - public void onFunctionCreated(@NonNull FunctionMetadata function) { - invokeListeners(listener -> listener.onFunctionCreated(function), "onFunctionCreated"); - } - - @Override - public void onFunctionDropped(@NonNull FunctionMetadata function) { - invokeListeners(listener -> listener.onFunctionDropped(function), "onFunctionDropped"); - } - - @Override - public void onFunctionUpdated( - @NonNull FunctionMetadata current, @NonNull FunctionMetadata previous) { - invokeListeners(listener -> listener.onFunctionUpdated(current, previous), "onFunctionUpdated"); - } - - @Override - public void onAggregateCreated(@NonNull AggregateMetadata aggregate) { - invokeListeners(listener -> listener.onAggregateCreated(aggregate), "onAggregateCreated"); - } - - @Override - public void onAggregateDropped(@NonNull AggregateMetadata aggregate) { - invokeListeners(listener -> listener.onAggregateDropped(aggregate), "onAggregateDropped"); - } - - @Override - public void onAggregateUpdated( - @NonNull AggregateMetadata current, @NonNull AggregateMetadata previous) { - invokeListeners( - listener -> 
listener.onAggregateUpdated(current, previous), "onAggregateUpdated"); - } - - @Override - public void onViewCreated(@NonNull ViewMetadata view) { - invokeListeners(listener -> listener.onViewCreated(view), "onViewCreated"); - } - - @Override - public void onViewDropped(@NonNull ViewMetadata view) { - invokeListeners(listener -> listener.onViewDropped(view), "onViewDropped"); - } - - @Override - public void onViewUpdated(@NonNull ViewMetadata current, @NonNull ViewMetadata previous) { - invokeListeners(listener -> listener.onViewUpdated(current, previous), "onViewUpdated"); - } - - @Override - public void onSessionReady(@NonNull Session session) { - invokeListeners(listener -> listener.onSessionReady(session), "onSessionReady"); - } - - @Override - public void close() throws Exception { - for (SchemaChangeListener listener : listeners) { - try { - listener.close(); - } catch (Exception e) { - Loggers.warnWithException( - LOG, "Unexpected error while closing schema change listener {}.", listener, e); - } - } - } - - private void invokeListeners(@NonNull Consumer action, String event) { - for (SchemaChangeListener listener : listeners) { - try { - action.accept(listener); - } catch (Exception e) { - Loggers.warnWithException( - LOG, - "Unexpected error while notifying schema change listener {} of an {} event.", - listener, - event, - e); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/NoopSchemaChangeListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/NoopSchemaChangeListener.java deleted file mode 100644 index 76fed2e5d24..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/NoopSchemaChangeListener.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListenerBase; -import net.jcip.annotations.ThreadSafe; - -/** - * Default schema change listener implementation with empty methods. This implementation is used - * when no listeners were registered, neither programmatically nor through the configuration. - */ -@ThreadSafe -public class NoopSchemaChangeListener extends SchemaChangeListenerBase { - - public NoopSchemaChangeListener(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java deleted file mode 100644 index 5f01d019ee0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -public enum SchemaChangeType { - CREATED, - UPDATED, - DROPPED, - ; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java deleted file mode 100644 index b762f35b885..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.Describable; -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import java.util.function.Consumer; -import net.jcip.annotations.NotThreadSafe; - -/** - * A simple builder that is used internally for the queries of {@link Describable} schema elements. - */ -@NotThreadSafe -public class ScriptBuilder { - private static final int INDENT_SIZE = 4; - - private final boolean pretty; - private final StringBuilder builder = new StringBuilder(); - private int indent; - private boolean isAtLineStart; - private boolean isFirstOption = true; - - public ScriptBuilder(boolean pretty) { - this.pretty = pretty; - } - - public ScriptBuilder append(String s) { - if (pretty && isAtLineStart && indent > 0) { - builder.append(Strings.repeat(" ", indent * INDENT_SIZE)); - } - isAtLineStart = false; - builder.append(s); - return this; - } - - public ScriptBuilder append(CqlIdentifier id) { - append(id.asCql(pretty)); - return this; - } - - public ScriptBuilder newLine() { - if (pretty) { - builder.append('\n'); - } else { - builder.append(' '); - } - isAtLineStart = true; - return this; - } - - public ScriptBuilder forceNewLine(int count) { - builder.append(Strings.repeat("\n", count)); - isAtLineStart = true; - return this; - } - - public ScriptBuilder increaseIndent() { - indent += 1; - return this; - } - - public ScriptBuilder decreaseIndent() { - if (indent > 0) { - indent -= 1; - } - return this; - } - - /** Appends "WITH " the first time it's called, then "AND " the next times. 
*/ - public ScriptBuilder andWith() { - if (isFirstOption) { - append(" WITH "); - isFirstOption = false; - } else { - newLine(); - append("AND "); - } - return this; - } - - public ScriptBuilder forEach(Iterable iterable, Consumer action) { - for (E e : iterable) { - action.accept(e); - } - return this; - } - - public String build() { - return builder.toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ShallowUserDefinedType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ShallowUserDefinedType.java deleted file mode 100644 index 069ce3752b2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ShallowUserDefinedType.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.UserDefinedTypeParser; -import com.datastax.oss.driver.internal.core.type.DefaultUserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.io.Serializable; -import java.util.List; -import net.jcip.annotations.Immutable; - -/** - * A temporary UDT implementation that only contains the keyspace and name. - * - *

When we refresh a keyspace's UDTs, we can't fully materialize them right away, because they - * might depend on each other and the system table query does not return them in topological order. - * So we do a first pass where UDTs that are nested into other UDTsare resolved as instances of this - * class, then a topological sort, then a second pass to replace all shallow definitions by the - * actual instance (which will be a {@link DefaultUserDefinedType}). - * - *

This type is also used in the schema builder's internal representation: the keyspace, name and - * frozen-ness are the only things we need to generate a query string. - * - * @see UserDefinedTypeParser - */ -@Immutable -public class ShallowUserDefinedType implements UserDefinedType, Serializable { - - private static final long serialVersionUID = 1; - - private final CqlIdentifier keyspace; - private final CqlIdentifier name; - private final boolean frozen; - - public ShallowUserDefinedType(CqlIdentifier keyspace, CqlIdentifier name, boolean frozen) { - this.keyspace = keyspace; - this.name = name; - this.frozen = frozen; - } - - @Nullable - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @NonNull - @Override - public List getFieldNames() { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @Override - public int firstIndexOf(@NonNull String name) { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @NonNull - @Override - public List getFieldTypes() { - throw new UnsupportedOperationException( - "This implementation 
should only be used internally, this is likely a driver bug"); - } - - @NonNull - @Override - public UserDefinedType copy(boolean newFrozen) { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @NonNull - @Override - public UdtValue newValue() { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @NonNull - @Override - public UdtValue newValue(@NonNull Object... fields) { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @NonNull - @Override - public AttachmentPoint getAttachmentPoint() { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @Override - public boolean isDetached() { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream s) throws IOException { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } - - private void writeObject(@SuppressWarnings("unused") ObjectOutputStream s) throws IOException { - throw new UnsupportedOperationException( - "This implementation should only be used internally, this is likely a driver bug"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/AggregateChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/AggregateChangeEvent.java deleted file mode 100644 
index fe175a98579..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/AggregateChangeEvent.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class AggregateChangeEvent { - - public static AggregateChangeEvent dropped(AggregateMetadata oldAggregate) { - return new AggregateChangeEvent(SchemaChangeType.DROPPED, oldAggregate, null); - } - - public static AggregateChangeEvent created(AggregateMetadata newAggregate) { - return new AggregateChangeEvent(SchemaChangeType.CREATED, null, newAggregate); - } - - public static AggregateChangeEvent updated( - AggregateMetadata oldAggregate, AggregateMetadata newAggregate) { - return new AggregateChangeEvent(SchemaChangeType.UPDATED, oldAggregate, newAggregate); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final 
AggregateMetadata oldAggregate; - /** {@code null} if the event is a drop */ - public final AggregateMetadata newAggregate; - - private AggregateChangeEvent( - SchemaChangeType changeType, AggregateMetadata oldAggregate, AggregateMetadata newAggregate) { - this.changeType = changeType; - this.oldAggregate = oldAggregate; - this.newAggregate = newAggregate; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof AggregateChangeEvent) { - AggregateChangeEvent that = (AggregateChangeEvent) other; - return this.changeType == that.changeType - && Objects.equals(this.oldAggregate, that.oldAggregate) - && Objects.equals(this.newAggregate, that.newAggregate); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldAggregate, newAggregate); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("AggregateChangeEvent(CREATED %s)", newAggregate.getSignature()); - case UPDATED: - return String.format( - "AggregateChangeEvent(UPDATED %s=>%s)", - oldAggregate.getSignature(), newAggregate.getSignature()); - case DROPPED: - return String.format("AggregateChangeEvent(DROPPED %s)", oldAggregate.getSignature()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/FunctionChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/FunctionChangeEvent.java deleted file mode 100644 index 4ab4f0946ec..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/FunctionChangeEvent.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class FunctionChangeEvent { - - public static FunctionChangeEvent dropped(FunctionMetadata oldFunction) { - return new FunctionChangeEvent(SchemaChangeType.DROPPED, oldFunction, null); - } - - public static FunctionChangeEvent created(FunctionMetadata newFunction) { - return new FunctionChangeEvent(SchemaChangeType.CREATED, null, newFunction); - } - - public static FunctionChangeEvent updated( - FunctionMetadata oldFunction, FunctionMetadata newFunction) { - return new FunctionChangeEvent(SchemaChangeType.UPDATED, oldFunction, newFunction); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final FunctionMetadata oldFunction; - /** {@code null} if the event is a drop */ - public final FunctionMetadata newFunction; - - private FunctionChangeEvent( - SchemaChangeType changeType, FunctionMetadata oldFunction, FunctionMetadata newFunction) { - this.changeType = changeType; - this.oldFunction = oldFunction; - 
this.newFunction = newFunction; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof FunctionChangeEvent) { - FunctionChangeEvent that = (FunctionChangeEvent) other; - return this.changeType == that.changeType - && Objects.equals(this.oldFunction, that.oldFunction) - && Objects.equals(this.newFunction, that.newFunction); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldFunction, newFunction); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("FunctionChangeEvent(CREATED %s)", newFunction.getSignature()); - case UPDATED: - return String.format( - "FunctionChangeEvent(UPDATED %s=>%s)", - oldFunction.getSignature(), newFunction.getSignature()); - case DROPPED: - return String.format("FunctionChangeEvent(DROPPED %s)", oldFunction.getSignature()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/KeyspaceChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/KeyspaceChangeEvent.java deleted file mode 100644 index 0bd2a9d75af..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/KeyspaceChangeEvent.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class KeyspaceChangeEvent { - - public static KeyspaceChangeEvent dropped(KeyspaceMetadata oldKeyspace) { - return new KeyspaceChangeEvent(SchemaChangeType.DROPPED, oldKeyspace, null); - } - - public static KeyspaceChangeEvent created(KeyspaceMetadata newKeyspace) { - return new KeyspaceChangeEvent(SchemaChangeType.CREATED, null, newKeyspace); - } - - public static KeyspaceChangeEvent updated( - KeyspaceMetadata oldKeyspace, KeyspaceMetadata newKeyspace) { - return new KeyspaceChangeEvent(SchemaChangeType.UPDATED, oldKeyspace, newKeyspace); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final KeyspaceMetadata oldKeyspace; - /** {@code null} if the event is a drop */ - public final KeyspaceMetadata newKeyspace; - - private KeyspaceChangeEvent( - SchemaChangeType changeType, KeyspaceMetadata oldKeyspace, KeyspaceMetadata newKeyspace) { - this.changeType = changeType; - this.oldKeyspace = oldKeyspace; - this.newKeyspace = newKeyspace; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof KeyspaceChangeEvent) { - KeyspaceChangeEvent that = (KeyspaceChangeEvent) other; - return this.changeType == 
that.changeType - && Objects.equals(this.oldKeyspace, that.oldKeyspace) - && Objects.equals(this.newKeyspace, that.newKeyspace); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldKeyspace, newKeyspace); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("KeyspaceChangeEvent(CREATED %s)", newKeyspace.getName()); - case UPDATED: - return String.format( - "KeyspaceChangeEvent(UPDATED %s=>%s)", oldKeyspace.getName(), newKeyspace.getName()); - case DROPPED: - return String.format("KeyspaceChangeEvent(DROPPED %s)", oldKeyspace.getName()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java deleted file mode 100644 index 0902cf4e5b8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class TableChangeEvent { - - public static TableChangeEvent dropped(TableMetadata oldTable) { - return new TableChangeEvent(SchemaChangeType.DROPPED, oldTable, null); - } - - public static TableChangeEvent created(TableMetadata newTable) { - return new TableChangeEvent(SchemaChangeType.CREATED, null, newTable); - } - - public static TableChangeEvent updated(TableMetadata oldTable, TableMetadata newTable) { - return new TableChangeEvent(SchemaChangeType.UPDATED, oldTable, newTable); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final TableMetadata oldTable; - /** {@code null} if the event is a drop */ - public final TableMetadata newTable; - - private TableChangeEvent( - SchemaChangeType changeType, TableMetadata oldTable, TableMetadata newTable) { - this.changeType = changeType; - this.oldTable = oldTable; - this.newTable = newTable; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TableChangeEvent) { - TableChangeEvent that = (TableChangeEvent) other; - return this.changeType == that.changeType - && Objects.equals(this.oldTable, that.oldTable) - && Objects.equals(this.newTable, that.newTable); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldTable, newTable); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("TableChangeEvent(CREATED %s)", newTable.getName()); - case UPDATED: - return String.format( - "TableChangeEvent(UPDATED %s=>%s)", oldTable.getName(), newTable.getName()); - case DROPPED: - 
return String.format("TableChangeEvent(DROPPED %s)", oldTable.getName()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java deleted file mode 100644 index f8048570ac2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class TypeChangeEvent { - - public static TypeChangeEvent dropped(UserDefinedType oldType) { - return new TypeChangeEvent(SchemaChangeType.DROPPED, oldType, null); - } - - public static TypeChangeEvent created(UserDefinedType newType) { - return new TypeChangeEvent(SchemaChangeType.CREATED, null, newType); - } - - public static TypeChangeEvent updated(UserDefinedType oldType, UserDefinedType newType) { - return new TypeChangeEvent(SchemaChangeType.UPDATED, oldType, newType); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final UserDefinedType oldType; - /** {@code null} if the event is a drop */ - public final UserDefinedType newType; - - private TypeChangeEvent( - SchemaChangeType changeType, UserDefinedType oldType, UserDefinedType newType) { - this.changeType = changeType; - this.oldType = oldType; - this.newType = newType; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TypeChangeEvent) { - TypeChangeEvent that = (TypeChangeEvent) other; - return this.changeType == that.changeType - && Objects.equals(this.oldType, that.oldType) - && Objects.equals(this.newType, that.newType); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldType, newType); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("TypeChangeEvent(CREATED %s)", newType.getName()); - case UPDATED: - return String.format( - "TypeChangeEvent(UPDATED %s=>%s)", oldType.getName(), newType.getName()); - case DROPPED: - return 
String.format("TypeChangeEvent(DROPPED %s)", oldType.getName()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java deleted file mode 100644 index 91e59d287f1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class ViewChangeEvent { - - public static ViewChangeEvent dropped(ViewMetadata oldView) { - return new ViewChangeEvent(SchemaChangeType.DROPPED, oldView, null); - } - - public static ViewChangeEvent created(ViewMetadata newView) { - return new ViewChangeEvent(SchemaChangeType.CREATED, null, newView); - } - - public static ViewChangeEvent updated(ViewMetadata oldView, ViewMetadata newView) { - return new ViewChangeEvent(SchemaChangeType.UPDATED, oldView, newView); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final ViewMetadata oldView; - /** {@code null} if the event is a drop */ - public final ViewMetadata newView; - - private ViewChangeEvent(SchemaChangeType changeType, ViewMetadata oldView, ViewMetadata newView) { - this.changeType = changeType; - this.oldView = oldView; - this.newView = newView; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ViewChangeEvent) { - ViewChangeEvent that = (ViewChangeEvent) other; - return this.changeType == that.changeType - && Objects.equals(this.oldView, that.oldView) - && Objects.equals(this.newView, that.newView); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldView, newView); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("ViewChangeEvent(CREATED %s)", newView.getName()); - case UPDATED: - return String.format( - "ViewChangeEvent(UPDATED %s=>%s)", oldView.getName(), newView.getName()); - case DROPPED: - return String.format("ViewChangeEvent(DROPPED 
%s)", oldView.getName()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java deleted file mode 100644 index d1f8640a744..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultAggregateMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class AggregateParser { - private final DataTypeParser dataTypeParser; - private final InternalDriverContext context; - - public AggregateParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.dataTypeParser = dataTypeParser; - this.context = context; - } - - public AggregateMetadata parseAggregate( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - // Cassandra < 3.0: - // CREATE TABLE system.schema_aggregates ( - // keyspace_name text, - // aggregate_name text, - // signature frozen>, - // argument_types list, - // final_func text, - // initcond blob, - // return_type text, - // state_func text, - // state_type text, - // PRIMARY KEY (keyspace_name, aggregate_name, signature) - // ) WITH CLUSTERING ORDER BY (aggregate_name ASC, signature ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system.schema_aggregates ( - // keyspace_name text, - // aggregate_name text, - // argument_types frozen>, - // final_func text, - // initcond text, - // return_type text, - // state_func text, - // state_type text, - // PRIMARY KEY 
(keyspace_name, aggregate_name, argument_types) - // ) WITH CLUSTERING ORDER BY (aggregate_name ASC, argument_types ASC) - String simpleName = row.getString("aggregate_name"); - List argumentTypes = row.getListOfString("argument_types"); - FunctionSignature signature = - new FunctionSignature( - CqlIdentifier.fromInternal(simpleName), - dataTypeParser.parse(keyspaceId, argumentTypes, userDefinedTypes, context)); - - DataType stateType = - dataTypeParser.parse(keyspaceId, row.getString("state_type"), userDefinedTypes, context); - TypeCodec stateTypeCodec = context.getCodecRegistry().codecFor(stateType); - - String stateFuncSimpleName = row.getString("state_func"); - FunctionSignature stateFuncSignature = - new FunctionSignature( - CqlIdentifier.fromInternal(stateFuncSimpleName), - ImmutableList.builder() - .add(stateType) - .addAll(signature.getParameterTypes()) - .build()); - - String finalFuncSimpleName = row.getString("final_func"); - FunctionSignature finalFuncSignature = - (finalFuncSimpleName == null) - ? null - : new FunctionSignature(CqlIdentifier.fromInternal(finalFuncSimpleName), stateType); - - DataType returnType = - dataTypeParser.parse(keyspaceId, row.getString("return_type"), userDefinedTypes, context); - - Object initCond; - if (row.isString("initcond")) { // Cassandra 3 - String initCondString = row.getString("initcond"); - initCond = (initCondString == null) ? null : stateTypeCodec.parse(initCondString); - } else { // Cassandra 2.2 - ByteBuffer initCondBlob = row.getByteBuffer("initcond"); - initCond = - (initCondBlob == null) - ? 
null - : stateTypeCodec.decode(initCondBlob, context.getProtocolVersion()); - } - return new DefaultAggregateMetadata( - keyspaceId, - signature, - finalFuncSignature, - initCond, - returnType, - stateFuncSignature, - stateType, - stateTypeCodec); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java deleted file mode 100644 index 9749a921aae..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultKeyspaceMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Default parser implementation for Cassandra. - * - *

For modularity, the code for each element row is split into separate classes (schema stuff is - * not on the hot path, so creating a few extra objects doesn't matter). - */ -@ThreadSafe -public class CassandraSchemaParser implements SchemaParser { - - private static final Logger LOG = LoggerFactory.getLogger(CassandraSchemaParser.class); - - private final SchemaRows rows; - private final UserDefinedTypeParser userDefinedTypeParser; - private final TableParser tableParser; - private final ViewParser viewParser; - private final FunctionParser functionParser; - private final AggregateParser aggregateParser; - private final String logPrefix; - private final long startTimeNs = System.nanoTime(); - - public CassandraSchemaParser(SchemaRows rows, InternalDriverContext context) { - this.rows = rows; - this.logPrefix = context.getSessionName(); - - this.userDefinedTypeParser = new UserDefinedTypeParser(rows.dataTypeParser(), context); - this.tableParser = new TableParser(rows, context); - this.viewParser = new ViewParser(rows, context); - this.functionParser = new FunctionParser(rows.dataTypeParser(), context); - this.aggregateParser = new AggregateParser(rows.dataTypeParser(), context); - } - - @Override - public SchemaRefresh parse() { - ImmutableMap.Builder keyspacesBuilder = ImmutableMap.builder(); - for (AdminRow row : rows.keyspaces()) { - KeyspaceMetadata keyspace = parseKeyspace(row); - keyspacesBuilder.put(keyspace.getName(), keyspace); - } - for (AdminRow row : rows.virtualKeyspaces()) { - KeyspaceMetadata keyspace = parseVirtualKeyspace(row); - keyspacesBuilder.put(keyspace.getName(), keyspace); - } - SchemaRefresh refresh = new SchemaRefresh(keyspacesBuilder.build()); - LOG.debug("[{}] Schema parsing took {}", logPrefix, NanoTime.formatTimeSince(startTimeNs)); - return refresh; - } - - private KeyspaceMetadata parseKeyspace(AdminRow keyspaceRow) { - - // Cassandra <= 2.2 - // CREATE TABLE system.schema_keyspaces ( - // keyspace_name text PRIMARY KEY, - // 
durable_writes boolean, - // strategy_class text, - // strategy_options text - // ) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.keyspaces ( - // keyspace_name text PRIMARY KEY, - // durable_writes boolean, - // replication frozen> - // ) - CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); - boolean durableWrites = - MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); - - Map replicationOptions; - if (keyspaceRow.contains("strategy_class")) { - String strategyClass = keyspaceRow.getString("strategy_class"); - Map strategyOptions = - SimpleJsonParser.parseStringMap(keyspaceRow.getString("strategy_options")); - replicationOptions = - ImmutableMap.builder() - .putAll(strategyOptions) - .put("class", strategyClass) - .build(); - } else { - replicationOptions = keyspaceRow.getMapOfStringToString("replication"); - } - - Map types = parseTypes(keyspaceId); - - return new DefaultKeyspaceMetadata( - keyspaceId, - durableWrites, - false, - replicationOptions, - types, - parseTables(keyspaceId, types), - parseViews(keyspaceId, types), - parseFunctions(keyspaceId, types), - parseAggregates(keyspaceId, types)); - } - - private KeyspaceMetadata parseVirtualKeyspace(AdminRow keyspaceRow) { - - CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); - boolean durableWrites = - MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); - - Map replicationOptions = Collections.emptyMap(); - ; - - Map types = parseTypes(keyspaceId); - - return new DefaultKeyspaceMetadata( - keyspaceId, - durableWrites, - true, - replicationOptions, - types, - parseVirtualTables(keyspaceId, types), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - private Map parseTypes(CqlIdentifier keyspaceId) { - return userDefinedTypeParser.parse(rows.types().get(keyspaceId), keyspaceId); - } - - private Map parseVirtualTables( - 
CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); - for (AdminRow tableRow : rows.virtualTables().get(keyspaceId)) { - TableMetadata table = tableParser.parseVirtualTable(tableRow, keyspaceId, types); - if (table != null) { - tablesBuilder.put(table.getName(), table); - } - } - return tablesBuilder.build(); - } - - private Map parseTables( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); - for (AdminRow tableRow : rows.tables().get(keyspaceId)) { - TableMetadata table = tableParser.parseTable(tableRow, keyspaceId, types); - if (table != null) { - tablesBuilder.put(table.getName(), table); - } - } - return tablesBuilder.build(); - } - - private Map parseViews( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder viewsBuilder = ImmutableMap.builder(); - for (AdminRow viewRow : rows.views().get(keyspaceId)) { - ViewMetadata view = viewParser.parseView(viewRow, keyspaceId, types); - if (view != null) { - viewsBuilder.put(view.getName(), view); - } - } - return viewsBuilder.build(); - } - - private Map parseFunctions( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder functionsBuilder = - ImmutableMap.builder(); - for (AdminRow functionRow : rows.functions().get(keyspaceId)) { - FunctionMetadata function = functionParser.parseFunction(functionRow, keyspaceId, types); - if (function != null) { - functionsBuilder.put(function.getSignature(), function); - } - } - return functionsBuilder.build(); - } - - private Map parseAggregates( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder aggregatesBuilder = - ImmutableMap.builder(); - for (AdminRow aggregateRow : rows.aggregates().get(keyspaceId)) { - AggregateMetadata aggregate = aggregateParser.parseAggregate(aggregateRow, keyspaceId, types); - if (aggregate != null) { - aggregatesBuilder.put(aggregate.getSignature(), aggregate); - } - } - return aggregatesBuilder.build(); - } -} diff 
--git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java deleted file mode 100644 index 1037ccda1ae..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DataTypeClassNameCompositeParser extends DataTypeClassNameParser { - - public ParseResult parseWithComposite( - String className, - CqlIdentifier keyspaceId, - Map userTypes, - InternalDriverContext context) { - Parser parser = new Parser(className, 0); - - String next = parser.parseNextName(); - if (!isComposite(next)) { - return new ParseResult(parse(keyspaceId, className, userTypes, context), isReversed(next)); - } - - List subClassNames = parser.getTypeParameters(); - int count = subClassNames.size(); - String last = subClassNames.get(count - 1); - Map collections = new HashMap<>(); - if (isCollection(last)) { - count--; - Parser collectionParser = new Parser(last, 0); - collectionParser.parseNextName(); // skips columnToCollectionType - Map params = collectionParser.getCollectionsParameters(); - for (Map.Entry entry : params.entrySet()) { - collections.put(entry.getKey(), parse(keyspaceId, entry.getValue(), userTypes, context)); - } - } - - List types = new ArrayList<>(count); - List reversed = new ArrayList<>(count); - for (int i = 0; i < count; i++) { - types.add(parse(keyspaceId, subClassNames.get(i), userTypes, context)); - reversed.add(isReversed(subClassNames.get(i))); - } - - return new ParseResult(true, types, reversed, collections); - } - - public static class ParseResult { - public final boolean isComposite; - public final List types; - public final List reversed; - public final Map collections; - - private 
ParseResult(DataType type, boolean reversed) { - this( - false, - Collections.singletonList(type), - Collections.singletonList(reversed), - Collections.emptyMap()); - } - - private ParseResult( - boolean isComposite, - List types, - List reversed, - Map collections) { - this.isComposite = isComposite; - this.types = types; - this.reversed = reversed; - this.collections = collections; - } - } - - private static boolean isComposite(String className) { - return className.startsWith("org.apache.cassandra.db.marshal.CompositeType"); - } - - private static boolean isCollection(String className) { - return className.startsWith("org.apache.cassandra.db.marshal.ColumnToCollectionType"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java deleted file mode 100644 index bf252d0bc57..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Parses data types from schema tables, for Cassandra 2.2 and below. - * - *

In these versions, data types appear as class names, like - * "org.apache.cassandra.db.marshal.AsciiType" or - * "org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type)". - * - *

This is modified (and simplified) from Cassandra's {@code TypeParser} class to suit our needs. - * In particular it's not very efficient, but it doesn't really matter since it's rarely used and - * never in a critical path. - */ -@ThreadSafe -public class DataTypeClassNameParser implements DataTypeParser { - - private static final Logger LOG = LoggerFactory.getLogger(DataTypeClassNameParser.class); - - @Override - public DataType parse( - CqlIdentifier keyspaceId, - String toParse, - Map userTypes, - InternalDriverContext context) { - // We take keyspaceId as a parameter because of the parent interface, but it's actually unused - // by this implementation. - return parse(toParse, userTypes, context, context.getSessionName()); - } - - /** - * Simplified parse method for external use. - * - *

This is intended for use in Cassandra's UDF implementation (the current version uses the - * similar method from driver 3). - */ - public DataType parse(String toParse, AttachmentPoint attachmentPoint) { - return parse( - toParse, - null, // No caching of user types: nested types will always be fully re-parsed - attachmentPoint, - "parser"); - } - - private DataType parse( - String toParse, - Map userTypes, - AttachmentPoint attachmentPoint, - String logPrefix) { - boolean frozen = false; - if (isReversed(toParse)) { - // Just skip the ReversedType part, we don't care - toParse = getNestedClassName(toParse); - } else if (toParse.startsWith("org.apache.cassandra.db.marshal.FrozenType")) { - frozen = true; - toParse = getNestedClassName(toParse); - } - - Parser parser = new Parser(toParse, 0); - String next = parser.parseNextName(); - - if (next.startsWith("org.apache.cassandra.db.marshal.ListType")) { - DataType elementType = - parse(parser.getTypeParameters().get(0), userTypes, attachmentPoint, logPrefix); - return DataTypes.listOf(elementType, frozen); - } - - if (next.startsWith("org.apache.cassandra.db.marshal.SetType")) { - DataType elementType = - parse(parser.getTypeParameters().get(0), userTypes, attachmentPoint, logPrefix); - return DataTypes.setOf(elementType, frozen); - } - - if (next.startsWith("org.apache.cassandra.db.marshal.MapType")) { - List parameters = parser.getTypeParameters(); - DataType keyType = parse(parameters.get(0), userTypes, attachmentPoint, logPrefix); - DataType valueType = parse(parameters.get(1), userTypes, attachmentPoint, logPrefix); - return DataTypes.mapOf(keyType, valueType, frozen); - } - - if (frozen) - LOG.warn( - "[{}] Got o.a.c.db.marshal.FrozenType for something else than a collection, " - + "this driver version might be too old for your version of Cassandra", - logPrefix); - - if (next.startsWith("org.apache.cassandra.db.marshal.UserType")) { - ++parser.idx; // skipping '(' - - CqlIdentifier keyspace = 
CqlIdentifier.fromInternal(parser.readOne()); - parser.skipBlankAndComma(); - String typeName = - TypeCodecs.TEXT.decode( - Bytes.fromHexString("0x" + parser.readOne()), attachmentPoint.getProtocolVersion()); - if (typeName == null) { - throw new AssertionError("Type name cannot be null, this is a server bug"); - } - CqlIdentifier typeId = CqlIdentifier.fromInternal(typeName); - Map nameAndTypeParameters = parser.getNameAndTypeParameters(); - - // Avoid re-parsing if we already have the definition - if (userTypes != null && userTypes.containsKey(typeId)) { - // copy as frozen since C* 2.x UDTs are always frozen. - return userTypes.get(typeId).copy(true); - } else { - UserDefinedTypeBuilder builder = new UserDefinedTypeBuilder(keyspace, typeId); - parser.skipBlankAndComma(); - for (Map.Entry entry : nameAndTypeParameters.entrySet()) { - CqlIdentifier fieldName = CqlIdentifier.fromInternal(entry.getKey()); - DataType fieldType = parse(entry.getValue(), userTypes, attachmentPoint, logPrefix); - builder.withField(fieldName, fieldType); - } - // Create a frozen UserType since C* 2.x UDTs are always frozen. 
- return builder.frozen().withAttachmentPoint(attachmentPoint).build(); - } - } - - if (next.startsWith("org.apache.cassandra.db.marshal.TupleType")) { - List rawTypes = parser.getTypeParameters(); - ImmutableList.Builder componentTypesBuilder = ImmutableList.builder(); - for (String rawType : rawTypes) { - componentTypesBuilder.add(parse(rawType, userTypes, attachmentPoint, logPrefix)); - } - return new DefaultTupleType(componentTypesBuilder.build(), attachmentPoint); - } - - if (next.startsWith("org.apache.cassandra.db.marshal.VectorType")) { - Iterator rawTypes = parser.getTypeParameters().iterator(); - DataType subtype = parse(rawTypes.next(), userTypes, attachmentPoint, logPrefix); - int dimensions = Integer.parseInt(rawTypes.next()); - return DataTypes.vectorOf(subtype, dimensions); - } - - DataType type = NATIVE_TYPES_BY_CLASS_NAME.get(next); - return type == null ? DataTypes.custom(toParse) : type; - } - - static boolean isReversed(String toParse) { - return toParse.startsWith("org.apache.cassandra.db.marshal.ReversedType"); - } - - private static String getNestedClassName(String className) { - Parser p = new Parser(className, 0); - p.parseNextName(); - List l = p.getTypeParameters(); - if (l.size() != 1) { - throw new IllegalStateException(); - } - className = l.get(0); - return className; - } - - static class Parser { - - private final String str; - private int idx; - - Parser(String str, int idx) { - this.str = str; - this.idx = idx; - } - - String parseNextName() { - skipBlank(); - return readNextIdentifier(); - } - - private String readOne() { - String name = parseNextName(); - String args = readRawArguments(); - return name + args; - } - - // Assumes we have just read a class name and read it's potential arguments - // blindly. I.e. it assume that either parsing is done or that we're on a '(' - // and this reads everything up until the corresponding closing ')'. It - // returns everything read, including the enclosing parenthesis. 
- private String readRawArguments() { - skipBlank(); - - if (isEOS() || str.charAt(idx) == ')' || str.charAt(idx) == ',') { - return ""; - } - - if (str.charAt(idx) != '(') { - throw new IllegalStateException( - String.format( - "Expecting char %d of %s to be '(' but '%c' found", idx, str, str.charAt(idx))); - } - - int i = idx; - int open = 1; - while (open > 0) { - ++idx; - - if (isEOS()) { - throw new IllegalStateException("Non closed parenthesis"); - } - - if (str.charAt(idx) == '(') { - open++; - } else if (str.charAt(idx) == ')') { - open--; - } - } - // we've stopped at the last closing ')' so move past that - ++idx; - return str.substring(i, idx); - } - - List getTypeParameters() { - List list = new ArrayList<>(); - - if (isEOS()) { - return list; - } - - if (str.charAt(idx) != '(') { - throw new IllegalStateException(); - } - - ++idx; // skipping '(' - - while (skipBlankAndComma()) { - if (str.charAt(idx) == ')') { - ++idx; - return list; - } - list.add(readOne()); - } - throw new IllegalArgumentException( - String.format( - "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); - } - - Map getCollectionsParameters() { - if (isEOS()) { - return Collections.emptyMap(); - } - if (str.charAt(idx) != '(') { - throw new IllegalStateException(); - } - ++idx; // skipping '(' - return getNameAndTypeParameters(); - } - - // Must be at the start of the first parameter to read - private Map getNameAndTypeParameters() { - // The order of the hashmap matters for UDT - Map map = new LinkedHashMap<>(); - - while (skipBlankAndComma()) { - if (str.charAt(idx) == ')') { - ++idx; - return map; - } - - String bbHex = readNextIdentifier(); - String name = null; - try { - name = - TypeCodecs.TEXT.decode( - Bytes.fromHexString("0x" + bbHex), DefaultProtocolVersion.DEFAULT); - } catch (NumberFormatException e) { - throwSyntaxError(e.getMessage()); - } - - skipBlank(); - if (str.charAt(idx) != ':') { - throwSyntaxError("expecting ':' token"); - } - - ++idx; 
- skipBlank(); - map.put(name, readOne()); - } - throw new IllegalArgumentException( - String.format( - "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); - } - - private void throwSyntaxError(String msg) { - throw new IllegalArgumentException( - String.format("Syntax error parsing '%s' at char %d: %s", str, idx, msg)); - } - - private boolean isEOS() { - return isEOS(str, idx); - } - - private static boolean isEOS(String str, int i) { - return i >= str.length(); - } - - private void skipBlank() { - idx = skipBlank(str, idx); - } - - private static int skipBlank(String str, int i) { - while (!isEOS(str, i) && ParseUtils.isBlank(str.charAt(i))) { - ++i; - } - return i; - } - - // skip all blank and at best one comma, return true if there not EOS - private boolean skipBlankAndComma() { - boolean commaFound = false; - while (!isEOS()) { - int c = str.charAt(idx); - if (c == ',') { - if (commaFound) { - return true; - } else { - commaFound = true; - } - } else if (!ParseUtils.isBlank(c)) { - return true; - } - ++idx; - } - return false; - } - - // left idx positioned on the character stopping the read - private String readNextIdentifier() { - int i = idx; - while (!isEOS() && ParseUtils.isCqlIdentifierChar(str.charAt(idx))) { - ++idx; - } - return str.substring(i, idx); - } - - @Override - public String toString() { - return str.substring(0, idx) - + "[" - + (idx == str.length() ? 
"" : str.charAt(idx)) - + "]" - + str.substring(idx + 1); - } - } - - @VisibleForTesting - static ImmutableMap NATIVE_TYPES_BY_CLASS_NAME = - new ImmutableMap.Builder() - .put("org.apache.cassandra.db.marshal.AsciiType", DataTypes.ASCII) - .put("org.apache.cassandra.db.marshal.LongType", DataTypes.BIGINT) - .put("org.apache.cassandra.db.marshal.BytesType", DataTypes.BLOB) - .put("org.apache.cassandra.db.marshal.BooleanType", DataTypes.BOOLEAN) - .put("org.apache.cassandra.db.marshal.CounterColumnType", DataTypes.COUNTER) - .put("org.apache.cassandra.db.marshal.DecimalType", DataTypes.DECIMAL) - .put("org.apache.cassandra.db.marshal.DoubleType", DataTypes.DOUBLE) - .put("org.apache.cassandra.db.marshal.FloatType", DataTypes.FLOAT) - .put("org.apache.cassandra.db.marshal.InetAddressType", DataTypes.INET) - .put("org.apache.cassandra.db.marshal.Int32Type", DataTypes.INT) - .put("org.apache.cassandra.db.marshal.UTF8Type", DataTypes.TEXT) - .put("org.apache.cassandra.db.marshal.TimestampType", DataTypes.TIMESTAMP) - .put("org.apache.cassandra.db.marshal.SimpleDateType", DataTypes.DATE) - .put("org.apache.cassandra.db.marshal.TimeType", DataTypes.TIME) - .put("org.apache.cassandra.db.marshal.UUIDType", DataTypes.UUID) - .put("org.apache.cassandra.db.marshal.IntegerType", DataTypes.VARINT) - .put("org.apache.cassandra.db.marshal.TimeUUIDType", DataTypes.TIMEUUID) - .put("org.apache.cassandra.db.marshal.ByteType", DataTypes.TINYINT) - .put("org.apache.cassandra.db.marshal.ShortType", DataTypes.SMALLINT) - .put("org.apache.cassandra.db.marshal.DurationType", DataTypes.DURATION) - .build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java deleted file mode 100644 index 8d5e068b431..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.ArrayList; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import 
net.jcip.annotations.ThreadSafe; - -/** - * Parses data types from schema tables, for Cassandra 3.0 and above. - * - *

In these versions, data types appear as string literals, like "ascii" or - * "tuple<int,int>". - */ -@ThreadSafe -public class DataTypeCqlNameParser implements DataTypeParser { - - @Override - public DataType parse( - CqlIdentifier keyspaceId, - String toParse, - Map userTypes, - InternalDriverContext context) { - // Top-level is never frozen, it is only set recursively when we encounter the frozen<> keyword - return parse(toParse, keyspaceId, false, userTypes, context); - } - - private DataType parse( - String toParse, - CqlIdentifier keyspaceId, - boolean frozen, - Map userTypes, - InternalDriverContext context) { - - if (toParse.startsWith("'")) { - return DataTypes.custom(toParse.substring(1, toParse.length() - 1)); - } - - Parser parser = new Parser(toParse, 0); - String type = parser.parseTypeName(); - - if (type.equalsIgnoreCase(RawColumn.THRIFT_EMPTY_TYPE)) { - return DataTypes.custom(type); - } - - DataType nativeType = NATIVE_TYPES_BY_NAME.get(type.toLowerCase(Locale.ROOT)); - if (nativeType != null) { - return nativeType; - } - - if (parser.isEOS()) { - // No parameters => it's a UDT - CqlIdentifier name = CqlIdentifier.fromCql(type); - if (userTypes != null) { - UserDefinedType userType = userTypes.get(name); - if (userType == null) { - throw new IllegalStateException( - String.format("Can't find referenced user type %s", type)); - } - return userType.copy(frozen); - } else { - return new ShallowUserDefinedType(keyspaceId, name, frozen); - } - } - - List parameters = parser.parseTypeParameters(); - if (type.equalsIgnoreCase("list")) { - if (parameters.size() != 1) { - throw new IllegalArgumentException( - String.format("Expecting single parameter for list, got %s", parameters)); - } - DataType elementType = parse(parameters.get(0), keyspaceId, false, userTypes, context); - return DataTypes.listOf(elementType, frozen); - } - - if (type.equalsIgnoreCase("set")) { - if (parameters.size() != 1) { - throw new IllegalArgumentException( - 
String.format("Expecting single parameter for set, got %s", parameters)); - } - DataType elementType = parse(parameters.get(0), keyspaceId, false, userTypes, context); - return DataTypes.setOf(elementType, frozen); - } - - if (type.equalsIgnoreCase("map")) { - if (parameters.size() != 2) { - throw new IllegalArgumentException( - String.format("Expecting two parameters for map, got %s", parameters)); - } - DataType keyType = parse(parameters.get(0), keyspaceId, false, userTypes, context); - DataType valueType = parse(parameters.get(1), keyspaceId, false, userTypes, context); - return DataTypes.mapOf(keyType, valueType, frozen); - } - - if (type.equalsIgnoreCase("frozen")) { - if (parameters.size() != 1) { - throw new IllegalArgumentException( - String.format("Expecting single parameter for frozen keyword, got %s", parameters)); - } - return parse(parameters.get(0), keyspaceId, true, userTypes, context); - } - - if (type.equalsIgnoreCase("tuple")) { - if (parameters.isEmpty()) { - throw new IllegalArgumentException("Expecting at list one parameter for tuple, got none"); - } - ImmutableList.Builder componentTypesBuilder = ImmutableList.builder(); - for (String rawType : parameters) { - componentTypesBuilder.add(parse(rawType, keyspaceId, false, userTypes, context)); - } - return new DefaultTupleType(componentTypesBuilder.build(), context); - } - - if (type.equalsIgnoreCase("vector")) { - if (parameters.size() != 2) { - throw new IllegalArgumentException( - String.format("Expecting two parameters for vector custom type, got %s", parameters)); - } - DataType subType = parse(parameters.get(0), keyspaceId, false, userTypes, context); - int dimensions = Integer.parseInt(parameters.get(1)); - return new DefaultVectorType(subType, dimensions); - } - - throw new IllegalArgumentException("Could not parse type name " + toParse); - } - - private static class Parser { - - private final String str; - - private int idx; - - Parser(String str, int idx) { - this.str = str; - this.idx 
= idx; - } - - String parseTypeName() { - idx = ParseUtils.skipSpaces(str, idx); - return readNextIdentifier(); - } - - List parseTypeParameters() { - List list = new ArrayList<>(); - - if (isEOS()) { - return list; - } - - skipBlankAndComma(); - - if (str.charAt(idx) != '<') { - throw new IllegalStateException(); - } - - ++idx; // skipping '<' - - while (skipBlankAndComma()) { - if (str.charAt(idx) == '>') { - ++idx; - return list; - } - - String name = parseTypeName(); - String args = readRawTypeParameters(); - list.add(name + args); - } - throw new IllegalArgumentException( - String.format( - "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); - } - - // left idx positioned on the character stopping the read - private String readNextIdentifier() { - int startIdx = idx; - if (str.charAt(startIdx) == '"') { // case-sensitive name included in double quotes - ++idx; - // read until closing quote. - while (!isEOS()) { - boolean atQuote = str.charAt(idx) == '"'; - ++idx; - if (atQuote) { - // if the next character is also a quote, this is an escaped quote, continue reading, - // otherwise stop. - if (!isEOS() && str.charAt(idx) == '"') { - ++idx; - } else { - break; - } - } - } - } else if (str.charAt(startIdx) == '\'') { // custom type name included in single quotes - ++idx; - // read until closing quote. - while (!isEOS() && str.charAt(idx++) != '\'') { - /* loop */ - } - } else { - while (!isEOS() - && (ParseUtils.isCqlIdentifierChar(str.charAt(idx)) || str.charAt(idx) == '"')) { - ++idx; - } - } - return str.substring(startIdx, idx); - } - - // Assumes we have just read a type name and read its potential arguments blindly. I.e. it - // assumes that either parsing is done or that we're on a '<' and this reads everything up until - // the corresponding closing '>'. It returns everything read, including the enclosing brackets. 
- private String readRawTypeParameters() { - idx = ParseUtils.skipSpaces(str, idx); - - if (isEOS() || str.charAt(idx) == '>' || str.charAt(idx) == ',') { - return ""; - } - - if (str.charAt(idx) != '<') { - throw new IllegalStateException( - String.format( - "Expecting char %d of %s to be '<' but '%c' found", idx, str, str.charAt(idx))); - } - - int i = idx; - int open = 1; - boolean inQuotes = false; - while (open > 0) { - ++idx; - - if (isEOS()) { - throw new IllegalStateException("Non closed angle brackets"); - } - - // Only parse for '<' and '>' characters if not within a quoted identifier. - // Note we don't need to handle escaped quotes ("") in type names here, because they just - // cause inQuotes to flip to false and immediately back to true - if (!inQuotes) { - if (str.charAt(idx) == '"') { - inQuotes = true; - } else if (str.charAt(idx) == '<') { - open++; - } else if (str.charAt(idx) == '>') { - open--; - } - } else if (str.charAt(idx) == '"') { - inQuotes = false; - } - } - // we've stopped at the last closing ')' so move past that - ++idx; - return str.substring(i, idx); - } - - // skip all blank and at best one comma, return true if there not EOS - private boolean skipBlankAndComma() { - boolean commaFound = false; - while (!isEOS()) { - int c = str.charAt(idx); - if (c == ',') { - if (commaFound) { - return true; - } else { - commaFound = true; - } - } else if (!ParseUtils.isBlank(c)) { - return true; - } - ++idx; - } - return false; - } - - private boolean isEOS() { - return idx >= str.length(); - } - - @Override - public String toString() { - return str.substring(0, idx) - + "[" - + (idx == str.length() ? 
"" : str.charAt(idx)) - + "]" - + str.substring(idx + 1); - } - } - - @VisibleForTesting - static final ImmutableMap NATIVE_TYPES_BY_NAME = - new ImmutableMap.Builder() - .put("ascii", DataTypes.ASCII) - .put("bigint", DataTypes.BIGINT) - .put("blob", DataTypes.BLOB) - .put("boolean", DataTypes.BOOLEAN) - .put("counter", DataTypes.COUNTER) - .put("decimal", DataTypes.DECIMAL) - .put("double", DataTypes.DOUBLE) - .put("float", DataTypes.FLOAT) - .put("inet", DataTypes.INET) - .put("int", DataTypes.INT) - .put("text", DataTypes.TEXT) - .put("varchar", DataTypes.TEXT) - .put("timestamp", DataTypes.TIMESTAMP) - .put("date", DataTypes.DATE) - .put("time", DataTypes.TIME) - .put("uuid", DataTypes.UUID) - .put("varint", DataTypes.VARINT) - .put("timeuuid", DataTypes.TIMEUUID) - .put("tinyint", DataTypes.TINYINT) - .put("smallint", DataTypes.SMALLINT) - .put("duration", DataTypes.DURATION) - .build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java deleted file mode 100644 index 0f191d08a53..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -/** Parses data types from their string representation in schema tables. */ -public interface DataTypeParser { - - /** - * @param userTypes the UDTs in the current keyspace, if we know them already. This is used to - * resolve subtypes if the type to parse is complex (such as {@code list}). The only - * situation where we don't have them is when we refresh all the UDTs of a keyspace; in that - * case, the filed will be {@code null} and any UDT encountered by this method will always be - * re-created from scratch: for Cassandra < 2.2, this means parsing the whole definition; - * for > 3.0, this means materializing it as a {@link ShallowUserDefinedType} that will be - * resolved in a second pass. 
- */ - DataType parse( - CqlIdentifier keyspaceId, - String toParse, - Map userTypes, - InternalDriverContext context); - - default List parse( - CqlIdentifier keyspaceId, - List typeStrings, - Map userTypes, - InternalDriverContext context) { - if (typeStrings.isEmpty()) { - return Collections.emptyList(); - } else { - ImmutableList.Builder builder = ImmutableList.builder(); - for (String typeString : typeStrings) { - builder.add(parse(keyspaceId, typeString, userTypes, context)); - } - return builder.build(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java deleted file mode 100644 index 5fa64027be5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.metadata.schema.parsing.DseSchemaParser; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultSchemaParserFactory implements SchemaParserFactory { - - private final InternalDriverContext context; - - public DefaultSchemaParserFactory(InternalDriverContext context) { - this.context = context; - } - - @Override - public SchemaParser newInstance(SchemaRows rows) { - boolean isDse = rows.getNode().getExtras().containsKey(DseNodeProperties.DSE_VERSION); - return isDse ? new DseSchemaParser(rows, context) : new CassandraSchemaParser(rows, context); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java deleted file mode 100644 index 54786e999ac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultFunctionMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class FunctionParser { - - private static final Logger LOG = LoggerFactory.getLogger(FunctionParser.class); - - private final DataTypeParser dataTypeParser; - private final InternalDriverContext context; - private final String logPrefix; - - public FunctionParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.dataTypeParser = dataTypeParser; - this.context = context; - this.logPrefix = context.getSessionName(); - } - - public FunctionMetadata parseFunction( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - // Cassandra < 3.0: - // CREATE TABLE system.schema_functions ( - // 
keyspace_name text, - // function_name text, - // signature frozen>, - // argument_names list, - // argument_types list, - // body text, - // called_on_null_input boolean, - // language text, - // return_type text, - // PRIMARY KEY (keyspace_name, function_name, signature) - // ) WITH CLUSTERING ORDER BY (function_name ASC, signature ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.functions ( - // keyspace_name text, - // function_name text, - // argument_names frozen>, - // argument_types frozen>, - // body text, - // called_on_null_input boolean, - // language text, - // return_type text, - // PRIMARY KEY (keyspace_name, function_name, argument_types) - // ) WITH CLUSTERING ORDER BY (function_name ASC, argument_types ASC) - String simpleName = row.getString("function_name"); - List argumentNames = - ImmutableList.copyOf( - Lists.transform(row.getListOfString("argument_names"), CqlIdentifier::fromInternal)); - List argumentTypes = row.getListOfString("argument_types"); - if (argumentNames.size() != argumentTypes.size()) { - LOG.warn( - "[{}] Error parsing system row for function {}.{}, " - + "number of argument names and types don't match (got {} and {}).", - logPrefix, - keyspaceId.asInternal(), - simpleName, - argumentNames.size(), - argumentTypes.size()); - return null; - } - FunctionSignature signature = - new FunctionSignature( - CqlIdentifier.fromInternal(simpleName), - dataTypeParser.parse(keyspaceId, argumentTypes, userDefinedTypes, context)); - String body = row.getString("body"); - Boolean calledOnNullInput = row.getBoolean("called_on_null_input"); - String language = row.getString("language"); - DataType returnType = - dataTypeParser.parse(keyspaceId, row.getString("return_type"), userDefinedTypes, context); - - return new DefaultFunctionMetadata( - keyspaceId, signature, argumentNames, body, calledOnNullInput, language, returnType); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RawColumn.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RawColumn.java deleted file mode 100644 index 331f4841f79..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RawColumn.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.primitives.Ints; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.ListIterator; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * An intermediary format to manipulate columns before we turn them into {@link ColumnMetadata} - * instances. 
- */ -@NotThreadSafe -public class RawColumn implements Comparable { - - public static final String KIND_PARTITION_KEY = "partition_key"; - public static final String KIND_CLUSTERING_COLUMN = "clustering"; - public static final String KIND_REGULAR = "regular"; - public static final String KIND_COMPACT_VALUE = "compact_value"; - public static final String KIND_STATIC = "static"; - - /** - * Upon migration from thrift to CQL, Cassandra internally creates a surrogate column "value" of - * type {@code EmptyType} for dense tables. This resolves into this CQL type name. - * - *

This column shouldn't be exposed to the user but is currently exposed in system tables. - */ - public static final String THRIFT_EMPTY_TYPE = "empty"; - - public final CqlIdentifier name; - public String kind; - public final int position; - public final String dataType; - public final boolean reversed; - public final String indexName; - public final String indexType; - public final Map indexOptions; - - private RawColumn(AdminRow row) { - // Cassandra < 3.0: - // CREATE TABLE system.schema_columns ( - // keyspace_name text, - // columnfamily_name text, - // column_name text, - // component_index int, - // index_name text, - // index_options text, - // index_type text, - // type text, - // validator text, - // PRIMARY KEY (keyspace_name, columnfamily_name, column_name) - // ) WITH CLUSTERING ORDER BY (columnfamily_name ASC, column_name ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.columns ( - // keyspace_name text, - // table_name text, - // column_name text, - // clustering_order text, - // column_name_bytes blob, - // kind text, - // position int, - // type text, - // PRIMARY KEY (keyspace_name, table_name, column_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC, column_name ASC) - this.name = CqlIdentifier.fromInternal(row.getString("column_name")); - if (row.contains("kind")) { - this.kind = row.getString("kind"); - } else { - this.kind = row.getString("type"); - // remap clustering_key to KIND_CLUSTERING_COLUMN so code doesn't have to check for both. - if (this.kind.equals("clustering_key")) { - this.kind = KIND_CLUSTERING_COLUMN; - } - } - - Integer rawPosition = - row.contains("position") ? row.getInteger("position") : row.getInteger("component_index"); - this.position = (rawPosition == null || rawPosition == -1) ? 0 : rawPosition; - - this.dataType = row.contains("validator") ? row.getString("validator") : row.getString("type"); - this.reversed = - row.contains("clustering_order") - ? 
"desc".equals(row.getString("clustering_order")) - : DataTypeClassNameParser.isReversed(dataType); - this.indexName = row.getString("index_name"); - this.indexType = row.getString("index_type"); - // index_options can apparently contain the string 'null' (JAVA-834) - String indexOptionsString = row.getString("index_options"); - this.indexOptions = - (indexOptionsString == null || indexOptionsString.equals("null")) - ? Collections.emptyMap() - : SimpleJsonParser.parseStringMap(indexOptionsString); - } - - @Override - public int compareTo(@NonNull RawColumn that) { - // First, order by kind. Then order partition key and clustering columns by position. For - // other kinds, order by column name. - if (!this.kind.equals(that.kind)) { - return Ints.compare(rank(this.kind), rank(that.kind)); - } else if (kind.equals(KIND_PARTITION_KEY) || kind.equals(KIND_CLUSTERING_COLUMN)) { - return Integer.compare(this.position, that.position); - } else { - return this.name.asInternal().compareTo(that.name.asInternal()); - } - } - - private static int rank(String kind) { - switch (kind) { - case KIND_PARTITION_KEY: - return 1; - case KIND_CLUSTERING_COLUMN: - return 2; - case KIND_REGULAR: - return 3; - case KIND_COMPACT_VALUE: - return 4; - case KIND_STATIC: - return 5; - default: - return Integer.MAX_VALUE; - } - } - - @SuppressWarnings("MixedMutabilityReturnType") - public static List toRawColumns(Collection rows) { - if (rows.isEmpty()) { - return Collections.emptyList(); - } else { - // Use a mutable list, we might remove some elements later - List result = Lists.newArrayListWithExpectedSize(rows.size()); - for (AdminRow row : rows) { - result.add(new RawColumn(row)); - } - return result; - } - } - - /** - * Helper method to filter columns while parsing a table's metadata. - * - *

Upon migration from thrift to CQL, Cassandra internally creates a pair of surrogate - * clustering/regular columns for compact static tables. These columns shouldn't be exposed to the - * user but are currently returned by C*. We also need to remove the static keyword for all other - * columns in the table. - */ - public static void pruneStaticCompactTableColumns(List columns) { - ListIterator iterator = columns.listIterator(); - while (iterator.hasNext()) { - RawColumn column = iterator.next(); - switch (column.kind) { - case KIND_CLUSTERING_COLUMN: - case KIND_REGULAR: - iterator.remove(); - break; - case KIND_STATIC: - column.kind = KIND_REGULAR; - break; - default: - // nothing to do - } - } - } - - /** Helper method to filter columns while parsing a table's metadata. */ - public static void pruneDenseTableColumnsV3(List columns) { - ListIterator iterator = columns.listIterator(); - while (iterator.hasNext()) { - RawColumn column = iterator.next(); - if (column.kind.equals(KIND_REGULAR) && THRIFT_EMPTY_TYPE.equals(column.dataType)) { - iterator.remove(); - } - } - } - - /** - * Helper method to filter columns while parsing a table's metadata. - * - *

This is similar to {@link #pruneDenseTableColumnsV3(List)}, but for legacy C* versions. - */ - public static void pruneDenseTableColumnsV2(List columns) { - ListIterator iterator = columns.listIterator(); - while (iterator.hasNext()) { - RawColumn column = iterator.next(); - if (column.kind.equals(KIND_COMPACT_VALUE) && column.name.asInternal().isEmpty()) { - iterator.remove(); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java deleted file mode 100644 index 86c914459d7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.Describable; -import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.nio.ByteBuffer; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -// Shared code for table and view parsing -@ThreadSafe -public abstract class RelationParser { - - protected final SchemaRows rows; - protected final InternalDriverContext context; - protected final String logPrefix; - - protected RelationParser(SchemaRows rows, InternalDriverContext context) { - this.rows = rows; - this.context = context; - this.logPrefix = context.getSessionName(); - } - - protected Map parseOptions(AdminRow row) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Map.Entry> entry : OPTION_CODECS.entrySet()) { - String name = entry.getKey(); - CqlIdentifier id = CqlIdentifier.fromInternal(name); - TypeCodec codec = entry.getValue(); - - if (name.equals("caching") && row.isString("caching")) { - // C* <=2.2, caching is stored as a string, and also appears as a string in the WITH clause. 
- builder.put(id, row.getString(name)); - } else if (name.equals("compaction_strategy_class")) { - // C* <=2.2, compaction options split in two columns - String strategyClass = row.getString(name); - if (strategyClass != null) { - builder.put( - CqlIdentifier.fromInternal("compaction"), - ImmutableMap.builder() - .put("class", strategyClass) - .putAll( - SimpleJsonParser.parseStringMap(row.getString("compaction_strategy_options"))) - .build()); - } - } else if (name.equals("compression_parameters")) { - // C* <=2.2, compression stored as a string - String compressionParameters = row.getString(name); - if (compressionParameters != null) { - builder.put( - CqlIdentifier.fromInternal("compression"), - ImmutableMap.copyOf(SimpleJsonParser.parseStringMap(row.getString(name)))); - } - } else if (!isDeprecatedInCassandra4(name)) { - // Default case, read the value in a generic fashion - Object value = row.get(name, codec); - if (value != null) { - builder.put(id, value); - } - } - } - return builder.build(); - } - - /** - * Handle a few oddities in Cassandra 4: some options still appear in system_schema.tables, but - * they are not valid in CREATE statements anymore. We need to exclude them from our metadata, - * otherwise {@link Describable#describe(boolean)} will generate invalid CQL. 
- */ - private boolean isDeprecatedInCassandra4(String name) { - return isCassandra4OrAbove() - && (name.equals("read_repair_chance") - || name.equals("dclocal_read_repair_chance") - // default_time_to_live is not allowed in CREATE MATERIALIZED VIEW statements - || (name.equals("default_time_to_live") && (this instanceof ViewParser))); - } - - private boolean isCassandra4OrAbove() { - Node node = rows.getNode(); - return !node.getExtras().containsKey(DseNodeProperties.DSE_VERSION) - && node.getCassandraVersion() != null - && node.getCassandraVersion().nextStable().compareTo(Version.V4_0_0) >= 0; - } - - public static void appendOptions(Map options, ScriptBuilder builder) { - for (Map.Entry entry : options.entrySet()) { - CqlIdentifier name = entry.getKey(); - Object value = entry.getValue(); - String formattedValue; - if (name.asInternal().equals("caching") && value instanceof String) { - formattedValue = TypeCodecs.TEXT.format((String) value); - } else { - @SuppressWarnings("unchecked") - TypeCodec codec = - (TypeCodec) RelationParser.OPTION_CODECS.get(name.asInternal()); - formattedValue = codec.format(value); - } - String optionName = name.asCql(true); - if ("local_read_repair_chance".equals(optionName)) { - // Another small quirk in C* <= 2.2 - optionName = "dclocal_read_repair_chance"; - } - builder.andWith().append(optionName).append(" = ").append(formattedValue); - } - } - - public static final TypeCodec> MAP_OF_TEXT_TO_TEXT = - TypeCodecs.mapOf(TypeCodecs.TEXT, TypeCodecs.TEXT); - private static final TypeCodec> MAP_OF_TEXT_TO_BLOB = - TypeCodecs.mapOf(TypeCodecs.TEXT, TypeCodecs.BLOB); - /** - * The columns of the system table that are turned into entries in {@link - * RelationMetadata#getOptions()}. 
- */ - public static final ImmutableMap> OPTION_CODECS = - ImmutableMap.>builder() - .put("additional_write_policy", TypeCodecs.TEXT) - .put("bloom_filter_fp_chance", TypeCodecs.DOUBLE) - // In C* <= 2.2, this is a string, not a map (this is special-cased in parseOptions): - .put("caching", MAP_OF_TEXT_TO_TEXT) - .put("cdc", TypeCodecs.BOOLEAN) - .put("comment", TypeCodecs.TEXT) - .put("compaction", MAP_OF_TEXT_TO_TEXT) - // In C*<=2.2, must read from this column and another one called - // 'compaction_strategy_options' (this is special-cased in parseOptions): - .put("compaction_strategy_class", TypeCodecs.TEXT) - .put("compression", MAP_OF_TEXT_TO_TEXT) - // In C*<=2.2, must parse this column into a map (this is special-cased in parseOptions): - .put("compression_parameters", TypeCodecs.TEXT) - .put("crc_check_chance", TypeCodecs.DOUBLE) - .put("dclocal_read_repair_chance", TypeCodecs.DOUBLE) - .put("default_time_to_live", TypeCodecs.INT) - .put("extensions", MAP_OF_TEXT_TO_BLOB) - .put("gc_grace_seconds", TypeCodecs.INT) - .put("local_read_repair_chance", TypeCodecs.DOUBLE) - .put("max_index_interval", TypeCodecs.INT) - .put("memtable_flush_period_in_ms", TypeCodecs.INT) - .put("min_index_interval", TypeCodecs.INT) - .put("read_repair", TypeCodecs.TEXT) - .put("read_repair_chance", TypeCodecs.DOUBLE) - .put("speculative_retry", TypeCodecs.TEXT) - .build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java deleted file mode 100644 index 109ebea45c1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; - -/** - * The main entry point for system schema rows parsing. - * - *

Implementations must be thread-safe. - */ -public interface SchemaParser { - - /** - * Process the rows that this parser was initialized with, and creates a refresh that will be - * applied to the metadata. - * - * @see SchemaParserFactory#newInstance(SchemaRows) - */ - SchemaRefresh parse(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java deleted file mode 100644 index 93db1472e4d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; - -public interface SchemaParserFactory { - SchemaParser newInstance(SchemaRows rows); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java deleted file mode 100644 index e979a8fd822..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * A very simple json parser. The only reason we need to read json in the driver is because for - * historical reason Cassandra encodes a few properties using json in the schema and we need to - * decode them. - * - *

We however don't need a full-blown JSON library because: 1) we know we only need to decode - * string lists and string maps 2) we can basically assume the input is valid, we don't particularly - * have to bother about decoding exactly JSON as long as we at least decode what we need. 3) we - * don't really care much about performance, none of this is done in performance sensitive parts. - * - *

So instead of pulling a new dependency, we roll out our own very dumb parser. We should - * obviously not expose this publicly. - */ -@NotThreadSafe -public class SimpleJsonParser { - - private final String input; - private int idx; - - private SimpleJsonParser(String input) { - this.input = input; - } - - @SuppressWarnings("MixedMutabilityReturnType") - public static List parseStringList(String input) { - if (input == null || input.isEmpty()) { - return Collections.emptyList(); - } - - List output = new ArrayList<>(); - SimpleJsonParser parser = new SimpleJsonParser(input); - if (parser.nextCharSkipSpaces() != '[') { - throw new IllegalArgumentException("Not a JSON list: " + input); - } - - char c = parser.nextCharSkipSpaces(); - if (c == ']') { - return output; - } - - while (true) { - assert c == '"'; - output.add(parser.nextString()); - c = parser.nextCharSkipSpaces(); - if (c == ']') { - return output; - } - assert c == ','; - c = parser.nextCharSkipSpaces(); - } - } - - @SuppressWarnings("MixedMutabilityReturnType") - public static Map parseStringMap(String input) { - if (input == null || input.isEmpty()) { - return Collections.emptyMap(); - } - - Map output = new HashMap<>(); - SimpleJsonParser parser = new SimpleJsonParser(input); - if (parser.nextCharSkipSpaces() != '{') { - throw new IllegalArgumentException("Not a JSON map: " + input); - } - - char c = parser.nextCharSkipSpaces(); - if (c == '}') { - return output; - } - - while (true) { - assert c == '"'; - String key = parser.nextString(); - c = parser.nextCharSkipSpaces(); - assert c == ':'; - c = parser.nextCharSkipSpaces(); - assert c == '"'; - String value = parser.nextString(); - output.put(key, value); - c = parser.nextCharSkipSpaces(); - if (c == '}') { - return output; - } - assert c == ','; - c = parser.nextCharSkipSpaces(); - } - } - - /** Read the next char, the one at position idx, and advance ix. 
*/ - private char nextChar() { - if (idx >= input.length()) { - throw new IllegalArgumentException("Invalid json input: " + input); - } - return input.charAt(idx++); - } - - /** Same as nextChar, except that it skips space characters (' ', '\t' and '\n'). */ - private char nextCharSkipSpaces() { - char c = nextChar(); - while (c == ' ' || c == '\t' || c == '\n') { - c = nextChar(); - } - return c; - } - - /** - * Reads a String, assuming idx is on the first character of the string (i.e. the one after the - * opening double-quote character). After the string has been read, idx will be on the first - * character after the closing double-quote. - */ - private String nextString() { - assert input.charAt(idx - 1) == '"' : "Char is '" + input.charAt(idx - 1) + '\''; - StringBuilder sb = new StringBuilder(); - while (true) { - char c = nextChar(); - switch (c) { - case '\n': - case '\r': - throw new IllegalArgumentException("Unterminated string"); - case '\\': - c = nextChar(); - switch (c) { - case 'b': - sb.append('\b'); - break; - case 't': - sb.append('\t'); - break; - case 'n': - sb.append('\n'); - break; - case 'f': - sb.append('\f'); - break; - case 'r': - sb.append('\r'); - break; - case 'u': - sb.append((char) Integer.parseInt(input.substring(idx, idx + 4), 16)); - idx += 4; - break; - case '"': - case '\'': - case '\\': - case '/': - sb.append(c); - break; - default: - throw new IllegalArgumentException("Illegal escape"); - } - break; - default: - if (c == '"') { - return sb.toString(); - } - sb.append(c); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java deleted file mode 100644 index a3bda428ef3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultIndexMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultTableMetadata; -import 
com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class TableParser extends RelationParser { - - private static final Logger LOG = LoggerFactory.getLogger(TableParser.class); - - public TableParser(SchemaRows rows, InternalDriverContext context) { - super(rows, context); - } - - public TableMetadata parseTable( - AdminRow tableRow, CqlIdentifier keyspaceId, Map userTypes) { - // Cassandra <= 2.2: - // CREATE TABLE system.schema_columnfamilies ( - // keyspace_name text, - // columnfamily_name text, - // bloom_filter_fp_chance double, - // caching text, - // cf_id uuid, - // column_aliases text, (2.1 only) - // comment text, - // compaction_strategy_class text, - // compaction_strategy_options text, - // comparator text, - // compression_parameters text, - // default_time_to_live int, - // default_validator text, - // dropped_columns map, - // gc_grace_seconds int, - // index_interval int, - // is_dense boolean, (2.1 only) - // key_aliases text, (2.1 only) - // key_validator text, - // local_read_repair_chance double, - // max_compaction_threshold int, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_compaction_threshold int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // subcomparator text, - // type text, - // value_alias text, (2.1 only) - // PRIMARY KEY (keyspace_name, columnfamily_name) - // ) WITH CLUSTERING 
ORDER BY (columnfamily_name ASC) - // - // Cassandra 3.0: - // CREATE TABLE system_schema.tables ( - // keyspace_name text, - // table_name text, - // bloom_filter_fp_chance double, - // caching frozen>, - // cdc boolean, - // comment text, - // compaction frozen>, - // compression frozen>, - // crc_check_chance double, - // dclocal_read_repair_chance double, - // default_time_to_live int, - // extensions frozen>, - // flags frozen>, - // gc_grace_seconds int, - // id uuid, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // PRIMARY KEY (keyspace_name, table_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC) - CqlIdentifier tableId = - CqlIdentifier.fromInternal( - tableRow.getString( - tableRow.contains("table_name") ? "table_name" : "columnfamily_name")); - - UUID uuid = tableRow.contains("id") ? tableRow.getUuid("id") : tableRow.getUuid("cf_id"); - - List rawColumns = - RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - tableId); - return null; - } - - boolean isCompactStorage; - if (tableRow.contains("flags")) { - Set flags = tableRow.getSetOfString("flags"); - boolean isDense = flags.contains("dense"); - boolean isSuper = flags.contains("super"); - boolean isCompound = flags.contains("compound"); - isCompactStorage = isSuper || isDense || !isCompound; - boolean isStaticCompact = !isSuper && !isDense && !isCompound; - if (isStaticCompact) { - RawColumn.pruneStaticCompactTableColumns(rawColumns); - } else if (isDense) { - RawColumn.pruneDenseTableColumnsV3(rawColumns); - } - } else { - boolean isDense = tableRow.getBoolean("is_dense"); - if (isDense) { - RawColumn.pruneDenseTableColumnsV2(rawColumns); - } - 
DataTypeClassNameCompositeParser.ParseResult comparator = - new DataTypeClassNameCompositeParser() - .parseWithComposite(tableRow.getString("comparator"), keyspaceId, userTypes, context); - isCompactStorage = isDense || !comparator.isComposite; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - ImmutableMap.Builder indexesBuilder = ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - ColumnMetadata column = - new DefaultColumnMetadata( - keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - // nothing to do - } - allColumnsBuilder.put(column.getName(), column); - - IndexMetadata index = buildLegacyIndex(raw, column); - if (index != null) { - indexesBuilder.put(index.getName(), index); - } - } - - Map options; - try { - options = parseOptions(tableRow); - } catch (Exception e) { - // Options change the most often, so be especially lenient if anything goes wrong. 
- Loggers.warnWithException( - LOG, - "[{}] Error while parsing options for {}.{}, getOptions() will be empty", - logPrefix, - keyspaceId, - tableId, - e); - options = Collections.emptyMap(); - } - - Collection indexRows = - rows.indexes().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId); - for (AdminRow indexRow : indexRows) { - IndexMetadata index = buildModernIndex(keyspaceId, tableId, indexRow); - indexesBuilder.put(index.getName(), index); - } - - return new DefaultTableMetadata( - keyspaceId, - tableId, - uuid, - isCompactStorage, - false, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - options, - indexesBuilder.build()); - } - - TableMetadata parseVirtualTable( - AdminRow tableRow, CqlIdentifier keyspaceId, Map userTypes) { - - CqlIdentifier tableId = CqlIdentifier.fromInternal(tableRow.getString("table_name")); - - List rawColumns = - RawColumn.toRawColumns( - rows.virtualColumns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - tableId); - return null; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - ColumnMetadata column = - new DefaultColumnMetadata( - keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? 
ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - } - - allColumnsBuilder.put(column.getName(), column); - } - - return new DefaultTableMetadata( - keyspaceId, - tableId, - null, - false, - true, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - // In C*<=2.2, index information is stored alongside the column. - private IndexMetadata buildLegacyIndex(RawColumn raw, ColumnMetadata column) { - if (raw.indexName == null) { - return null; - } - return new DefaultIndexMetadata( - column.getKeyspace(), - column.getParent(), - CqlIdentifier.fromInternal(raw.indexName), - IndexKind.valueOf(raw.indexType), - buildLegacyIndexTarget(column, raw.indexOptions), - raw.indexOptions); - } - - private static String buildLegacyIndexTarget(ColumnMetadata column, Map options) { - String columnName = column.getName().asCql(true); - DataType columnType = column.getType(); - if (options.containsKey("index_keys")) { - return String.format("keys(%s)", columnName); - } - if (options.containsKey("index_keys_and_values")) { - return String.format("entries(%s)", columnName); - } - if ((columnType instanceof ListType && ((ListType) columnType).isFrozen()) - || (columnType instanceof SetType && ((SetType) columnType).isFrozen()) - || (columnType instanceof MapType && ((MapType) columnType).isFrozen())) { - return String.format("full(%s)", columnName); - } - // Note: the keyword 'values' is not accepted as a valid index target function until 3.0 - return columnName; - } - - // In C*>=3.0, index information is stored in a dedicated table: - // CREATE TABLE system_schema.indexes ( - // keyspace_name text, - // table_name text, - // index_name text, - // kind text, - // options frozen>, - // PRIMARY KEY (keyspace_name, table_name, index_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC, index_name ASC) - private IndexMetadata buildModernIndex( - CqlIdentifier keyspaceId, 
CqlIdentifier tableId, AdminRow row) { - CqlIdentifier name = CqlIdentifier.fromInternal(row.getString("index_name")); - IndexKind kind = IndexKind.valueOf(row.getString("kind")); - Map options = row.getMapOfStringToString("options"); - String target = options.get("target"); - return new DefaultIndexMetadata(keyspaceId, tableId, name, kind, target, options); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java deleted file mode 100644 index 442f46ee432..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.type.DefaultUserDefinedType; -import com.datastax.oss.driver.internal.core.util.DirectedGraph; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class UserDefinedTypeParser { - private final DataTypeParser dataTypeParser; - private final InternalDriverContext context; - - public UserDefinedTypeParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.dataTypeParser = dataTypeParser; - this.context = context; - } - - /** - * Contrary to other element parsers, this one processes all the types of a keyspace in one go. - * UDTs can depend on each other, but the system table returns them in alphabetical order. In - * order to properly build the definitions, we need to do a topological sort of the rows first, so - * that each type is parsed after its dependencies. 
- */ - public Map parse( - Collection typeRows, CqlIdentifier keyspaceId) { - if (typeRows.isEmpty()) { - return Collections.emptyMap(); - } else { - Map types = new LinkedHashMap<>(); - for (AdminRow row : topologicalSort(typeRows, keyspaceId)) { - UserDefinedType type = parseType(row, keyspaceId, types); - types.put(type.getName(), type); - } - return ImmutableMap.copyOf(types); - } - } - - @VisibleForTesting - Map parse(CqlIdentifier keyspaceId, AdminRow... typeRows) { - return parse(Arrays.asList(typeRows), keyspaceId); - } - - private List topologicalSort(Collection typeRows, CqlIdentifier keyspaceId) { - if (typeRows.size() == 1) { - AdminRow row = typeRows.iterator().next(); - return Collections.singletonList(row); - } else { - DirectedGraph graph = new DirectedGraph<>(typeRows); - for (AdminRow dependent : typeRows) { - for (AdminRow dependency : typeRows) { - if (dependent != dependency && dependsOn(dependent, dependency, keyspaceId)) { - // Edges mean "is depended upon by"; we want the types with no dependencies to come - // first in the sort. 
- graph.addEdge(dependency, dependent); - } - } - } - return graph.topologicalSort(); - } - } - - private boolean dependsOn(AdminRow dependent, AdminRow dependency, CqlIdentifier keyspaceId) { - CqlIdentifier dependencyId = CqlIdentifier.fromInternal(dependency.getString("type_name")); - for (String fieldTypeName : dependent.getListOfString("field_types")) { - DataType fieldType = dataTypeParser.parse(keyspaceId, fieldTypeName, null, context); - if (references(fieldType, dependencyId)) { - return true; - } - } - return false; - } - - private boolean references(DataType dependent, CqlIdentifier dependency) { - if (dependent instanceof UserDefinedType) { - UserDefinedType userType = (UserDefinedType) dependent; - return userType.getName().equals(dependency); - } else if (dependent instanceof ListType) { - ListType listType = (ListType) dependent; - return references(listType.getElementType(), dependency); - } else if (dependent instanceof SetType) { - SetType setType = (SetType) dependent; - return references(setType.getElementType(), dependency); - } else if (dependent instanceof MapType) { - MapType mapType = (MapType) dependent; - return references(mapType.getKeyType(), dependency) - || references(mapType.getValueType(), dependency); - } else if (dependent instanceof TupleType) { - TupleType tupleType = (TupleType) dependent; - for (DataType componentType : tupleType.getComponentTypes()) { - if (references(componentType, dependency)) { - return true; - } - } - } - return false; - } - - private UserDefinedType parseType( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - // Cassandra < 3.0: - // CREATE TABLE system.schema_usertypes ( - // keyspace_name text, - // type_name text, - // field_names list, - // field_types list, - // PRIMARY KEY (keyspace_name, type_name) - // ) WITH CLUSTERING ORDER BY (type_name ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.types ( - // keyspace_name text, - // type_name text, - // field_names 
frozen>, - // field_types frozen>, - // PRIMARY KEY (keyspace_name, type_name) - // ) WITH CLUSTERING ORDER BY (type_name ASC) - CqlIdentifier name = CqlIdentifier.fromInternal(row.getString("type_name")); - List fieldNames = - ImmutableList.copyOf( - Lists.transform(row.getListOfString("field_names"), CqlIdentifier::fromInternal)); - List fieldTypes = - dataTypeParser.parse( - keyspaceId, row.getListOfString("field_types"), userDefinedTypes, context); - - return new DefaultUserDefinedType(keyspaceId, name, false, fieldNames, fieldTypes, context); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java deleted file mode 100644 index 52773ea1c45..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultViewMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class ViewParser extends RelationParser { - - private static final Logger LOG = LoggerFactory.getLogger(ViewParser.class); - - public ViewParser(SchemaRows rows, InternalDriverContext context) { - super(rows, context); - } - - public ViewMetadata parseView( - AdminRow viewRow, CqlIdentifier keyspaceId, Map userTypes) { - // Cassandra 3.0 (no views in earlier versions): - // CREATE TABLE system_schema.views ( - // keyspace_name text, - // view_name text, - // base_table_id uuid, - // base_table_name text, - // bloom_filter_fp_chance double, - 
// caching frozen>, - // cdc boolean, - // comment text, - // compaction frozen>, - // compression frozen>, - // crc_check_chance double, - // dclocal_read_repair_chance double, - // default_time_to_live int, - // extensions frozen>, - // gc_grace_seconds int, - // id uuid, - // include_all_columns boolean, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // where_clause text, - // PRIMARY KEY (keyspace_name, view_name) - // ) WITH CLUSTERING ORDER BY (view_name ASC) - CqlIdentifier viewId = CqlIdentifier.fromInternal(viewRow.getString("view_name")); - - UUID uuid = viewRow.getUuid("id"); - CqlIdentifier baseTableId = CqlIdentifier.fromInternal(viewRow.getString("base_table_name")); - boolean includesAllColumns = - MoreObjects.firstNonNull(viewRow.getBoolean("include_all_columns"), false); - String whereClause = viewRow.getString("where_clause"); - - List rawColumns = - RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(viewId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing VIEW refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - viewId); - return null; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - ColumnMetadata column = - new DefaultColumnMetadata( - keyspaceId, viewId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? 
ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - // nothing to do - } - allColumnsBuilder.put(column.getName(), column); - } - - Map options; - try { - options = parseOptions(viewRow); - } catch (Exception e) { - // Options change the most often, so be especially lenient if anything goes wrong. - Loggers.warnWithException( - LOG, - "[{}] Error while parsing options for {}.{}, getOptions() will be empty", - logPrefix, - keyspaceId, - viewId, - e); - options = Collections.emptyMap(); - } - - return new DefaultViewMetadata( - keyspaceId, - viewId, - baseTableId, - includesAllColumns, - whereClause, - uuid, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - options); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java deleted file mode 100644 index 7577fd1bb92..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Cassandra21SchemaQueries extends CassandraSchemaQueries { - public Cassandra21SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected String selectKeyspacesQuery() { - return "SELECT * FROM system.schema_keyspaces"; - } - - @Override - protected String selectTablesQuery() { - return "SELECT * FROM system.schema_columnfamilies"; - } - - @Override - protected Optional selectViewsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectIndexesQuery() { - return Optional.empty(); - } - - @Override - protected String selectColumnsQuery() { - return "SELECT * FROM system.schema_columns"; - } - - @Override - protected String selectTypesQuery() { - return "SELECT * FROM system.schema_usertypes"; - } - - @Override - protected Optional selectFunctionsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectAggregatesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualKeyspacesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualTablesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualColumnsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectEdgesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVerticiesQuery() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java deleted file mode 100644 index ff09917b3c7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Cassandra22SchemaQueries extends CassandraSchemaQueries { - public Cassandra22SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected String selectKeyspacesQuery() { - return "SELECT * FROM system.schema_keyspaces"; - } - - @Override - protected String selectTablesQuery() { - return "SELECT * FROM system.schema_columnfamilies"; - } - - @Override - protected Optional selectViewsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectIndexesQuery() { - return Optional.empty(); - } - - @Override - protected String selectColumnsQuery() { - return "SELECT * FROM system.schema_columns"; - } - - @Override - protected String selectTypesQuery() { - return "SELECT * FROM system.schema_usertypes"; - } - - @Override - protected Optional selectFunctionsQuery() { - return Optional.of("SELECT * FROM system.schema_functions"); - } - - @Override - protected Optional selectAggregatesQuery() { - return Optional.of("SELECT * FROM system.schema_aggregates"); - } - - @Override - protected Optional selectVirtualKeyspacesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualTablesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualColumnsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectEdgesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVerticiesQuery() { - return Optional.empty(); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java deleted file mode 100644 index 8c36d0f4217..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Cassandra3SchemaQueries extends CassandraSchemaQueries { - public Cassandra3SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected String selectKeyspacesQuery() { - return "SELECT * FROM system_schema.keyspaces"; - } - - @Override - protected String selectTablesQuery() { - return "SELECT * FROM system_schema.tables"; - } - - @Override - protected Optional selectViewsQuery() { - return Optional.of("SELECT * FROM system_schema.views"); - } - - @Override - protected Optional selectIndexesQuery() { - return Optional.of("SELECT * FROM system_schema.indexes"); - } - - @Override - protected String selectColumnsQuery() { - return "SELECT * FROM system_schema.columns"; - } - - @Override - protected String selectTypesQuery() { - return "SELECT * FROM system_schema.types"; - } - - @Override - protected Optional selectFunctionsQuery() { - return Optional.of("SELECT * FROM system_schema.functions"); - } - - @Override - protected Optional selectAggregatesQuery() { - return Optional.of("SELECT * FROM system_schema.aggregates"); - } - - @Override - protected Optional selectVirtualKeyspacesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualTablesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualColumnsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectEdgesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVerticiesQuery() { - return Optional.empty(); - } -} 
diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java deleted file mode 100644 index e2de0b419ed..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Cassandra4SchemaQueries extends Cassandra3SchemaQueries { - public Cassandra4SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected Optional selectVirtualKeyspacesQuery() { - return Optional.of("SELECT * FROM system_virtual_schema.keyspaces"); - } - - @Override - protected Optional selectVirtualTablesQuery() { - return Optional.of("SELECT * FROM system_virtual_schema.tables"); - } - - @Override - protected Optional selectVirtualColumnsQuery() { - return Optional.of("SELECT * FROM system_virtual_schema.columns"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java deleted file mode 100644 index 92ab2501c12..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import io.netty.util.concurrent.EventExecutor; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public abstract class CassandraSchemaQueries implements SchemaQueries { - - private static final Logger LOG = LoggerFactory.getLogger(CassandraSchemaQueries.class); - - private final DriverChannel channel; - private final EventExecutor adminExecutor; - private final Node node; - private final String logPrefix; - private final Duration timeout; - private final int pageSize; - private 
final KeyspaceFilter keyspaceFilter; - // The future we return from execute, completes when all the queries are done. - private final CompletableFuture schemaRowsFuture = new CompletableFuture<>(); - private final long startTimeNs = System.nanoTime(); - - // All non-final fields are accessed exclusively on adminExecutor - private CassandraSchemaRows.Builder schemaRowsBuilder; - private int pendingQueries; - - protected CassandraSchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - this.channel = channel; - this.adminExecutor = channel.eventLoop(); - this.node = node; - this.logPrefix = logPrefix; - this.timeout = config.getDuration(DefaultDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT); - this.pageSize = config.getInt(DefaultDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE); - - List refreshedKeyspaces = - config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList()); - assert refreshedKeyspaces != null; // per the default value - this.keyspaceFilter = KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces); - } - - protected abstract String selectKeyspacesQuery(); - - protected abstract Optional selectVirtualKeyspacesQuery(); - - protected abstract String selectTablesQuery(); - - protected abstract Optional selectVirtualTablesQuery(); - - protected abstract Optional selectViewsQuery(); - - protected abstract Optional selectIndexesQuery(); - - protected abstract String selectColumnsQuery(); - - protected abstract Optional selectVirtualColumnsQuery(); - - protected abstract String selectTypesQuery(); - - protected abstract Optional selectFunctionsQuery(); - - protected abstract Optional selectAggregatesQuery(); - - protected abstract Optional selectEdgesQuery(); - - protected abstract Optional selectVerticiesQuery(); - - @Override - public CompletionStage execute() { - RunOrSchedule.on(adminExecutor, this::executeOnAdminExecutor); - return schemaRowsFuture; - } - - private 
void executeOnAdminExecutor() { - assert adminExecutor.inEventLoop(); - - schemaRowsBuilder = new CassandraSchemaRows.Builder(node, keyspaceFilter, logPrefix); - String whereClause = keyspaceFilter.getWhereClause(); - - query(selectKeyspacesQuery() + whereClause, schemaRowsBuilder::withKeyspaces); - query(selectTypesQuery() + whereClause, schemaRowsBuilder::withTypes); - query(selectTablesQuery() + whereClause, schemaRowsBuilder::withTables); - query(selectColumnsQuery() + whereClause, schemaRowsBuilder::withColumns); - selectIndexesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withIndexes)); - selectViewsQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withViews)); - selectFunctionsQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withFunctions)); - selectAggregatesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withAggregates)); - selectVirtualKeyspacesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVirtualKeyspaces)); - selectVirtualTablesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVirtualTables)); - selectVirtualColumnsQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVirtualColumns)); - selectEdgesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withEdges)); - selectVerticiesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVertices)); - } - - private void query( - String queryString, - Function, CassandraSchemaRows.Builder> builderUpdater) { - assert adminExecutor.inEventLoop(); - - pendingQueries += 1; - query(queryString) - .whenCompleteAsync( - (result, error) -> handleResult(result, error, builderUpdater), adminExecutor); - } - - @VisibleForTesting - protected CompletionStage query(String query) { - return AdminRequestHandler.query(channel, query, timeout, pageSize, 
logPrefix).start(); - } - - private void handleResult( - AdminResult result, - Throwable error, - Function, CassandraSchemaRows.Builder> builderUpdater) { - - // If another query already failed, we've already propagated the failure so just ignore this one - if (schemaRowsFuture.isCompletedExceptionally()) { - return; - } - - if (error != null) { - schemaRowsFuture.completeExceptionally(error); - } else { - // Store the rows of the current page in the builder - schemaRowsBuilder = builderUpdater.apply(result); - if (result.hasNextPage()) { - result - .nextPage() - .whenCompleteAsync( - (nextResult, nextError) -> handleResult(nextResult, nextError, builderUpdater), - adminExecutor); - } else { - pendingQueries -= 1; - if (pendingQueries == 0) { - LOG.debug( - "[{}] Schema queries took {}", logPrefix, NanoTime.formatTimeSince(startTimeNs)); - schemaRowsFuture.complete(schemaRowsBuilder.build()); - } - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java deleted file mode 100644 index 95af0739300..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java +++ /dev/null @@ -1,393 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeCqlNameParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Immutable -public class CassandraSchemaRows implements SchemaRows { - - private final Node node; - private final DataTypeParser dataTypeParser; - private final List keyspaces; - private final List virtualKeyspaces; - private final Multimap tables; - private final Multimap virtualTables; - private final Multimap 
views; - private final Multimap types; - private final Multimap functions; - private final Multimap aggregates; - private final Map> columns; - private final Map> virtualColumns; - private final Map> indexes; - private final Map> vertices; - private final Map> edges; - - private CassandraSchemaRows( - Node node, - DataTypeParser dataTypeParser, - List keyspaces, - List virtualKeyspaces, - Multimap tables, - Multimap virtualTables, - Multimap views, - Map> columns, - Map> virtualColumns, - Map> indexes, - Multimap types, - Multimap functions, - Multimap aggregates, - Map> vertices, - Map> edges) { - this.node = node; - this.dataTypeParser = dataTypeParser; - this.keyspaces = keyspaces; - this.virtualKeyspaces = virtualKeyspaces; - this.tables = tables; - this.virtualTables = virtualTables; - this.views = views; - this.columns = columns; - this.virtualColumns = virtualColumns; - this.indexes = indexes; - this.types = types; - this.functions = functions; - this.aggregates = aggregates; - this.vertices = vertices; - this.edges = edges; - } - - @NonNull - @Override - public Node getNode() { - return node; - } - - @Override - public DataTypeParser dataTypeParser() { - return dataTypeParser; - } - - @Override - public List keyspaces() { - return keyspaces; - } - - @Override - public List virtualKeyspaces() { - return virtualKeyspaces; - } - - @Override - public Multimap tables() { - return tables; - } - - @Override - public Multimap virtualTables() { - return virtualTables; - } - - @Override - public Multimap views() { - return views; - } - - @Override - public Multimap types() { - return types; - } - - @Override - public Multimap functions() { - return functions; - } - - @Override - public Multimap aggregates() { - return aggregates; - } - - @Override - public Map> columns() { - return columns; - } - - @Override - public Map> virtualColumns() { - return virtualColumns; - } - - @Override - public Map> indexes() { - return indexes; - } - - @Override - public Map> 
vertices() { - return vertices; - } - - @Override - public Map> edges() { - return edges; - } - - public static class Builder { - private static final Logger LOG = LoggerFactory.getLogger(Builder.class); - - private final Node node; - private final DataTypeParser dataTypeParser; - private final String tableNameColumn; - private final KeyspaceFilter keyspaceFilter; - private final String logPrefix; - private final ImmutableList.Builder keyspacesBuilder = ImmutableList.builder(); - private final ImmutableList.Builder virtualKeyspacesBuilder = ImmutableList.builder(); - private final ImmutableMultimap.Builder tablesBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder virtualTablesBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder viewsBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder typesBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder functionsBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder aggregatesBuilder = - ImmutableListMultimap.builder(); - private final Map> - columnsBuilders = new LinkedHashMap<>(); - private final Map> - virtualColumnsBuilders = new LinkedHashMap<>(); - private final Map> - indexesBuilders = new LinkedHashMap<>(); - private final Map> - verticesBuilders = new LinkedHashMap<>(); - private final Map> - edgesBuilders = new LinkedHashMap<>(); - - public Builder(Node node, KeyspaceFilter keyspaceFilter, String logPrefix) { - this.node = node; - this.keyspaceFilter = keyspaceFilter; - this.logPrefix = logPrefix; - if (isCassandraV3OrAbove(node)) { - this.tableNameColumn = "table_name"; - this.dataTypeParser = new DataTypeCqlNameParser(); - } else { - this.tableNameColumn = "columnfamily_name"; - this.dataTypeParser = new DataTypeClassNameParser(); - } - } - - private static boolean isCassandraV3OrAbove(Node node) { - // We already did those checks in 
DefaultSchemaQueriesFactory. - // We could pass along booleans (isCassandraV3, isDse...), but passing the whole Node is - // better for maintainability, in case we need to do more checks in downstream components in - // the future. - Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (dseVersion != null) { - dseVersion = dseVersion.nextStable(); - return dseVersion.compareTo(Version.V5_0_0) >= 0; - } else { - Version cassandraVersion = node.getCassandraVersion(); - if (cassandraVersion == null) { - cassandraVersion = Version.V3_0_0; - } else { - cassandraVersion = cassandraVersion.nextStable(); - } - return cassandraVersion.compareTo(Version.V3_0_0) >= 0; - } - } - - public Builder withKeyspaces(Iterable rows) { - for (AdminRow row : rows) { - put(keyspacesBuilder, row); - } - return this; - } - - public Builder withVirtualKeyspaces(Iterable rows) { - for (AdminRow row : rows) { - put(virtualKeyspacesBuilder, row); - } - return this; - } - - public Builder withTables(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, tablesBuilder); - } - return this; - } - - public Builder withVirtualTables(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, virtualTablesBuilder); - } - return this; - } - - public Builder withViews(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, viewsBuilder); - } - return this; - } - - public Builder withTypes(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, typesBuilder); - } - return this; - } - - public Builder withFunctions(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, functionsBuilder); - } - return this; - } - - public Builder withAggregates(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, aggregatesBuilder); - } - return this; - } - - public Builder withColumns(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, columnsBuilders); - } - return this; - } - - public 
Builder withVirtualColumns(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, virtualColumnsBuilders); - } - return this; - } - - public Builder withIndexes(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, indexesBuilders); - } - return this; - } - - public Builder withVertices(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, verticesBuilders); - } - return this; - } - - public Builder withEdges(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, edgesBuilders); - } - return this; - } - - private void put(ImmutableList.Builder builder, AdminRow row) { - String keyspace = row.getString("keyspace_name"); - if (keyspace == null) { - LOG.warn("[{}] Skipping system row with missing keyspace name", logPrefix); - } else if (keyspaceFilter.includes(keyspace)) { - builder.add(row); - } - } - - private void putByKeyspace( - AdminRow row, ImmutableMultimap.Builder builder) { - String keyspace = row.getString("keyspace_name"); - if (keyspace == null) { - LOG.warn("[{}] Skipping system row with missing keyspace name", logPrefix); - } else if (keyspaceFilter.includes(keyspace)) { - builder.put(CqlIdentifier.fromInternal(keyspace), row); - } - } - - private void putByKeyspaceAndTable( - AdminRow row, - Map> builders) { - String keyspace = row.getString("keyspace_name"); - String table = row.getString(tableNameColumn); - if (keyspace == null) { - LOG.warn("[{}] Skipping system row with missing keyspace name", logPrefix); - } else if (table == null) { - LOG.warn("[{}] Skipping system row with missing table name", logPrefix); - } else if (keyspaceFilter.includes(keyspace)) { - ImmutableMultimap.Builder builder = - builders.computeIfAbsent( - CqlIdentifier.fromInternal(keyspace), s -> ImmutableListMultimap.builder()); - builder.put(CqlIdentifier.fromInternal(table), row); - } - } - - public CassandraSchemaRows build() { - return new CassandraSchemaRows( - node, - dataTypeParser, - 
keyspacesBuilder.build(), - virtualKeyspacesBuilder.build(), - tablesBuilder.build(), - virtualTablesBuilder.build(), - viewsBuilder.build(), - build(columnsBuilders), - build(virtualColumnsBuilders), - build(indexesBuilders), - typesBuilder.build(), - functionsBuilder.build(), - aggregatesBuilder.build(), - build(verticesBuilders), - build(edgesBuilders)); - } - - private static Map> build( - Map> builders) { - ImmutableMap.Builder> builder = ImmutableMap.builder(); - builders - .entrySet() - .forEach( - (entry) -> { - builder.put(entry.getKey(), entry.getValue().build()); - }); - return builder.build(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java deleted file mode 100644 index e537475ed7b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DefaultSchemaQueriesFactory implements SchemaQueriesFactory { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultSchemaQueriesFactory.class); - - protected final InternalDriverContext context; - protected final String logPrefix; - - public DefaultSchemaQueriesFactory(InternalDriverContext context) { - this.context = context; - this.logPrefix = context.getSessionName(); - } - - @Override - public SchemaQueries newInstance() { - DriverChannel channel = context.getControlConnection().channel(); - if (channel == null || channel.closeFuture().isDone()) { - throw new IllegalStateException("Control channel not available, aborting schema refresh"); - } - Node node = - context - .getMetadataManager() - .getMetadata() - .findNode(channel.getEndPoint()) - .orElseThrow( - () -> - new IllegalStateException( - "Could not find control node metadata " - + channel.getEndPoint() - + ", aborting schema refresh")); - return newInstance(node, channel); - } - - protected SchemaQueries newInstance(Node node, DriverChannel channel) { - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - - Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (dseVersion != null) { - dseVersion = dseVersion.nextStable(); - - LOG.debug( - "[{}] Sending schema queries to {} with DSE version {}", logPrefix, node, dseVersion); - // 4.8 is the 
oldest version supported, which uses C* 2.1 schema - if (dseVersion.compareTo(Version.V5_0_0) < 0) { - return new Cassandra21SchemaQueries(channel, node, config, logPrefix); - } else if (dseVersion.compareTo(Version.V6_7_0) < 0) { - // 5.0 - 6.7 uses C* 3.0 schema - return new Cassandra3SchemaQueries(channel, node, config, logPrefix); - } else if (dseVersion.compareTo(Version.V6_8_0) < 0) { - // 6.7 uses C* 4.0 schema - return new Cassandra4SchemaQueries(channel, node, config, logPrefix); - } else { - // 6.8+ uses DSE 6.8 schema (C* 4.0 schema with graph metadata) (JAVA-1898) - return new Dse68SchemaQueries(channel, node, config, logPrefix); - } - } else { - Version cassandraVersion = node.getCassandraVersion(); - if (cassandraVersion == null) { - LOG.warn( - "[{}] Cassandra version missing for {}, defaulting to {}", - logPrefix, - node, - Version.V3_0_0); - cassandraVersion = Version.V3_0_0; - } else { - cassandraVersion = cassandraVersion.nextStable(); - } - LOG.debug( - "[{}] Sending schema queries to {} with version {}", logPrefix, node, cassandraVersion); - if (cassandraVersion.compareTo(Version.V2_2_0) < 0) { - return new Cassandra21SchemaQueries(channel, node, config, logPrefix); - } else if (cassandraVersion.compareTo(Version.V3_0_0) < 0) { - return new Cassandra22SchemaQueries(channel, node, config, logPrefix); - } else if (cassandraVersion.compareTo(Version.V4_0_0) < 0) { - return new Cassandra3SchemaQueries(channel, node, config, logPrefix); - } else { - return new Cassandra4SchemaQueries(channel, node, config, logPrefix); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Dse68SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Dse68SchemaQueries.java deleted file mode 100644 index 460df8b59e5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Dse68SchemaQueries.java +++ /dev/null @@ -1,46 +0,0 @@ 
-/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; - -/** - * The system table queries to refresh the schema in DSE 6.8. - * - *

There are two additional tables for per-table graph metadata. - */ -public class Dse68SchemaQueries extends Cassandra4SchemaQueries { - - public Dse68SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected Optional selectEdgesQuery() { - return Optional.of("SELECT * FROM system_schema.edges"); - } - - @Override - protected Optional selectVerticiesQuery() { - return Optional.of("SELECT * FROM system_schema.vertices"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilter.java deleted file mode 100644 index a483a904f6e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilter.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** - * Filters keyspaces during schema metadata queries. - * - *

Depending on the circumstances, we do it either on the server side with a WHERE IN clause that - * will be appended to every query, or on the client side with a predicate that will be applied to - * every fetched row. - */ -public interface KeyspaceFilter { - - static KeyspaceFilter newInstance(@NonNull String logPrefix, @NonNull List specs) { - if (specs.isEmpty()) { - return INCLUDE_ALL; - } else { - return new RuleBasedKeyspaceFilter(logPrefix, specs); - } - } - - /** The WHERE IN clause, or an empty string if there is no server-side filtering. */ - @NonNull - String getWhereClause(); - - /** The predicate that will be invoked for client-side filtering. */ - boolean includes(@NonNull String keyspace); - - KeyspaceFilter INCLUDE_ALL = - new KeyspaceFilter() { - @NonNull - @Override - public String getWhereClause() { - return ""; - } - - @Override - public boolean includes(@NonNull String keyspace) { - return true; - } - }; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/RuleBasedKeyspaceFilter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/RuleBasedKeyspaceFilter.java deleted file mode 100644 index 38a8c116c45..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/RuleBasedKeyspaceFilter.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.function.Predicate; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.regex.PatternSyntaxException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Filters keyspaces during schema metadata queries. - * - *

Depending on the circumstances, we do it either on the server side with a WHERE IN clause that - * will be appended to every query, or on the client side with a predicate that will be applied to - * every fetched row. - */ -class RuleBasedKeyspaceFilter implements KeyspaceFilter { - - private static final Logger LOG = LoggerFactory.getLogger(RuleBasedKeyspaceFilter.class); - - private static final Pattern EXACT_INCLUDE = Pattern.compile("\\w+"); - private static final Pattern EXACT_EXCLUDE = Pattern.compile("!\\s*(\\w+)"); - private static final Pattern REGEX_INCLUDE = Pattern.compile("/(.+)/"); - private static final Pattern REGEX_EXCLUDE = Pattern.compile("!\\s*/(.+)/"); - - private final String logPrefix; - private final String whereClause; - private final Set exactIncludes = new HashSet<>(); - private final Set exactExcludes = new HashSet<>(); - private final List> regexIncludes = new ArrayList<>(); - private final List> regexExcludes = new ArrayList<>(); - - private final boolean isDebugEnabled; - private final Set loggedKeyspaces; - - RuleBasedKeyspaceFilter(@NonNull String logPrefix, @NonNull List specs) { - assert !specs.isEmpty(); // see KeyspaceFilter#newInstance - - this.logPrefix = logPrefix; - for (String spec : specs) { - spec = spec.trim(); - Matcher matcher; - if (EXACT_INCLUDE.matcher(spec).matches()) { - exactIncludes.add(spec); - if (exactExcludes.remove(spec)) { - LOG.warn( - "[{}] '{}' is both included and excluded, ignoring the exclusion", logPrefix, spec); - } - } else if ((matcher = EXACT_EXCLUDE.matcher(spec)).matches()) { - String name = matcher.group(1); - if (exactIncludes.contains(name)) { - LOG.warn( - "[{}] '{}' is both included and excluded, ignoring the exclusion", logPrefix, name); - } else { - exactExcludes.add(name); - } - } else if ((matcher = REGEX_INCLUDE.matcher(spec)).matches()) { - compile(matcher.group(1)).map(regexIncludes::add); - } else if ((matcher = REGEX_EXCLUDE.matcher(spec)).matches()) { - 
compile(matcher.group(1)).map(regexExcludes::add); - } else { - LOG.warn( - "[{}] Error while parsing {}: invalid element '{}', skipping", - logPrefix, - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES.getPath(), - spec); - } - } - - if (!exactIncludes.isEmpty() && regexIncludes.isEmpty() && regexExcludes.isEmpty()) { - // We can filter on the server - whereClause = buildWhereClause(exactIncludes); - if (!exactExcludes.isEmpty()) { - // Proceed, but this is probably a mistake - LOG.warn( - "[{}] {} only has exact includes and excludes, the excludes are redundant", - logPrefix, - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES.getPath()); - } - LOG.debug("[{}] Filtering server-side with '{}'", logPrefix, whereClause); - } else { - whereClause = ""; - LOG.debug("[{}] No server-side filtering", logPrefix); - } - - isDebugEnabled = LOG.isDebugEnabled(); - loggedKeyspaces = isDebugEnabled ? new HashSet<>() : null; - } - - @NonNull - @Override - public String getWhereClause() { - return whereClause; - } - - @Override - public boolean includes(@NonNull String keyspace) { - if (exactIncludes.contains(keyspace)) { - log(keyspace, true, "it is included by name"); - return true; - } else if (exactExcludes.contains(keyspace)) { - log(keyspace, false, "it is excluded by name"); - return false; - } else if (regexIncludes.isEmpty()) { - if (regexExcludes.isEmpty()) { - log(keyspace, false, "it is not included by name"); - return false; - } else if (matchesAny(keyspace, regexExcludes)) { - log(keyspace, false, "it matches at least one regex exclude"); - return false; - } else { - log(keyspace, true, "it does not match any regex exclude"); - return true; - } - } else { // !regexIncludes.isEmpty() - if (regexExcludes.isEmpty()) { - if (matchesAny(keyspace, regexIncludes)) { - log(keyspace, true, "it matches at least one regex include"); - return true; - } else { - log(keyspace, false, "it does not match any regex include"); - return false; - } - } else { - if 
(matchesAny(keyspace, regexIncludes) && !matchesAny(keyspace, regexExcludes)) { - log(keyspace, true, "it matches at least one regex include, and no regex exclude"); - return true; - } else { - log(keyspace, false, "it matches either no regex include, or at least one regex exclude"); - return false; - } - } - } - } - - private void log(@NonNull String keyspace, boolean include, @NonNull String reason) { - if (isDebugEnabled && loggedKeyspaces.add(keyspace)) { - LOG.debug( - "[{}] Filtering {} '{}' because {}", logPrefix, include ? "in" : "out", keyspace, reason); - } - } - - private boolean matchesAny(String keyspace, List> rules) { - for (Predicate rule : rules) { - if (rule.test(keyspace)) { - return true; - } - } - return false; - } - - private Optional> compile(String regex) { - try { - return Optional.of(Pattern.compile(regex).asPredicate()); - } catch (PatternSyntaxException e) { - LOG.warn( - "[{}] Error while parsing {}: syntax error in regex /{}/ ({}), skipping", - this.logPrefix, - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES.getPath(), - regex, - e.getMessage()); - return Optional.empty(); - } - } - - private static String buildWhereClause(Set keyspaces) { - StringBuilder builder = new StringBuilder(" WHERE keyspace_name IN ("); - boolean first = true; - for (String keyspace : keyspaces) { - if (first) { - first = false; - } else { - builder.append(","); - } - builder.append('\'').append(keyspace).append('\''); - } - return builder.append(')').toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java deleted file mode 100644 index 613f43197e2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import java.util.concurrent.CompletionStage; - -/** - * Manages the queries to system tables during a schema refresh. - * - *

They are all asynchronous, and possibly paged. This class abstracts all the details and - * exposes a common result type. - * - *

Implementations must be thread-safe. - */ -public interface SchemaQueries { - - /** - * Launch the queries asynchronously, returning a future that will complete when they have all - * succeeded. - */ - CompletionStage execute(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java deleted file mode 100644 index 32d1ae684ef..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -public interface SchemaQueriesFactory { - SchemaQueries newInstance(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java deleted file mode 100644 index 0507b8cffd1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; -import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -/** - * The system rows returned by the queries for a schema refresh, categorized by keyspace/table where - * relevant. - * - *

Implementations must be thread-safe. - */ -public interface SchemaRows { - - /** The node that was used to retrieve the schema information. */ - @NonNull - Node getNode(); - - List keyspaces(); - - List virtualKeyspaces(); - - Multimap tables(); - - Multimap virtualTables(); - - Multimap views(); - - Multimap types(); - - Multimap functions(); - - Multimap aggregates(); - - Map> columns(); - - Map> virtualColumns(); - - Map> indexes(); - - DataTypeParser dataTypeParser(); - - default Map> vertices() { - return new LinkedHashMap<>(); - } - - default Map> edges() { - return new LinkedHashMap<>(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java deleted file mode 100644 index 86a4d1912f4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.refresh; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; -import com.datastax.oss.driver.internal.core.metadata.schema.events.AggregateChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.FunctionChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.KeyspaceChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TableChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.ViewChangeEvent; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.Map; -import java.util.function.BiFunction; -import java.util.function.Function; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class SchemaRefresh implements MetadataRefresh { - - @VisibleForTesting public final Map newKeyspaces; - - public SchemaRefresh(Map newKeyspaces) { - this.newKeyspaces = newKeyspaces; - } - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - ImmutableList.Builder events = ImmutableList.builder(); - - Map oldKeyspaces = oldMetadata.getKeyspaces(); - for (CqlIdentifier removedKey : Sets.difference(oldKeyspaces.keySet(), newKeyspaces.keySet())) { - events.add(KeyspaceChangeEvent.dropped(oldKeyspaces.get(removedKey))); - } - for (Map.Entry entry : newKeyspaces.entrySet()) 
{ - CqlIdentifier key = entry.getKey(); - computeEvents(oldKeyspaces.get(key), entry.getValue(), events); - } - - return new Result( - oldMetadata.withSchema(this.newKeyspaces, tokenMapEnabled, context), events.build()); - } - - /** - * Computes the exact set of events to emit when a keyspace has changed. - * - *

We can't simply emit {@link KeyspaceChangeEvent#updated(KeyspaceMetadata, KeyspaceMetadata)} - * because this method might be called as part of a full schema refresh, or a keyspace refresh - * initiated by coalesced child element refreshes. We need to traverse all children to check what - * has exactly changed. - */ - private void computeEvents( - KeyspaceMetadata oldKeyspace, - KeyspaceMetadata newKeyspace, - ImmutableList.Builder events) { - if (oldKeyspace == null) { - events.add(KeyspaceChangeEvent.created(newKeyspace)); - } else { - if (!oldKeyspace.shallowEquals(newKeyspace)) { - events.add(KeyspaceChangeEvent.updated(oldKeyspace, newKeyspace)); - } - computeChildEvents(oldKeyspace, newKeyspace, events); - } - } - - private void computeChildEvents( - KeyspaceMetadata oldKeyspace, - KeyspaceMetadata newKeyspace, - ImmutableList.Builder events) { - computeChildEvents( - oldKeyspace.getTables(), - newKeyspace.getTables(), - TableChangeEvent::dropped, - TableChangeEvent::created, - TableChangeEvent::updated, - events); - computeChildEvents( - oldKeyspace.getViews(), - newKeyspace.getViews(), - ViewChangeEvent::dropped, - ViewChangeEvent::created, - ViewChangeEvent::updated, - events); - computeChildEvents( - oldKeyspace.getUserDefinedTypes(), - newKeyspace.getUserDefinedTypes(), - TypeChangeEvent::dropped, - TypeChangeEvent::created, - TypeChangeEvent::updated, - events); - computeChildEvents( - oldKeyspace.getFunctions(), - newKeyspace.getFunctions(), - FunctionChangeEvent::dropped, - FunctionChangeEvent::created, - FunctionChangeEvent::updated, - events); - computeChildEvents( - oldKeyspace.getAggregates(), - newKeyspace.getAggregates(), - AggregateChangeEvent::dropped, - AggregateChangeEvent::created, - AggregateChangeEvent::updated, - events); - } - - private void computeChildEvents( - Map oldChildren, - Map newChildren, - Function newDroppedEvent, - Function newCreatedEvent, - BiFunction newUpdatedEvent, - ImmutableList.Builder events) { - for (K removedKey 
: Sets.difference(oldChildren.keySet(), newChildren.keySet())) { - events.add(newDroppedEvent.apply(oldChildren.get(removedKey))); - } - for (Map.Entry entry : newChildren.entrySet()) { - K key = entry.getKey(); - V newChild = entry.getValue(); - V oldChild = oldChildren.get(key); - if (oldChild == null) { - events.add(newCreatedEvent.apply(newChild)); - } else if (!oldChild.equals(newChild)) { - events.add(newUpdatedEvent.apply(oldChild, newChild)); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java deleted file mode 100644 index ff7642d0c18..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.primitives.UnsignedBytes; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import net.jcip.annotations.Immutable; - -/** A token generated by {@code ByteOrderedPartitioner}. */ -@Immutable -public class ByteOrderedToken implements Token { - - private final ByteBuffer value; - - public ByteOrderedToken(@NonNull ByteBuffer value) { - this.value = ByteBuffer.wrap(Bytes.getArray(value)).asReadOnlyBuffer(); - } - - @NonNull - public ByteBuffer getValue() { - return value; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ByteOrderedToken) { - ByteOrderedToken that = (ByteOrderedToken) other; - return this.value.equals(that.getValue()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return value.hashCode(); - } - - @Override - public int compareTo(@NonNull Token other) { - Preconditions.checkArgument( - other instanceof ByteOrderedToken, "Can only compare tokens of the same type"); - return UnsignedBytes.lexicographicalComparator() - .compare(Bytes.getArray(value), Bytes.getArray(((ByteOrderedToken) other).value)); - } - - @Override - public String toString() { - return "ByteOrderedToken(" + Bytes.toHexString(value) + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java deleted file mode 100644 index 5dc3aa3aa45..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java +++ /dev/null @@ -1,76 +0,0 
@@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ByteOrderedTokenFactory implements TokenFactory { - - public static final String PARTITIONER_NAME = "org.apache.cassandra.dht.ByteOrderedPartitioner"; - - public static final ByteOrderedToken MIN_TOKEN = new ByteOrderedToken(ByteBuffer.allocate(0)); - - @Override - public String getPartitionerName() { - return PARTITIONER_NAME; - } - - @Override - public Token hash(ByteBuffer partitionKey) { - return new ByteOrderedToken(partitionKey); - } - - @Override - public Token parse(String tokenString) { - // This method must be able to parse the contents of system.peers.tokens, which do not have the - // "0x" prefix. 
On the other hand, OPPToken#toString has the "0x" because it should be usable in - // a CQL query, and it's nice to have fromString and toString symmetrical. So handle both cases: - if (!tokenString.startsWith("0x")) { - String prefix = (tokenString.length() % 2 == 0) ? "0x" : "0x0"; - tokenString = prefix + tokenString; - } - ByteBuffer value = Bytes.fromHexString(tokenString); - return new ByteOrderedToken(value); - } - - @Override - public String format(Token token) { - Preconditions.checkArgument( - token instanceof ByteOrderedToken, "Can only format ByteOrderedToken instances"); - return Bytes.toHexString(((ByteOrderedToken) token).getValue()); - } - - @Override - public Token minToken() { - return MIN_TOKEN; - } - - @Override - public TokenRange range(Token start, Token end) { - Preconditions.checkArgument( - start instanceof ByteOrderedToken && end instanceof ByteOrderedToken, - "Can only build ranges of ByteOrderedToken instances"); - return new ByteOrderedTokenRange(((ByteOrderedToken) start), ((ByteOrderedToken) end)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java deleted file mode 100644 index 7e95b7c01c9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class ByteOrderedTokenRange extends TokenRangeBase { - - private static final BigInteger TWO = BigInteger.valueOf(2); - - public ByteOrderedTokenRange(ByteOrderedToken start, ByteOrderedToken end) { - super(start, end, ByteOrderedTokenFactory.MIN_TOKEN); - } - - @Override - protected TokenRange newTokenRange(Token start, Token end) { - return new ByteOrderedTokenRange(((ByteOrderedToken) start), ((ByteOrderedToken) end)); - } - - @Override - protected List split(Token rawStartToken, Token rawEndToken, int numberOfSplits) { - int tokenOrder = rawStartToken.compareTo(rawEndToken); - - // ]min,min] means the whole ring. 
However, since there is no "max token" with this partitioner, - // we can't come up with a magic end value that would cover the whole ring - if (tokenOrder == 0 && rawStartToken.equals(ByteOrderedTokenFactory.MIN_TOKEN)) { - throw new IllegalArgumentException("Cannot split whole ring with ordered partitioner"); - } - - ByteOrderedToken startToken = (ByteOrderedToken) rawStartToken; - ByteOrderedToken endToken = (ByteOrderedToken) rawEndToken; - - int significantBytes; - BigInteger start, end, range, ringEnd, ringLength; - BigInteger bigNumberOfSplits = BigInteger.valueOf(numberOfSplits); - if (tokenOrder < 0) { - // Since tokens are compared lexicographically, convert to integers using the largest length - // (ex: given 0x0A and 0x0BCD, switch to 0x0A00 and 0x0BCD) - significantBytes = Math.max(startToken.getValue().capacity(), endToken.getValue().capacity()); - - // If the number of splits does not fit in the difference between the two integers, use more - // bytes (ex: cannot fit 4 splits between 0x01 and 0x03, so switch to 0x0100 and 0x0300) - // At most 4 additional bytes will be needed, since numberOfSplits is an integer. 
- int addedBytes = 0; - while (true) { - start = toBigInteger(startToken.getValue(), significantBytes); - end = toBigInteger(endToken.getValue(), significantBytes); - range = end.subtract(start); - if (addedBytes == 4 || start.equals(end) || range.compareTo(bigNumberOfSplits) >= 0) { - break; - } - significantBytes += 1; - addedBytes += 1; - } - ringEnd = ringLength = null; // won't be used - } else { - // Same logic except that we wrap around the ring - significantBytes = Math.max(startToken.getValue().capacity(), endToken.getValue().capacity()); - int addedBytes = 0; - while (true) { - start = toBigInteger(startToken.getValue(), significantBytes); - end = toBigInteger(endToken.getValue(), significantBytes); - ringLength = TWO.pow(significantBytes * 8); - ringEnd = ringLength.subtract(BigInteger.ONE); - range = end.subtract(start).add(ringLength); - if (addedBytes == 4 || range.compareTo(bigNumberOfSplits) >= 0) { - break; - } - significantBytes += 1; - addedBytes += 1; - } - } - - List values = super.split(start, range, ringEnd, ringLength, numberOfSplits); - List tokens = Lists.newArrayListWithExpectedSize(values.size()); - for (BigInteger value : values) { - tokens.add(new ByteOrderedToken(toBytes(value, significantBytes))); - } - return tokens; - } - - // Convert a token's byte array to a number in order to perform computations. - // This depends on the number of "significant bytes" that we use to normalize all tokens to the - // same size. - // For example if the token is 0x01 but significantBytes is 2, the result is 8 (0x0100). - private BigInteger toBigInteger(ByteBuffer bb, int significantBytes) { - byte[] bytes = Bytes.getArray(bb); - byte[] target; - if (significantBytes != bytes.length) { - target = new byte[significantBytes]; - System.arraycopy(bytes, 0, target, 0, bytes.length); - } else { - target = bytes; - } - return new BigInteger(1, target); - } - - // Convert a numeric representation back to a byte array. 
- // Again, the number of significant bytes matters: if the input value is 1 but significantBytes is - // 2, the - // expected result is 0x0001 (a simple conversion would produce 0x01). - protected ByteBuffer toBytes(BigInteger value, int significantBytes) { - byte[] rawBytes = value.toByteArray(); - byte[] result; - if (rawBytes.length == significantBytes) { - result = rawBytes; - } else { - result = new byte[significantBytes]; - int start, length; - if (rawBytes[0] == 0) { - // that's a sign byte, ignore (it can cause rawBytes.length == significantBytes + 1) - start = 1; - length = rawBytes.length - 1; - } else { - start = 0; - length = rawBytes.length; - } - System.arraycopy(rawBytes, start, result, significantBytes - length, length); - } - return ByteBuffer.wrap(result); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/CanonicalNodeSetBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/CanonicalNodeSetBuilder.java deleted file mode 100644 index 099d8b55129..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/CanonicalNodeSetBuilder.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.NotThreadSafe; - -/** - * A reusable set builder that guarantees that identical sets (same elements in the same order) will - * be represented by the same instance. - */ -@NotThreadSafe -class CanonicalNodeSetBuilder { - - private final Map, Set> canonicalSets = new HashMap<>(); - private final List elements = new ArrayList<>(); - - void add(Node node) { - // This is O(n), but the cardinality is low (max possible size is the replication factor). - if (!elements.contains(node)) { - elements.add(node); - } - } - - int size() { - return elements.size(); - } - - Set build() { - return canonicalSets.computeIfAbsent(elements, ImmutableSet::copyOf); - } - - void clear() { - elements.clear(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java deleted file mode 100644 index a5da85195c6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultReplicationStrategyFactory implements ReplicationStrategyFactory { - - private final String logPrefix; - - public DefaultReplicationStrategyFactory(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - } - - @Override - public ReplicationStrategy newInstance(Map replicationConfig) { - String strategyClass = replicationConfig.get("class"); - Preconditions.checkNotNull( - strategyClass, "Missing replication strategy class in " + replicationConfig); - switch (strategyClass) { - case "org.apache.cassandra.locator.LocalStrategy": - return new LocalReplicationStrategy(); - case "org.apache.cassandra.locator.SimpleStrategy": - return new SimpleReplicationStrategy(replicationConfig); - case "org.apache.cassandra.locator.NetworkTopologyStrategy": - return new NetworkTopologyReplicationStrategy(replicationConfig, logPrefix); - case "org.apache.cassandra.locator.EverywhereStrategy": - return new EverywhereReplicationStrategy(); - default: - throw new IllegalArgumentException("Unsupported replication strategy: " + strategyClass); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java deleted file mode 100644 index 8226ddee2c7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DefaultTokenFactoryRegistry implements TokenFactoryRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultTokenFactoryRegistry.class); - - private final String logPrefix; - - public DefaultTokenFactoryRegistry(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - } - - @Override - public TokenFactory tokenFactoryFor(String partitioner) { - if (Murmur3TokenFactory.PARTITIONER_NAME.equals(partitioner)) { - LOG.debug("[{}] Detected Murmur3 partitioner ({})", logPrefix, partitioner); - return new Murmur3TokenFactory(); - } else if (RandomTokenFactory.PARTITIONER_NAME.equals(partitioner)) { - LOG.debug("[{}] Detected random partitioner ({})", logPrefix, partitioner); - return new RandomTokenFactory(); - } else if (ByteOrderedTokenFactory.PARTITIONER_NAME.equals(partitioner)) { - LOG.debug("[{}] Detected byte ordered partitioner ({})", logPrefix, partitioner); - return new ByteOrderedTokenFactory(); - } else { - LOG.warn( - "[{}] Unsupported partitioner '{}', token map will be empty.", logPrefix, partitioner); - return null; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java deleted file mode 100644 index 8c59fb73847..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.util.RoutingKey; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSetMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - 
-@Immutable -public class DefaultTokenMap implements TokenMap { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultTokenMap.class); - - public static DefaultTokenMap build( - @NonNull Collection nodes, - @NonNull Collection keyspaces, - @NonNull TokenFactory tokenFactory, - @NonNull ReplicationStrategyFactory replicationStrategyFactory, - @NonNull String logPrefix) { - - TokenToPrimaryAndRing tmp = buildTokenToPrimaryAndRing(nodes, tokenFactory); - Map tokenToPrimary = tmp.tokenToPrimary; - List ring = tmp.ring; - LOG.debug("[{}] Rebuilt ring ({} tokens)", logPrefix, ring.size()); - - Set tokenRanges = buildTokenRanges(ring, tokenFactory); - - ImmutableSetMultimap.Builder tokenRangesByPrimary = - ImmutableSetMultimap.builder(); - for (TokenRange range : tokenRanges) { - if (range.isFullRing()) { - // The full ring is always ]min, min], so getEnd() doesn't match the node's token - assert tokenToPrimary.size() == 1; - tokenRangesByPrimary.put(tokenToPrimary.values().iterator().next(), range); - } else { - tokenRangesByPrimary.put(tokenToPrimary.get(range.getEnd()), range); - } - } - - Map> replicationConfigs = - buildReplicationConfigs(keyspaces, logPrefix); - - ImmutableMap.Builder, KeyspaceTokenMap> keyspaceMapsBuilder = - ImmutableMap.builder(); - for (Map config : ImmutableSet.copyOf(replicationConfigs.values())) { - LOG.debug("[{}] Computing keyspace-level data for {}", logPrefix, config); - keyspaceMapsBuilder.put( - config, - KeyspaceTokenMap.build( - config, - tokenToPrimary, - ring, - tokenRanges, - tokenFactory, - replicationStrategyFactory, - logPrefix)); - } - return new DefaultTokenMap( - tokenFactory, - tokenRanges, - tokenRangesByPrimary.build(), - replicationConfigs, - keyspaceMapsBuilder.build(), - logPrefix); - } - - private final TokenFactory tokenFactory; - @VisibleForTesting final Set tokenRanges; - @VisibleForTesting final SetMultimap tokenRangesByPrimary; - @VisibleForTesting final Map> replicationConfigs; - @VisibleForTesting 
final Map, KeyspaceTokenMap> keyspaceMaps; - private final String logPrefix; - - private DefaultTokenMap( - TokenFactory tokenFactory, - Set tokenRanges, - SetMultimap tokenRangesByPrimary, - Map> replicationConfigs, - Map, KeyspaceTokenMap> keyspaceMaps, - String logPrefix) { - this.tokenFactory = tokenFactory; - this.tokenRanges = tokenRanges; - this.tokenRangesByPrimary = tokenRangesByPrimary; - this.replicationConfigs = replicationConfigs; - this.keyspaceMaps = keyspaceMaps; - this.logPrefix = logPrefix; - } - - public TokenFactory getTokenFactory() { - return tokenFactory; - } - - @NonNull - @Override - public Token parse(@NonNull String tokenString) { - return tokenFactory.parse(tokenString); - } - - @NonNull - @Override - public String format(@NonNull Token token) { - return tokenFactory.format(token); - } - - @NonNull - @Override - public Token newToken(@NonNull ByteBuffer... partitionKey) { - return tokenFactory.hash(RoutingKey.compose(partitionKey)); - } - - @NonNull - @Override - public TokenRange newTokenRange(@NonNull Token start, @NonNull Token end) { - return tokenFactory.range(start, end); - } - - @NonNull - @Override - public Set getTokenRanges() { - return tokenRanges; - } - - @NonNull - @Override - public Set getTokenRanges(@NonNull Node node) { - return tokenRangesByPrimary.get(node); - } - - @NonNull - @Override - public Set getTokenRanges(@NonNull CqlIdentifier keyspace, @NonNull Node replica) { - KeyspaceTokenMap keyspaceMap = getKeyspaceMap(keyspace); - return (keyspaceMap == null) ? Collections.emptySet() : keyspaceMap.getTokenRanges(replica); - } - - @NonNull - @Override - public Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull ByteBuffer partitionKey) { - KeyspaceTokenMap keyspaceMap = getKeyspaceMap(keyspace); - return (keyspaceMap == null) ? 
Collections.emptySet() : keyspaceMap.getReplicas(partitionKey); - } - - @NonNull - @Override - public Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull Token token) { - KeyspaceTokenMap keyspaceMap = getKeyspaceMap(keyspace); - return (keyspaceMap == null) ? Collections.emptySet() : keyspaceMap.getReplicas(token); - } - - @NonNull - @Override - public String getPartitionerName() { - return tokenFactory.getPartitionerName(); - } - - private KeyspaceTokenMap getKeyspaceMap(CqlIdentifier keyspace) { - Map config = replicationConfigs.get(keyspace); - return (config == null) ? null : keyspaceMaps.get(config); - } - - /** Called when only the schema has changed. */ - public DefaultTokenMap refresh( - @NonNull Collection nodes, - @NonNull Collection keyspaces, - @NonNull ReplicationStrategyFactory replicationStrategyFactory) { - - Map> newReplicationConfigs = - buildReplicationConfigs(keyspaces, logPrefix); - if (newReplicationConfigs.equals(replicationConfigs)) { - LOG.debug("[{}] Schema changes do not impact the token map, no refresh needed", logPrefix); - return this; - } - ImmutableMap.Builder, KeyspaceTokenMap> newKeyspaceMapsBuilder = - ImmutableMap.builder(); - - // Will only be built if needed: - Map tokenToPrimary = null; - List ring = null; - - for (Map config : ImmutableSet.copyOf(newReplicationConfigs.values())) { - KeyspaceTokenMap oldKeyspaceMap = keyspaceMaps.get(config); - if (oldKeyspaceMap != null) { - LOG.debug("[{}] Reusing existing keyspace-level data for {}", logPrefix, config); - newKeyspaceMapsBuilder.put(config, oldKeyspaceMap); - } else { - LOG.debug("[{}] Computing new keyspace-level data for {}", logPrefix, config); - if (tokenToPrimary == null) { - TokenToPrimaryAndRing tmp = buildTokenToPrimaryAndRing(nodes, tokenFactory); - tokenToPrimary = tmp.tokenToPrimary; - ring = tmp.ring; - } - newKeyspaceMapsBuilder.put( - config, - KeyspaceTokenMap.build( - config, - tokenToPrimary, - ring, - tokenRanges, - tokenFactory, - 
replicationStrategyFactory, - logPrefix)); - } - } - return new DefaultTokenMap( - tokenFactory, - tokenRanges, - tokenRangesByPrimary, - newReplicationConfigs, - newKeyspaceMapsBuilder.build(), - logPrefix); - } - - private static TokenToPrimaryAndRing buildTokenToPrimaryAndRing( - Collection nodes, TokenFactory tokenFactory) { - ImmutableMap.Builder tokenToPrimaryBuilder = ImmutableMap.builder(); - SortedSet sortedTokens = new TreeSet<>(); - for (Node node : nodes) { - for (String tokenString : ((DefaultNode) node).getRawTokens()) { - Token token = tokenFactory.parse(tokenString); - sortedTokens.add(token); - tokenToPrimaryBuilder.put(token, node); - } - } - return new TokenToPrimaryAndRing( - tokenToPrimaryBuilder.build(), ImmutableList.copyOf(sortedTokens)); - } - - static class TokenToPrimaryAndRing { - final Map tokenToPrimary; - final List ring; - - private TokenToPrimaryAndRing(Map tokenToPrimary, List ring) { - this.tokenToPrimary = tokenToPrimary; - this.ring = ring; - } - } - - private static Map> buildReplicationConfigs( - Collection keyspaces, String logPrefix) { - ImmutableMap.Builder> builder = ImmutableMap.builder(); - for (KeyspaceMetadata keyspace : keyspaces) { - if (!keyspace.isVirtual()) { - builder.put(keyspace.getName(), keyspace.getReplication()); - } - } - ImmutableMap> result = builder.build(); - LOG.debug("[{}] Computing keyspace-level data for {}", logPrefix, result); - return result; - } - - private static Set buildTokenRanges(List ring, TokenFactory factory) { - ImmutableSet.Builder builder = ImmutableSet.builder(); - // JAVA-684: if there is only one token, return the full ring (]minToken, minToken]) - if (ring.size() == 1) { - builder.add(factory.range(factory.minToken(), factory.minToken())); - } else { - for (int i = 0; i < ring.size(); i++) { - Token start = ring.get(i); - Token end = ring.get((i + 1) % ring.size()); - builder.add(factory.range(start, end)); - } - } - return builder.build(); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/EverywhereReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/EverywhereReplicationStrategy.java deleted file mode 100644 index 1973c07f5f8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/EverywhereReplicationStrategy.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class EverywhereReplicationStrategy implements ReplicationStrategy { - - @Override - public Map> computeReplicasByToken( - Map tokenToPrimary, List ring) { - ImmutableMap.Builder> result = ImmutableMap.builder(); - Set allNodes = ImmutableSet.copyOf(tokenToPrimary.values()); - for (Token token : tokenToPrimary.keySet()) { - result = result.put(token, allNodes); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java deleted file mode 100644 index 80bad8a36b1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSetMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The token data for a given replication configuration. It's shared by all keyspaces that use that - * configuration. - */ -@Immutable -class KeyspaceTokenMap { - - private static final Logger LOG = LoggerFactory.getLogger(KeyspaceTokenMap.class); - - static KeyspaceTokenMap build( - Map replicationConfig, - Map tokenToPrimary, - List ring, - Set tokenRanges, - TokenFactory tokenFactory, - ReplicationStrategyFactory replicationStrategyFactory, - String logPrefix) { - - long start = System.nanoTime(); - try { - ReplicationStrategy strategy = replicationStrategyFactory.newInstance(replicationConfig); - - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - SetMultimap tokenRangesByNode; - if (ring.size() == 1) { - // We forced the single range to ]minToken,minToken], make sure to use that instead of - // relying - // on the node's token - ImmutableSetMultimap.Builder builder = ImmutableSetMultimap.builder(); - for (Node node : tokenToPrimary.values()) { - builder.putAll(node, tokenRanges); - } - tokenRangesByNode = builder.build(); - } else { - tokenRangesByNode = buildTokenRangesByNode(tokenRanges, replicasByToken); - } - return new 
KeyspaceTokenMap(ring, tokenRangesByNode, replicasByToken, tokenFactory); - } finally { - LOG.debug( - "[{}] Computing keyspace-level data for {} took {}", - logPrefix, - replicationConfig, - NanoTime.formatTimeSince(start)); - } - } - - private final List ring; - private final SetMultimap tokenRangesByNode; - private final Map> replicasByToken; - private final TokenFactory tokenFactory; - - private KeyspaceTokenMap( - List ring, - SetMultimap tokenRangesByNode, - Map> replicasByToken, - TokenFactory tokenFactory) { - this.ring = ring; - this.tokenRangesByNode = tokenRangesByNode; - this.replicasByToken = replicasByToken; - this.tokenFactory = tokenFactory; - } - - Set getTokenRanges(Node replica) { - return tokenRangesByNode.get(replica); - } - - Set getReplicas(ByteBuffer partitionKey) { - return getReplicas(tokenFactory.hash(partitionKey)); - } - - Set getReplicas(Token token) { - // If the token happens to be one of the "primary" tokens, get result directly - Set nodes = replicasByToken.get(token); - if (nodes != null) { - return nodes; - } - // Otherwise, find the closest "primary" token on the ring - int i = Collections.binarySearch(ring, token); - if (i < 0) { - i = -i - 1; - if (i >= ring.size()) { - i = 0; - } - } - return replicasByToken.get(ring.get(i)); - } - - private static SetMultimap buildTokenRangesByNode( - Set tokenRanges, Map> replicasByToken) { - ImmutableSetMultimap.Builder result = ImmutableSetMultimap.builder(); - for (TokenRange range : tokenRanges) { - for (Node node : replicasByToken.get(range.getEnd())) { - result.put(node, range); - } - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java deleted file mode 100644 index 916947e598c..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class LocalReplicationStrategy implements ReplicationStrategy { - - @Override - public Map> computeReplicasByToken( - Map tokenToPrimary, List ring) { - ImmutableMap.Builder> result = ImmutableMap.builder(); - // Each token maps to exactly one node - for (Map.Entry entry : tokenToPrimary.entrySet()) { - result.put(entry.getKey(), ImmutableSet.of(entry.getValue())); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java deleted file mode 100644 index 1b3072d4f22..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.primitives.Longs; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -/** A token generated by {@code Murmur3Partitioner}. 
*/ -@Immutable -public class Murmur3Token implements Token { - - private final long value; - - public Murmur3Token(long value) { - this.value = value; - } - - public long getValue() { - return value; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Murmur3Token) { - Murmur3Token that = (Murmur3Token) other; - return this.value == that.value; - } else { - return false; - } - } - - @Override - public int hashCode() { - return (int) (value ^ (value >>> 32)); - } - - @Override - public int compareTo(@NonNull Token other) { - Preconditions.checkArgument( - other instanceof Murmur3Token, "Can only compare tokens of the same type"); - Murmur3Token that = (Murmur3Token) other; - return Longs.compare(this.value, that.value); - } - - @Override - public String toString() { - return "Murmur3Token(" + value + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java deleted file mode 100644 index 2d4dc975a63..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Murmur3TokenFactory implements TokenFactory { - - public static final String PARTITIONER_NAME = "org.apache.cassandra.dht.Murmur3Partitioner"; - - public static final Murmur3Token MIN_TOKEN = new Murmur3Token(Long.MIN_VALUE); - public static final Murmur3Token MAX_TOKEN = new Murmur3Token(Long.MAX_VALUE); - - @Override - public String getPartitionerName() { - return PARTITIONER_NAME; - } - - @Override - public Token hash(ByteBuffer partitionKey) { - long v = murmur(partitionKey); - return new Murmur3Token(v == Long.MIN_VALUE ? 
Long.MAX_VALUE : v); - } - - @Override - public Token parse(String tokenString) { - return new Murmur3Token(Long.parseLong(tokenString)); - } - - @Override - public String format(Token token) { - Preconditions.checkArgument( - token instanceof Murmur3Token, "Can only format Murmur3Token instances"); - return Long.toString(((Murmur3Token) token).getValue()); - } - - @Override - public Token minToken() { - return MIN_TOKEN; - } - - @Override - public TokenRange range(Token start, Token end) { - Preconditions.checkArgument( - start instanceof Murmur3Token && end instanceof Murmur3Token, - "Can only build ranges of Murmur3Token instances"); - return new Murmur3TokenRange((Murmur3Token) start, (Murmur3Token) end); - } - - // This is an adapted version of the MurmurHash.hash3_x64_128 from Cassandra used - // for M3P. Compared to that methods, there's a few inlining of arguments and we - // only return the first 64-bits of the result since that's all M3P uses. - private long murmur(ByteBuffer data) { - int offset = data.position(); - int length = data.remaining(); - - int nblocks = length >> 4; // Process as 128-bit blocks. - - long h1 = 0; - long h2 = 0; - - long c1 = 0x87c37b91114253d5L; - long c2 = 0x4cf5ad432745937fL; - - // ---------- - // body - - for (int i = 0; i < nblocks; i++) { - long k1 = getblock(data, offset, i * 2); - long k2 = getblock(data, offset, i * 2 + 1); - - k1 *= c1; - k1 = rotl64(k1, 31); - k1 *= c2; - h1 ^= k1; - h1 = rotl64(h1, 27); - h1 += h2; - h1 = h1 * 5 + 0x52dce729; - k2 *= c2; - k2 = rotl64(k2, 33); - k2 *= c1; - h2 ^= k2; - h2 = rotl64(h2, 31); - h2 += h1; - h2 = h2 * 5 + 0x38495ab5; - } - - // ---------- - // tail - - // Advance offset to the unprocessed tail of the data. 
- offset += nblocks * 16; - - long k1 = 0; - long k2 = 0; - - switch (length & 15) { - case 15: - k2 ^= ((long) data.get(offset + 14)) << 48; - // fall through - case 14: - k2 ^= ((long) data.get(offset + 13)) << 40; - // fall through - case 13: - k2 ^= ((long) data.get(offset + 12)) << 32; - // fall through - case 12: - k2 ^= ((long) data.get(offset + 11)) << 24; - // fall through - case 11: - k2 ^= ((long) data.get(offset + 10)) << 16; - // fall through - case 10: - k2 ^= ((long) data.get(offset + 9)) << 8; - // fall through - case 9: - k2 ^= ((long) data.get(offset + 8)); - k2 *= c2; - k2 = rotl64(k2, 33); - k2 *= c1; - h2 ^= k2; - // fall through - case 8: - k1 ^= ((long) data.get(offset + 7)) << 56; - // fall through - case 7: - k1 ^= ((long) data.get(offset + 6)) << 48; - // fall through - case 6: - k1 ^= ((long) data.get(offset + 5)) << 40; - // fall through - case 5: - k1 ^= ((long) data.get(offset + 4)) << 32; - // fall through - case 4: - k1 ^= ((long) data.get(offset + 3)) << 24; - // fall through - case 3: - k1 ^= ((long) data.get(offset + 2)) << 16; - // fall through - case 2: - k1 ^= ((long) data.get(offset + 1)) << 8; - // fall through - case 1: - k1 ^= ((long) data.get(offset)); - k1 *= c1; - k1 = rotl64(k1, 31); - k1 *= c2; - h1 ^= k1; - } - - // ---------- - // finalization - - h1 ^= length; - h2 ^= length; - - h1 += h2; - h2 += h1; - - h1 = fmix(h1); - h2 = fmix(h2); - - h1 += h2; - - return h1; - } - - private long getblock(ByteBuffer key, int offset, int index) { - int i_8 = index << 3; - int blockOffset = offset + i_8; - return ((long) key.get(blockOffset) & 0xff) - + (((long) key.get(blockOffset + 1) & 0xff) << 8) - + (((long) key.get(blockOffset + 2) & 0xff) << 16) - + (((long) key.get(blockOffset + 3) & 0xff) << 24) - + (((long) key.get(blockOffset + 4) & 0xff) << 32) - + (((long) key.get(blockOffset + 5) & 0xff) << 40) - + (((long) key.get(blockOffset + 6) & 0xff) << 48) - + (((long) key.get(blockOffset + 7) & 0xff) << 56); - } - - private 
long rotl64(long v, int n) { - return ((v << n) | (v >>> (64 - n))); - } - - private long fmix(long k) { - k ^= k >>> 33; - k *= 0xff51afd7ed558ccdL; - k ^= k >>> 33; - k *= 0xc4ceb9fe1a85ec53L; - k ^= k >>> 33; - return k; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java deleted file mode 100644 index 2a87cd2c3b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.math.BigInteger; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class Murmur3TokenRange extends TokenRangeBase { - - private static final BigInteger RING_END = BigInteger.valueOf(Long.MAX_VALUE); - private static final BigInteger RING_LENGTH = - RING_END.subtract(BigInteger.valueOf(Long.MIN_VALUE)); - - public Murmur3TokenRange(Murmur3Token start, Murmur3Token end) { - super(start, end, Murmur3TokenFactory.MIN_TOKEN); - } - - @Override - protected TokenRange newTokenRange(Token start, Token end) { - return new Murmur3TokenRange((Murmur3Token) start, (Murmur3Token) end); - } - - @Override - protected List split(Token startToken, Token endToken, int numberOfSplits) { - // edge case: ]min, min] means the whole ring - if (startToken.equals(endToken) && startToken.equals(Murmur3TokenFactory.MIN_TOKEN)) { - endToken = Murmur3TokenFactory.MAX_TOKEN; - } - - BigInteger start = BigInteger.valueOf(((Murmur3Token) startToken).getValue()); - BigInteger end = BigInteger.valueOf(((Murmur3Token) endToken).getValue()); - - BigInteger range = end.subtract(start); - if (range.compareTo(BigInteger.ZERO) < 0) { - range = range.add(RING_LENGTH); - } - - List values = super.split(start, range, RING_END, RING_LENGTH, numberOfSplits); - List tokens = Lists.newArrayListWithExpectedSize(values.size()); - for (BigInteger value : values) { - tokens.add(new Murmur3Token(value.longValue())); - } - return tokens; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java deleted file mode 100644 
index 0ed81083ad6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -class NetworkTopologyReplicationStrategy implements ReplicationStrategy { - - private static final Logger LOG = - LoggerFactory.getLogger(NetworkTopologyReplicationStrategy.class); - - private final Map replicationConfig; - private final Map replicationFactors; - private final String logPrefix; - - 
NetworkTopologyReplicationStrategy(Map replicationConfig, String logPrefix) { - this.replicationConfig = replicationConfig; - ImmutableMap.Builder factorsBuilder = ImmutableMap.builder(); - for (Map.Entry entry : replicationConfig.entrySet()) { - if (!entry.getKey().equals("class")) { - factorsBuilder.put(entry.getKey(), ReplicationFactor.fromString(entry.getValue())); - } - } - this.replicationFactors = factorsBuilder.build(); - this.logPrefix = logPrefix; - } - - @Override - public Map> computeReplicasByToken( - Map tokenToPrimary, List ring) { - - // The implementation of this method was adapted from - // org.apache.cassandra.locator.NetworkTopologyStrategy - - ImmutableMap.Builder> result = ImmutableMap.builder(); - Map> racks = getRacksInDcs(tokenToPrimary.values()); - Map dcNodeCount = Maps.newHashMapWithExpectedSize(replicationFactors.size()); - Set warnedDcs = Sets.newHashSetWithExpectedSize(replicationFactors.size()); - CanonicalNodeSetBuilder replicasBuilder = new CanonicalNodeSetBuilder(); - - // find maximum number of nodes in each DC - for (Node node : Sets.newHashSet(tokenToPrimary.values())) { - String dc = node.getDatacenter(); - dcNodeCount.merge(dc, 1, Integer::sum); - } - for (int i = 0; i < ring.size(); i++) { - replicasBuilder.clear(); - - Map> allDcReplicas = new HashMap<>(); - Map> seenRacks = new HashMap<>(); - Map> skippedDcEndpoints = new HashMap<>(); - for (String dc : replicationFactors.keySet()) { - allDcReplicas.put(dc, new HashSet<>()); - seenRacks.put(dc, new HashSet<>()); - skippedDcEndpoints.put(dc, new LinkedHashSet<>()); // preserve order - } - - for (int j = 0; j < ring.size() && !allDone(allDcReplicas, dcNodeCount); j++) { - Node h = tokenToPrimary.get(getTokenWrapping(i + j, ring)); - String dc = h.getDatacenter(); - if (dc == null || !allDcReplicas.containsKey(dc)) { - continue; - } - ReplicationFactor dcConfig = replicationFactors.get(dc); - assert dcConfig != null; // since allDcReplicas.containsKey(dc) - int rf = 
dcConfig.fullReplicas(); - Set dcReplicas = allDcReplicas.get(dc); - if (dcReplicas.size() >= rf) { - continue; - } - String rack = h.getRack(); - // Check if we already visited all racks in dc - if (rack == null || seenRacks.get(dc).size() == racks.get(dc).size()) { - replicasBuilder.add(h); - dcReplicas.add(h); - } else { - // Is this a new rack? - if (seenRacks.get(dc).contains(rack)) { - skippedDcEndpoints.get(dc).add(h); - } else { - replicasBuilder.add(h); - dcReplicas.add(h); - seenRacks.get(dc).add(rack); - // If we've run out of distinct racks, add the nodes skipped so far - if (seenRacks.get(dc).size() == racks.get(dc).size()) { - Iterator skippedIt = skippedDcEndpoints.get(dc).iterator(); - while (skippedIt.hasNext() && dcReplicas.size() < rf) { - Node nextSkipped = skippedIt.next(); - replicasBuilder.add(nextSkipped); - dcReplicas.add(nextSkipped); - } - } - } - } - } - // If we haven't found enough replicas after a whole trip around the ring, this probably - // means that the replication factors are broken. - // Warn the user because that leads to quadratic performance of this method (JAVA-702). - for (Map.Entry> entry : allDcReplicas.entrySet()) { - String dcName = entry.getKey(); - int expectedFactor = replicationFactors.get(dcName).fullReplicas(); - int achievedFactor = entry.getValue().size(); - if (achievedFactor < expectedFactor && !warnedDcs.contains(dcName)) { - LOG.warn( - "[{}] Error while computing token map for replication settings {}: " - + "could not achieve replication factor {} for datacenter {} (found only {} replicas).", - logPrefix, - replicationConfig, - expectedFactor, - dcName, - achievedFactor); - // only warn once per DC - warnedDcs.add(dcName); - } - } - - result.put(ring.get(i), replicasBuilder.build()); - } - return result.build(); - } - - private boolean allDone(Map> map, Map dcNodeCount) { - for (Map.Entry> entry : map.entrySet()) { - String dc = entry.getKey(); - int dcCount = (dcNodeCount.get(dc) == null) ? 
0 : dcNodeCount.get(dc); - if (entry.getValue().size() < Math.min(replicationFactors.get(dc).fullReplicas(), dcCount)) { - return false; - } - } - return true; - } - - private Map> getRacksInDcs(Iterable nodes) { - Map> result = new HashMap<>(); - for (Node node : nodes) { - Set racks = result.computeIfAbsent(node.getDatacenter(), k -> new HashSet<>()); - racks.add(node.getRack()); - } - return result; - } - - private static Token getTokenWrapping(int i, List ring) { - return ring.get(i % ring.size()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java deleted file mode 100644 index 52e32fef522..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.math.BigInteger; -import net.jcip.annotations.Immutable; - -/** A token generated by {@code RandomPartitioner}. */ -@Immutable -public class RandomToken implements Token { - - private final BigInteger value; - - public RandomToken(BigInteger value) { - this.value = value; - } - - public BigInteger getValue() { - return value; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof RandomToken) { - RandomToken that = (RandomToken) other; - return this.value.equals(that.value); - } else { - return false; - } - } - - @Override - public int hashCode() { - return value.hashCode(); - } - - @Override - public int compareTo(@NonNull Token other) { - Preconditions.checkArgument( - other instanceof RandomToken, "Can only compare tokens of the same type"); - RandomToken that = (RandomToken) other; - return this.value.compareTo(that.getValue()); - } - - @Override - public String toString() { - return "RandomToken(" + value + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java deleted file mode 100644 index 59f1bcc865b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class RandomTokenFactory implements TokenFactory { - - public static final String PARTITIONER_NAME = "org.apache.cassandra.dht.RandomPartitioner"; - - private static final BigInteger MIN_VALUE = BigInteger.ONE.negate(); - static final BigInteger MAX_VALUE = BigInteger.valueOf(2).pow(127); - public static final RandomToken MIN_TOKEN = new RandomToken(MIN_VALUE); - public static final RandomToken MAX_TOKEN = new RandomToken(MAX_VALUE); - - private final MessageDigest prototype; - private final boolean supportsClone; - - public RandomTokenFactory() { - prototype = createMessageDigest(); - boolean supportsClone; - try { - prototype.clone(); - supportsClone = true; - } catch (CloneNotSupportedException e) { - supportsClone = false; - } - this.supportsClone = supportsClone; - } - - @Override - public String getPartitionerName() { - return PARTITIONER_NAME; - } - - @Override - public Token hash(ByteBuffer partitionKey) { - 
return new RandomToken(md5(partitionKey)); - } - - @Override - public Token parse(String tokenString) { - return new RandomToken(new BigInteger(tokenString)); - } - - @Override - public String format(Token token) { - Preconditions.checkArgument( - token instanceof RandomToken, "Can only format RandomToken instances"); - return ((RandomToken) token).getValue().toString(); - } - - @Override - public Token minToken() { - return MIN_TOKEN; - } - - @Override - public TokenRange range(Token start, Token end) { - Preconditions.checkArgument( - start instanceof RandomToken && end instanceof RandomToken, - "Can only build ranges of RandomToken instances"); - return new RandomTokenRange((RandomToken) start, (RandomToken) end); - } - - private static MessageDigest createMessageDigest() { - try { - return MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException("MD5 doesn't seem to be available on this JVM", e); - } - } - - private BigInteger md5(ByteBuffer data) { - MessageDigest digest = newMessageDigest(); - digest.update(data.duplicate()); - return new BigInteger(digest.digest()).abs(); - } - - private MessageDigest newMessageDigest() { - if (supportsClone) { - try { - return (MessageDigest) prototype.clone(); - } catch (CloneNotSupportedException ignored) { - } - } - return createMessageDigest(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java deleted file mode 100644 index d1a98a185db..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.internal.core.metadata.token.RandomTokenFactory.MAX_VALUE; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.math.BigInteger; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class RandomTokenRange extends TokenRangeBase { - - private static final BigInteger RING_LENGTH = MAX_VALUE.add(BigInteger.ONE); - - public RandomTokenRange(RandomToken start, RandomToken end) { - super(start, end, RandomTokenFactory.MIN_TOKEN); - } - - @Override - protected TokenRange newTokenRange(Token start, Token end) { - return new RandomTokenRange(((RandomToken) start), ((RandomToken) end)); - } - - @Override - protected List split(Token startToken, Token endToken, int numberOfSplits) { - // edge case: ]min, min] means the whole ring - if (startToken.equals(endToken) && startToken.equals(RandomTokenFactory.MIN_TOKEN)) { - endToken = RandomTokenFactory.MAX_TOKEN; - } - - BigInteger start = ((RandomToken) startToken).getValue(); - BigInteger end = ((RandomToken) endToken).getValue(); - - BigInteger range = end.subtract(start); - if (range.compareTo(BigInteger.ZERO) < 0) { - range = range.add(RING_LENGTH); - } 
- - List values = super.split(start, range, MAX_VALUE, RING_LENGTH, numberOfSplits); - List tokens = Lists.newArrayListWithExpectedSize(values.size()); - for (BigInteger value : values) { - tokens.add(new RandomToken(value)); - } - return tokens; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactor.java deleted file mode 100644 index 966372da621..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactor.java +++ /dev/null @@ -1,82 +0,0 @@ -package com.datastax.oss.driver.internal.core.metadata.token; -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -import java.util.Objects; - -// This class is a subset of server version at org.apache.cassandra.locator.ReplicationFactor -public class ReplicationFactor { - private final int allReplicas; - private final int fullReplicas; - private final int transientReplicas; - - public ReplicationFactor(int allReplicas, int transientReplicas) { - this.allReplicas = allReplicas; - this.transientReplicas = transientReplicas; - this.fullReplicas = allReplicas - transientReplicas; - } - - public ReplicationFactor(int allReplicas) { - this(allReplicas, 0); - } - - public int fullReplicas() { - return fullReplicas; - } - - public int transientReplicas() { - return transientReplicas; - } - - public boolean hasTransientReplicas() { - return allReplicas != fullReplicas; - } - - public static ReplicationFactor fromString(String s) { - if (s.contains("/")) { - - int slash = s.indexOf('/'); - String allPart = s.substring(0, slash); - String transientPart = s.substring(slash + 1); - return new ReplicationFactor(Integer.parseInt(allPart), Integer.parseInt(transientPart)); - } else { - return new ReplicationFactor(Integer.parseInt(s), 0); - } - } - - @Override - public String toString() { - return allReplicas + (hasTransientReplicas() ? 
"/" + transientReplicas() : ""); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ReplicationFactor)) { - return false; - } - ReplicationFactor that = (ReplicationFactor) o; - return allReplicas == that.allReplicas && fullReplicas == that.fullReplicas; - } - - @Override - public int hashCode() { - return Objects.hash(allReplicas, fullReplicas); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java deleted file mode 100644 index e16841e5107..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public interface ReplicationStrategy { - Map> computeReplicasByToken(Map tokenToPrimary, List ring); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java deleted file mode 100644 index 4f01d2ac920..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import java.util.Map; - -public interface ReplicationStrategyFactory { - ReplicationStrategy newInstance(Map replicationConfig); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java deleted file mode 100644 index db2c16112a1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class SimpleReplicationStrategy implements ReplicationStrategy { - - private final ReplicationFactor replicationFactor; - - SimpleReplicationStrategy(Map replicationConfig) { - this(extractReplicationFactor(replicationConfig)); - } - - @VisibleForTesting - SimpleReplicationStrategy(ReplicationFactor replicationFactor) { - this.replicationFactor = replicationFactor; - } - - @Override - public Map> computeReplicasByToken( - Map tokenToPrimary, List ring) { - - int rf = Math.min(replicationFactor.fullReplicas(), ring.size()); - - ImmutableMap.Builder> result = ImmutableMap.builder(); - CanonicalNodeSetBuilder replicasBuilder = new CanonicalNodeSetBuilder(); - - for (int i = 0; i < ring.size(); i++) { - replicasBuilder.clear(); - for (int j = 0; j < ring.size() && replicasBuilder.size() < rf; j++) { - replicasBuilder.add(tokenToPrimary.get(getTokenWrapping(i + j, ring))); - } - result.put(ring.get(i), replicasBuilder.build()); - } - return result.build(); - } - - private static Token getTokenWrapping(int i, List ring) { - return ring.get(i % ring.size()); - } - - private static ReplicationFactor extractReplicationFactor(Map replicationConfig) { - String factorString = replicationConfig.get("replication_factor"); - Preconditions.checkNotNull(factorString, "Missing replication factor in " + replicationConfig); - return ReplicationFactor.fromString(factorString); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java deleted file mode 100644 index 8a1731be385..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import java.nio.ByteBuffer; - -/** Manages token instances for a partitioner implementation. */ -public interface TokenFactory { - - String getPartitionerName(); - - Token hash(ByteBuffer partitionKey); - - Token parse(String tokenString); - - String format(Token token); - - /** - * The minimum token is a special value that no key ever hashes to, it's used both as lower and - * upper bound. 
- */ - Token minToken(); - - TokenRange range(Token start, Token end); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java deleted file mode 100644 index f7e31da9870..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -/** A thin layer of indirection to make token factories pluggable. 
*/ -public interface TokenFactoryRegistry { - TokenFactory tokenFactoryFor(String partitioner); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java deleted file mode 100644 index f63f9dd1ab4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class TokenRangeBase implements TokenRange { - - private final Token start; - private final Token end; - private final Token minToken; - - protected TokenRangeBase(Token start, Token end, Token minToken) { - this.start = start; - this.end = end; - this.minToken = minToken; - } - - @NonNull - @Override - public Token getStart() { - return start; - } - - @NonNull - @Override - public Token getEnd() { - return end; - } - - @NonNull - @Override - public List splitEvenly(int numberOfSplits) { - if (numberOfSplits < 1) - throw new IllegalArgumentException( - String.format("numberOfSplits (%d) must be greater than 0.", numberOfSplits)); - if (isEmpty()) { - throw new IllegalArgumentException("Can't split empty range " + this); - } - - List tokenRanges = new ArrayList<>(); - List splitPoints = split(start, end, numberOfSplits); - Token splitStart = start; - for (Token splitEnd : splitPoints) { - tokenRanges.add(newTokenRange(splitStart, splitEnd)); - splitStart = splitEnd; - } - tokenRanges.add(newTokenRange(splitStart, end)); - return tokenRanges; - } - - protected abstract List split(Token start, Token end, int numberOfSplits); - - /** This is used by {@link #split(Token, Token, int)} implementations. 
*/ - protected List split( - BigInteger start, - BigInteger range, - BigInteger ringEnd, - BigInteger ringLength, - int numberOfSplits) { - BigInteger[] tmp = range.divideAndRemainder(BigInteger.valueOf(numberOfSplits)); - BigInteger divider = tmp[0]; - int remainder = tmp[1].intValue(); - - List results = Lists.newArrayListWithExpectedSize(numberOfSplits - 1); - BigInteger current = start; - BigInteger dividerPlusOne = - (remainder == 0) - ? null // won't be used - : divider.add(BigInteger.ONE); - - for (int i = 1; i < numberOfSplits; i++) { - current = current.add(remainder-- > 0 ? dividerPlusOne : divider); - if (ringEnd != null && current.compareTo(ringEnd) > 0) current = current.subtract(ringLength); - results.add(current); - } - return results; - } - - protected abstract TokenRange newTokenRange(Token start, Token end); - - @Override - public boolean isEmpty() { - return start.equals(end) && !start.equals(minToken); - } - - @Override - public boolean isWrappedAround() { - return start.compareTo(end) > 0 && !end.equals(minToken); - } - - @Override - public boolean isFullRing() { - return start.equals(minToken) && end.equals(minToken); - } - - @NonNull - @Override - public List unwrap() { - if (isWrappedAround()) { - return ImmutableList.of(newTokenRange(start, minToken), newTokenRange(minToken, end)); - } else { - return ImmutableList.of(this); - } - } - - @Override - public boolean intersects(@NonNull TokenRange that) { - // Empty ranges never intersect any other range - if (this.isEmpty() || that.isEmpty()) { - return false; - } - - return contains(this, that.getStart(), true) - || contains(this, that.getEnd(), false) - || contains(that, this.start, true) - || contains(that, this.end, false); - } - - @NonNull - @Override - public List intersectWith(@NonNull TokenRange that) { - if (!this.intersects(that)) { - throw new IllegalArgumentException( - "The two ranges do not intersect, use intersects() before calling this method"); - } - - List intersected = 
Lists.newArrayList(); - - // Compare the unwrapped ranges to one another. - List unwrappedForThis = this.unwrap(); - List unwrappedForThat = that.unwrap(); - for (TokenRange t1 : unwrappedForThis) { - for (TokenRange t2 : unwrappedForThat) { - if (t1.intersects(t2)) { - intersected.add( - newTokenRange( - contains(t1, t2.getStart(), true) ? t2.getStart() : t1.getStart(), - contains(t1, t2.getEnd(), false) ? t2.getEnd() : t1.getEnd())); - } - } - } - - // If two intersecting ranges were produced, merge them if they are adjacent. - // This could happen in the case that two wrapped ranges intersected. - if (intersected.size() == 2) { - TokenRange t1 = intersected.get(0); - TokenRange t2 = intersected.get(1); - if (t1.getEnd().equals(t2.getStart()) || t2.getEnd().equals(t1.getStart())) { - return ImmutableList.of(t1.mergeWith(t2)); - } - } - - return intersected; - } - - @Override - public boolean contains(@NonNull Token token) { - return contains(this, token, false); - } - - // isStart handles the case where the token is the start of another range, for example: - // * ]1,2] contains 2, but it does not contain the start of ]2,3] - // * ]1,2] does not contain 1, but it contains the start of ]1,3] - @VisibleForTesting - boolean contains(TokenRange range, Token token, boolean isStart) { - if (range.isEmpty()) { - return false; - } - if (range.getEnd().equals(minToken)) { - if (range.getStart().equals(minToken)) { // ]min, min] = full ring, contains everything - return true; - } else if (token.equals(minToken)) { - return !isStart; - } else { - return isStart - ? token.compareTo(range.getStart()) >= 0 - : token.compareTo(range.getStart()) > 0; - } - } else { - boolean isAfterStart = - isStart ? token.compareTo(range.getStart()) >= 0 : token.compareTo(range.getStart()) > 0; - boolean isBeforeEnd = - isStart ? token.compareTo(range.getEnd()) < 0 : token.compareTo(range.getEnd()) <= 0; - return range.isWrappedAround() - ? 
isAfterStart || isBeforeEnd // ####]----]#### - : isAfterStart && isBeforeEnd; // ----]####]---- - } - } - - @NonNull - @Override - public TokenRange mergeWith(@NonNull TokenRange that) { - if (this.equals(that)) { - return this; - } - - if (!(this.intersects(that) - || this.end.equals(that.getStart()) - || that.getEnd().equals(this.start))) { - throw new IllegalArgumentException( - String.format( - "Can't merge %s with %s because they neither intersect nor are adjacent", - this, that)); - } - - if (this.isEmpty()) { - return that; - } - - if (that.isEmpty()) { - return this; - } - - // That's actually "starts in or is adjacent to the end of" - boolean thisStartsInThat = contains(that, this.start, true) || this.start.equals(that.getEnd()); - boolean thatStartsInThis = - contains(this, that.getStart(), true) || that.getStart().equals(this.end); - - // This takes care of all the cases that return the full ring, so that we don't have to worry - // about them below - if (thisStartsInThat && thatStartsInThis) { - return fullRing(); - } - - // Starting at this.start, see how far we can go while staying in at least one of the ranges. - Token mergedEnd = - (thatStartsInThis && !contains(this, that.getEnd(), false)) ? that.getEnd() : this.end; - - // Repeat in the other direction. - Token mergedStart = thisStartsInThat ? 
that.getStart() : this.start; - - return newTokenRange(mergedStart, mergedEnd); - } - - private TokenRange fullRing() { - return newTokenRange(minToken, minToken); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TokenRange) { - TokenRange that = (TokenRange) other; - return this.start.equals(that.getStart()) && this.end.equals(that.getEnd()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(start, end); - } - - @Override - public int compareTo(@NonNull TokenRange that) { - if (this.equals(that)) { - return 0; - } else { - int compareStart = this.start.compareTo(that.getStart()); - return compareStart != 0 ? compareStart : this.end.compareTo(that.getEnd()); - } - } - - @Override - public String toString() { - return String.format("%s(%s, %s)", getClass().getSimpleName(), start, end); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/AbstractMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/AbstractMetricUpdater.java deleted file mode 100644 index 3d7dc50a7c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/AbstractMetricUpdater.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler; -import com.datastax.oss.driver.internal.core.session.throttling.RateLimitingRequestThrottler; -import com.datastax.oss.driver.shaded.guava.common.cache.Cache; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.util.Timeout; -import java.time.Duration; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public abstract class AbstractMetricUpdater implements MetricUpdater { - - private static final Logger LOG = LoggerFactory.getLogger(AbstractMetricUpdater.class); - - // Not final for testing purposes - public static Duration MIN_EXPIRE_AFTER = 
Duration.ofMinutes(5); - - protected final InternalDriverContext context; - protected final Set enabledMetrics; - - private final AtomicReference metricsExpirationTimeoutRef = new AtomicReference<>(); - private final Duration expireAfter; - - protected AbstractMetricUpdater(InternalDriverContext context, Set enabledMetrics) { - this.context = context; - this.enabledMetrics = enabledMetrics; - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - Duration expireAfter = config.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER); - if (expireAfter.compareTo(MIN_EXPIRE_AFTER) < 0) { - LOG.warn( - "[{}] Value too low for {}: {}. Forcing to {} instead.", - context.getSessionName(), - DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), - expireAfter, - MIN_EXPIRE_AFTER); - expireAfter = MIN_EXPIRE_AFTER; - } - this.expireAfter = expireAfter; - } - - @Override - public boolean isEnabled(MetricT metric, String profileName) { - return enabledMetrics.contains(metric); - } - - public Duration getExpireAfter() { - return expireAfter; - } - - protected int connectedNodes() { - int count = 0; - for (Node node : context.getMetadataManager().getMetadata().getNodes().values()) { - if (node.getOpenConnections() > 0) { - count++; - } - } - return count; - } - - protected int throttlingQueueSize() { - RequestThrottler requestThrottler = context.getRequestThrottler(); - if (requestThrottler instanceof ConcurrencyLimitingRequestThrottler) { - return ((ConcurrencyLimitingRequestThrottler) requestThrottler).getQueueSize(); - } - if (requestThrottler instanceof RateLimitingRequestThrottler) { - return ((RateLimitingRequestThrottler) requestThrottler).getQueueSize(); - } - LOG.warn( - "[{}] Metric {} does not support {}, it will always return 0", - context.getSessionName(), - DefaultSessionMetric.THROTTLING_QUEUE_SIZE.getPath(), - requestThrottler.getClass().getName()); - return 0; - } - - protected long preparedStatementCacheSize() { - Cache cache = 
getPreparedStatementCache(); - if (cache == null) { - LOG.warn( - "[{}] Metric {} is enabled in the config, " - + "but it looks like no CQL prepare processor is registered. " - + "The gauge will always return 0", - context.getSessionName(), - DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath()); - return 0L; - } - return cache.size(); - } - - @Nullable - protected Cache getPreparedStatementCache() { - // By default, both the sync processor and the async ones are registered and they share the same - // cache. But with a custom processor registry, there could be only one of the two present. - for (RequestProcessor processor : context.getRequestProcessorRegistry().getProcessors()) { - if (processor instanceof CqlPrepareAsyncProcessor) { - return ((CqlPrepareAsyncProcessor) processor).getCache(); - } else if (processor instanceof CqlPrepareSyncProcessor) { - return ((CqlPrepareSyncProcessor) processor).getCache(); - } - } - return null; - } - - protected int availableStreamIds(Node node) { - ChannelPool pool = context.getPoolManager().getPools().get(node); - return (pool == null) ? 0 : pool.getAvailableIds(); - } - - protected int inFlightRequests(Node node) { - ChannelPool pool = context.getPoolManager().getPools().get(node); - return (pool == null) ? 0 : pool.getInFlight(); - } - - protected int orphanedStreamIds(Node node) { - ChannelPool pool = context.getPoolManager().getPools().get(node); - return (pool == null) ? 
0 : pool.getOrphanedIds(); - } - - protected void startMetricsExpirationTimeout() { - metricsExpirationTimeoutRef.accumulateAndGet( - newTimeout(), - (current, update) -> { - if (current == null) { - return update; - } else { - update.cancel(); - return current; - } - }); - } - - protected void cancelMetricsExpirationTimeout() { - Timeout t = metricsExpirationTimeoutRef.getAndSet(null); - if (t != null) { - t.cancel(); - } - } - - protected Timeout newTimeout() { - return context - .getNettyOptions() - .getTimer() - .newTimeout( - t -> { - clearMetrics(); - cancelMetricsExpirationTimeout(); - }, - expireAfter.toNanos(), - TimeUnit.NANOSECONDS); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricId.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricId.java deleted file mode 100644 index c1c2e80e387..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricId.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public final class DefaultMetricId implements MetricId { - - private final String name; - private final ImmutableMap tags; - - public DefaultMetricId(String name, Map tags) { - this.name = Objects.requireNonNull(name, "name cannot be null"); - this.tags = ImmutableMap.copyOf(Objects.requireNonNull(tags, "tags cannot be null")); - } - - @NonNull - @Override - public String getName() { - return name; - } - - @NonNull - @Override - public Map getTags() { - return tags; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - DefaultMetricId that = (DefaultMetricId) o; - return name.equals(that.name) && tags.equals(that.tags); - } - - @Override - public int hashCode() { - return Objects.hash(name, tags); - } - - @Override - public String toString() { - return name + tags; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGenerator.java deleted file mode 100644 index d4bacb35df9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGenerator.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; - -/** - * The default {@link MetricIdGenerator}. - * - *

This generator generates unique names, containing the session name, the node endpoint (for - * node metrics), and the metric prefix. It does not generate tags. - */ -public class DefaultMetricIdGenerator implements MetricIdGenerator { - - private final String sessionPrefix; - private final String nodePrefix; - - @SuppressWarnings("unused") - public DefaultMetricIdGenerator(DriverContext context) { - String sessionName = context.getSessionName(); - String prefix = - Objects.requireNonNull( - context - .getConfig() - .getDefaultProfile() - .getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")); - sessionPrefix = prefix.isEmpty() ? sessionName + '.' : prefix + '.' + sessionName + '.'; - nodePrefix = sessionPrefix + "nodes."; - } - - @NonNull - @Override - public MetricId sessionMetricId(@NonNull SessionMetric metric) { - return new DefaultMetricId(sessionPrefix + metric.getPath(), ImmutableMap.of()); - } - - @NonNull - @Override - public MetricId nodeMetricId(@NonNull Node node, @NonNull NodeMetric metric) { - return new DefaultMetricId( - nodePrefix + node.getEndPoint().asMetricPrefix() + '.' + metric.getPath(), - ImmutableMap.of()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java deleted file mode 100644 index b15dc955760..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.Metric; -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultMetrics implements Metrics { - - private final MetricRegistry registry; - private final DropwizardSessionMetricUpdater sessionUpdater; - - public DefaultMetrics(MetricRegistry registry, DropwizardSessionMetricUpdater sessionUpdater) { - this.registry = registry; - this.sessionUpdater = sessionUpdater; - } - - @NonNull - @Override - public MetricRegistry getRegistry() { - return registry; - } - - @NonNull - @Override - @SuppressWarnings("TypeParameterUnusedInFormals") - public Optional getSessionMetric( - @NonNull SessionMetric metric, String profileName) { - return Optional.ofNullable(sessionUpdater.getMetric(metric, profileName)); - } - - @NonNull - @Override - @SuppressWarnings("TypeParameterUnusedInFormals") - public Optional getNodeMetric( - @NonNull Node node, @NonNull NodeMetric metric, String profileName) { - NodeMetricUpdater nodeUpdater = ((DefaultNode) node).getMetricUpdater(); - return Optional.ofNullable( - ((DropwizardNodeMetricUpdater) 
nodeUpdater).getMetric(metric, profileName)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactory.java deleted file mode 100644 index 7869f8a8af6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactory.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static com.datastax.oss.driver.internal.core.util.Dependency.DROPWIZARD; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DefaultMetricsFactory implements MetricsFactory { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultMetricsFactory.class); - - private final MetricsFactory delegate; - - @SuppressWarnings("unused") - public DefaultMetricsFactory(DriverContext context) { - if (DefaultDependencyChecker.isPresent(DROPWIZARD)) { - this.delegate = new DropwizardMetricsFactory(context); - } else { - this.delegate = new NoopMetricsFactory(context); - } - LOG.debug("[{}] Using {}", context.getSessionName(), delegate.getClass().getSimpleName()); - } - - @Override - public Optional getMetrics() { - return delegate.getMetrics(); - } - - @Override - public SessionMetricUpdater getSessionUpdater() { - return delegate.getSessionUpdater(); - } - - @Override - public NodeMetricUpdater newNodeUpdater(Node node) { - return delegate.newNodeUpdater(node); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactorySubstitutions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactorySubstitutions.java deleted file mode 100644 index 8332cdcca18..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactorySubstitutions.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import static com.datastax.oss.driver.internal.core.util.Dependency.DROPWIZARD; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; -import com.oracle.svm.core.annotate.Alias; -import com.oracle.svm.core.annotate.Delete; -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import com.oracle.svm.core.annotate.TargetElement; -import java.util.function.BooleanSupplier; - -@SuppressWarnings("unused") -public class DefaultMetricsFactorySubstitutions { - - @TargetClass(value = DefaultMetricsFactory.class, onlyWith = DropwizardMissing.class) - public static final class DefaultMetricsFactoryDropwizardMissing { - - @Alias - @TargetElement(name = "delegate") - @SuppressWarnings({"FieldCanBeLocal", "FieldMayBeFinal"}) - private MetricsFactory delegate; - - @Substitute - @TargetElement(name = TargetElement.CONSTRUCTOR_NAME) - public DefaultMetricsFactoryDropwizardMissing(DriverContext context) { - this.delegate = new NoopMetricsFactory(context); - } - } - - @TargetClass(value = DropwizardMetricsFactory.class, onlyWith = DropwizardMissing.class) - @Delete - public static final class 
DeleteDropwizardMetricsFactory {} - - public static class DropwizardMissing implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return !GraalDependencyChecker.isPresent(DROPWIZARD); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java deleted file mode 100644 index 9377fb3a17e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.Counter; -import com.codahale.metrics.Histogram; -import com.codahale.metrics.Meter; -import com.codahale.metrics.Metric; -import com.codahale.metrics.MetricRegistry; -import com.codahale.metrics.Reservoir; -import com.codahale.metrics.Timer; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public abstract class DropwizardMetricUpdater extends AbstractMetricUpdater { - - private static final Logger LOG = LoggerFactory.getLogger(DropwizardMetricUpdater.class); - - protected final MetricRegistry registry; - - protected final ConcurrentMap metrics = new ConcurrentHashMap<>(); - - protected final ConcurrentMap reservoirs = new ConcurrentHashMap<>(); - - protected DropwizardMetricUpdater( - InternalDriverContext context, Set enabledMetrics, MetricRegistry registry) { - super(context, enabledMetrics); - this.registry = registry; - } - - @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) - public T getMetric( - MetricT metric, @SuppressWarnings("unused") String profileName) { - return (T) metrics.get(metric); - } - - @Override - public void incrementCounter(MetricT metric, @Nullable String profileName, long amount) { - if (isEnabled(metric, profileName)) { - getOrCreateCounterFor(metric).inc(amount); - } - } - - @Override - public void updateHistogram(MetricT metric, @Nullable String profileName, long value) { - if 
(isEnabled(metric, profileName)) { - getOrCreateHistogramFor(metric).update(value); - } - } - - @Override - public void markMeter(MetricT metric, @Nullable String profileName, long amount) { - if (isEnabled(metric, profileName)) { - getOrCreateMeterFor(metric).mark(amount); - } - } - - @Override - public void updateTimer( - MetricT metric, @Nullable String profileName, long duration, TimeUnit unit) { - if (isEnabled(metric, profileName)) { - getOrCreateTimerFor(metric).update(duration, unit); - } - } - - @Override - public void clearMetrics() { - for (MetricT metric : metrics.keySet()) { - MetricId id = getMetricId(metric); - registry.remove(id.getName()); - } - metrics.clear(); - reservoirs.clear(); - } - - protected abstract MetricId getMetricId(MetricT metric); - - protected void initializeGauge( - MetricT metric, DriverExecutionProfile profile, Supplier supplier) { - if (isEnabled(metric, profile.getName())) { - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - return registry.gauge(id.getName(), () -> supplier::get); - }); - } - } - - protected void initializeCounter(MetricT metric, DriverExecutionProfile profile) { - if (isEnabled(metric, profile.getName())) { - getOrCreateCounterFor(metric); - } - } - - protected void initializeHdrTimer( - MetricT metric, - DriverExecutionProfile profile, - DriverOption highestLatency, - DriverOption significantDigits, - DriverOption interval) { - if (isEnabled(metric, profile.getName())) { - reservoirs.computeIfAbsent( - metric, m -> createHdrReservoir(m, profile, highestLatency, significantDigits, interval)); - getOrCreateTimerFor(metric); - } - } - - protected Counter getOrCreateCounterFor(MetricT metric) { - return (Counter) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - return registry.counter(id.getName()); - }); - } - - protected Meter getOrCreateMeterFor(MetricT metric) { - return (Meter) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = 
getMetricId(m); - return registry.meter(id.getName()); - }); - } - - protected Histogram getOrCreateHistogramFor(MetricT metric) { - return (Histogram) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - return registry.histogram(id.getName()); - }); - } - - protected Timer getOrCreateTimerFor(MetricT metric) { - return (Timer) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - Reservoir reservoir = reservoirs.get(metric); - Timer timer = reservoir == null ? new Timer() : new Timer(reservoir); - return registry.timer(id.getName(), () -> timer); - }); - } - - protected HdrReservoir createHdrReservoir( - MetricT metric, - DriverExecutionProfile profile, - DriverOption highestLatencyOption, - DriverOption significantDigitsOption, - DriverOption intervalOption) { - MetricId id = getMetricId(metric); - Duration highestLatency = profile.getDuration(highestLatencyOption); - int significantDigits = profile.getInt(significantDigitsOption); - if (significantDigits < 0 || significantDigits > 5) { - LOG.warn( - "[{}] Configuration option {} is out of range (expected between 0 and 5, " - + "found {}); using 3 instead.", - id.getName(), - significantDigitsOption, - significantDigits); - significantDigits = 3; - } - Duration refreshInterval = profile.getDuration(intervalOption); - return new HdrReservoir(highestLatency, significantDigits, refreshInterval, id.getName()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java deleted file mode 100644 index 5f28f8f5060..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.util.concurrent.EventExecutor; -import java.util.Optional; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DropwizardMetricsFactory implements MetricsFactory { - - private static final Logger LOG = LoggerFactory.getLogger(DropwizardMetricsFactory.class); - - private final 
InternalDriverContext context; - private final Set enabledNodeMetrics; - private final MetricRegistry registry; - @Nullable private final Metrics metrics; - private final SessionMetricUpdater sessionUpdater; - - public DropwizardMetricsFactory(DriverContext context) { - this.context = (InternalDriverContext) context; - String logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - Set enabledSessionMetrics = - MetricPaths.parseSessionMetricPaths( - config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED), logPrefix); - this.enabledNodeMetrics = - MetricPaths.parseNodeMetricPaths( - config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED), logPrefix); - if (enabledSessionMetrics.isEmpty() && enabledNodeMetrics.isEmpty()) { - LOG.debug("[{}] All metrics are disabled, Session.getMetrics will be empty", logPrefix); - this.registry = null; - this.sessionUpdater = NoopSessionMetricUpdater.INSTANCE; - this.metrics = null; - } else { - // try to get the metric registry from the context - Object possibleMetricRegistry = this.context.getMetricRegistry(); - if (possibleMetricRegistry == null) { - // metrics are enabled, but a metric registry was not supplied to the context - // create a registry object - possibleMetricRegistry = new MetricRegistry(); - } - if (possibleMetricRegistry instanceof MetricRegistry) { - this.registry = (MetricRegistry) possibleMetricRegistry; - DropwizardSessionMetricUpdater dropwizardSessionUpdater = - new DropwizardSessionMetricUpdater(this.context, enabledSessionMetrics, registry); - this.sessionUpdater = dropwizardSessionUpdater; - this.metrics = new DefaultMetrics(registry, dropwizardSessionUpdater); - } else { - // Metrics are enabled, but the registry object is not an expected type - throw new IllegalArgumentException( - "Unexpected Metrics registry object. 
Expected registry object to be of type '" - + MetricRegistry.class.getName() - + "', but was '" - + possibleMetricRegistry.getClass().getName() - + "'"); - } - if (!enabledNodeMetrics.isEmpty()) { - EventExecutor adminEventExecutor = - this.context.getNettyOptions().adminEventExecutorGroup().next(); - this.context - .getEventBus() - .register( - NodeStateEvent.class, - RunOrSchedule.on(adminEventExecutor, this::processNodeStateEvent)); - } - } - } - - @Override - public Optional getMetrics() { - return Optional.ofNullable(metrics); - } - - @Override - public SessionMetricUpdater getSessionUpdater() { - return sessionUpdater; - } - - @Override - public NodeMetricUpdater newNodeUpdater(Node node) { - if (registry == null) { - return NoopNodeMetricUpdater.INSTANCE; - } else { - return new DropwizardNodeMetricUpdater(node, context, enabledNodeMetrics, registry); - } - } - - protected void processNodeStateEvent(NodeStateEvent event) { - if (event.newState == NodeState.DOWN - || event.newState == NodeState.FORCED_DOWN - || event.newState == null) { - // node is DOWN or REMOVED - ((DropwizardNodeMetricUpdater) event.node.getMetricUpdater()).startMetricsExpirationTimeout(); - } else if (event.newState == NodeState.UP || event.newState == NodeState.UNKNOWN) { - // node is UP or ADDED - ((DropwizardNodeMetricUpdater) event.node.getMetricUpdater()) - .cancelMetricsExpirationTimeout(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java deleted file mode 100644 index 2e5e6c8db3d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.MetricRegistry; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DropwizardNodeMetricUpdater extends DropwizardMetricUpdater - implements NodeMetricUpdater { - - private final Node node; - - public DropwizardNodeMetricUpdater( - Node node, - InternalDriverContext context, - Set enabledMetrics, - MetricRegistry registry) { - super(context, enabledMetrics, registry); - this.node = node; - - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - - initializeGauge(DefaultNodeMetric.OPEN_CONNECTIONS, profile, node::getOpenConnections); - initializeGauge(DefaultNodeMetric.AVAILABLE_STREAMS, profile, () -> 
availableStreamIds(node)); - initializeGauge(DefaultNodeMetric.IN_FLIGHT, profile, () -> inFlightRequests(node)); - initializeGauge(DefaultNodeMetric.ORPHANED_STREAMS, profile, () -> orphanedStreamIds(node)); - - initializeCounter(DefaultNodeMetric.UNSENT_REQUESTS, profile); - initializeCounter(DefaultNodeMetric.ABORTED_REQUESTS, profile); - initializeCounter(DefaultNodeMetric.WRITE_TIMEOUTS, profile); - initializeCounter(DefaultNodeMetric.READ_TIMEOUTS, profile); - initializeCounter(DefaultNodeMetric.UNAVAILABLES, profile); - initializeCounter(DefaultNodeMetric.OTHER_ERRORS, profile); - initializeCounter(DefaultNodeMetric.RETRIES, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_ABORTED, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, profile); - initializeCounter(DefaultNodeMetric.IGNORES, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_ABORTED, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_UNAVAILABLE, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_OTHER_ERROR, profile); - initializeCounter(DefaultNodeMetric.SPECULATIVE_EXECUTIONS, profile); - initializeCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, profile); - initializeCounter(DefaultNodeMetric.AUTHENTICATION_ERRORS, profile); - - initializeHdrTimer( - DefaultNodeMetric.CQL_MESSAGES, - profile, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_INTERVAL); - initializeHdrTimer( - DseNodeMetric.GRAPH_MESSAGES, - profile, - 
DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_INTERVAL); - } - - @Override - protected MetricId getMetricId(NodeMetric metric) { - MetricId id = context.getMetricIdGenerator().nodeMetricId(node, metric); - if (!id.getTags().isEmpty()) { - throw new IllegalStateException("Cannot use metric tags with Dropwizard"); - } - return id; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java deleted file mode 100644 index 94e10ad6936..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.MetricRegistry; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DropwizardSessionMetricUpdater extends DropwizardMetricUpdater - implements SessionMetricUpdater { - - public DropwizardSessionMetricUpdater( - InternalDriverContext context, Set enabledMetrics, MetricRegistry registry) { - super(context, enabledMetrics, registry); - - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - - initializeGauge(DefaultSessionMetric.CONNECTED_NODES, profile, this::connectedNodes); - initializeGauge(DefaultSessionMetric.THROTTLING_QUEUE_SIZE, profile, this::throttlingQueueSize); - initializeGauge( - DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE, profile, this::preparedStatementCacheSize); - - initializeCounter(DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, profile); - initializeCounter(DefaultSessionMetric.THROTTLING_ERRORS, profile); - initializeCounter(DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, profile); - - initializeHdrTimer( - DefaultSessionMetric.CQL_REQUESTS, - profile, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_INTERVAL); - initializeHdrTimer( - DefaultSessionMetric.THROTTLING_DELAY, - profile, - DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, - DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS, - 
DefaultDriverOption.METRICS_SESSION_THROTTLING_INTERVAL); - initializeHdrTimer( - DseSessionMetric.CONTINUOUS_CQL_REQUESTS, - profile, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL); - initializeHdrTimer( - DseSessionMetric.GRAPH_REQUESTS, - profile, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_INTERVAL); - } - - @Override - protected MetricId getMetricId(SessionMetric metric) { - MetricId id = context.getMetricIdGenerator().sessionMetricId(metric); - if (!id.getTags().isEmpty()) { - throw new IllegalStateException("Cannot use metric tags with Dropwizard"); - } - return id; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java deleted file mode 100644 index c66fe1dbf8a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.Reservoir; -import com.codahale.metrics.Snapshot; -import java.io.OutputStream; -import java.time.Duration; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; -import org.HdrHistogram.Histogram; -import org.HdrHistogram.Recorder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A reservoir implementation backed by the HdrHistogram library. - * - *

It uses a {@link Recorder} to capture snapshots at a configurable interval: calls to {@link - * #update(long)} are recorded in a "live" histogram, while {@link #getSnapshot()} is based on a - * "cached", read-only histogram. Each time the cached histogram becomes older than the interval, - * the two histograms are switched (therefore statistics won't be available during the first - * interval after initialization, since we don't have a cached histogram yet). - * - *

Note that this class does not implement {@link #size()}. - * - * @see HdrHistogram - */ -@ThreadSafe -public class HdrReservoir implements Reservoir { - - private static final Logger LOG = LoggerFactory.getLogger(HdrReservoir.class); - - private final String logPrefix; - private final Recorder recorder; - private final long refreshIntervalNanos; - - // The lock only orchestrates `getSnapshot()` calls; `update()` is fed directly to the recorder, - // which is lock-free. `getSnapshot()` calls are comparatively rare, so locking is not a - // bottleneck. - private final ReadWriteLock cacheLock = new ReentrantReadWriteLock(); - - @GuardedBy("cacheLock") - private Histogram cachedHistogram; - - @GuardedBy("cacheLock") - private long cachedHistogramTimestampNanos; - - @GuardedBy("cacheLock") - private Snapshot cachedSnapshot; - - public HdrReservoir( - Duration highestTrackableLatency, - int numberOfSignificantValueDigits, - Duration refreshInterval, - String logPrefix) { - this.logPrefix = logPrefix; - // The Reservoir interface is supposed to be agnostic to the unit. However, the Metrics library - // heavily leans towards nanoseconds (for example, Timer feeds nanoseconds to update(); JmxTimer - // assumes that the snapshot results are in nanoseconds). - // In our case, microseconds are precise enough for request metrics, and we don't want to waste - // space unnecessarily. So we simply use microseconds for our internal storage, and do the - // conversion when needed. 
- this.recorder = - new Recorder(highestTrackableLatency.toNanos() / 1000, numberOfSignificantValueDigits); - this.refreshIntervalNanos = refreshInterval.toNanos(); - this.cachedHistogramTimestampNanos = System.nanoTime(); - this.cachedSnapshot = EMPTY_SNAPSHOT; - } - - @Override - public void update(long value) { - try { - recorder.recordValue(value / 1000); - } catch (ArrayIndexOutOfBoundsException e) { - LOG.warn("[{}] Recorded value ({}) is out of bounds, discarding", logPrefix, value); - } - } - - /** - * Not implemented: this reservoir implementation is intended for use with a {@link - * com.codahale.metrics.Histogram}, which doesn't use this method. - * - *

(original description: {@inheritDoc}) - */ - @Override - public int size() { - throw new UnsupportedOperationException("HdrReservoir does not implement size()"); - } - - /** - * {@inheritDoc} - * - *

Note that the snapshots returned from this method do not implement {@link - * Snapshot#getValues()} nor {@link Snapshot#dump(OutputStream)}. In addition, due to the way that - * internal data structures are recycled, you should not hold onto a snapshot for more than the - * refresh interval; one way to ensure this is to never cache the result of this method. - */ - @Override - public Snapshot getSnapshot() { - long now = System.nanoTime(); - - cacheLock.readLock().lock(); - try { - if (now - cachedHistogramTimestampNanos < refreshIntervalNanos) { - return cachedSnapshot; - } - } finally { - cacheLock.readLock().unlock(); - } - - cacheLock.writeLock().lock(); - try { - // Might have raced with another writer => re-check the timestamp - if (now - cachedHistogramTimestampNanos >= refreshIntervalNanos) { - LOG.debug("Cached snapshot is too old, refreshing"); - cachedHistogram = recorder.getIntervalHistogram(cachedHistogram); - cachedSnapshot = new HdrSnapshot(cachedHistogram); - cachedHistogramTimestampNanos = now; - } - return cachedSnapshot; - } finally { - cacheLock.writeLock().unlock(); - } - } - - private class HdrSnapshot extends Snapshot { - - private final Histogram histogram; - private final double meanNanos; - private final double stdDevNanos; - - private HdrSnapshot(Histogram histogram) { - this.histogram = histogram; - - // Cache those values because they rely on HdrHistogram's internal iterators, which are not - // safe if the snapshot is accessed by concurrent reporters. - // In contrast, getMin(), getMax() and getValue() are safe. - this.meanNanos = histogram.getMean() * 1000; - this.stdDevNanos = histogram.getStdDeviation() * 1000; - } - - @Override - public double getValue(double quantile) { - return histogram.getValueAtPercentile(quantile * 100) * 1000; - } - - /** - * Not implemented: this reservoir implementation is intended for use with a {@link - * com.codahale.metrics.Histogram}, which doesn't use this method. - * - *

(original description: {@inheritDoc}) - */ - @Override - public long[] getValues() { - // This can be implemented, but we ran into issues when accessed by concurrent reporters - // because HdrHistogram uses an unsafe shared iterator. - // So throwing instead since this method should be seldom used anyway. - throw new UnsupportedOperationException( - "HdrReservoir's snapshots do not implement getValues()"); - } - - @Override - public int size() { - long longSize = histogram.getTotalCount(); - // The Metrics API requires an int. It's very unlikely that we get an overflow here, unless - // the refresh interval is ridiculously high (at 10k requests/s, it would have to be more than - // 59 hours). However handle gracefully just in case. - int size; - if (longSize > Integer.MAX_VALUE) { - LOG.warn("[{}] Too many recorded values, truncating", logPrefix); - size = Integer.MAX_VALUE; - } else { - size = (int) longSize; - } - return size; - } - - @Override - public long getMax() { - return histogram.getMaxValue() * 1000; - } - - @Override - public double getMean() { - return meanNanos; - } - - @Override - public long getMin() { - return histogram.getMinValue() * 1000; - } - - @Override - public double getStdDev() { - return stdDevNanos; - } - - /** - * Not implemented: this reservoir implementation is intended for use with a {@link - * com.codahale.metrics.Histogram}, which doesn't use this method. - * - *

(original description: {@inheritDoc}) - */ - @Override - public void dump(OutputStream output) { - throw new UnsupportedOperationException("HdrReservoir's snapshots do not implement dump()"); - } - } - - private static final Snapshot EMPTY_SNAPSHOT = - new Snapshot() { - @Override - public double getValue(double quantile) { - return 0; - } - - @Override - public long[] getValues() { - return new long[0]; - } - - @Override - public int size() { - return 0; - } - - @Override - public long getMax() { - return 0; - } - - @Override - public double getMean() { - return 0; - } - - @Override - public long getMin() { - return 0; - } - - @Override - public double getStdDev() { - return 0; - } - - @Override - public void dump(OutputStream output) { - // nothing to do - } - }; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricId.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricId.java deleted file mode 100644 index 039fb96d34b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricId.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** - * The identifier of a metric. - * - *

The driver will use the reported name and tags to register the described metric against the - * current metric registry. - * - *

A metric identifier is unique, that is, the combination of its name and its tags is expected - * to be unique for a given metric registry. - */ -public interface MetricId { - - /** - * Returns this metric name. - * - *

Metric names can be any non-empty string, but it is recommended to create metric names that - * have path-like structures separated by a dot, e.g. {@code path.to.my.custom.metric}. Driver - * built-in implementations of this interface abide by this rule. - * - * @return The metric name; cannot be empty nor null. - */ - @NonNull - String getName(); - - /** @return The metric tags, or empty if no tag is defined; cannot be null. */ - @NonNull - Map getTags(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricIdGenerator.java deleted file mode 100644 index 7a33a81b966..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricIdGenerator.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A {@link MetricIdGenerator} is used to generate the unique identifiers by which a metric should - * be registered against the current metrics registry. - * - *

The driver ships with two implementations of this interface; {@code DefaultMetricIdGenerator} - * and {@code TaggingMetricIdGenerator}. - * - *

{@code DefaultMetricIdGenerator} is the default implementation; it generates metric - * identifiers with unique names and no tags. - * - *

{@code TaggingMetricIdGenerator} generates metric identifiers whose uniqueness stems from the - * combination of their names and tags. - * - *

See the driver's {@code reference.conf} file. - */ -public interface MetricIdGenerator { - - /** Generates a {@link MetricId} for the given {@link SessionMetric}. */ - @NonNull - MetricId sessionMetricId(@NonNull SessionMetric metric); - - /** Generates a {@link MetricId} for the given {@link Node} and {@link NodeMetric}. */ - @NonNull - MetricId nodeMetricId(@NonNull Node node, @NonNull NodeMetric metric); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricPaths.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricPaths.java deleted file mode 100644 index 92b3fc569f7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricPaths.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MetricPaths { - - private static final Logger LOG = LoggerFactory.getLogger(MetricPaths.class); - - public static Set parseSessionMetricPaths(List paths, String logPrefix) { - Set result = new HashSet<>(); - for (String path : paths) { - try { - result.add(DefaultSessionMetric.fromPath(path)); - } catch (IllegalArgumentException e) { - try { - result.add(DseSessionMetric.fromPath(path)); - } catch (IllegalArgumentException e1) { - LOG.warn("[{}] Unknown session metric {}, skipping", logPrefix, path); - } - } - } - return Collections.unmodifiableSet(result); - } - - public static Set parseNodeMetricPaths(List paths, String logPrefix) { - Set result = new HashSet<>(); - for (String path : paths) { - try { - result.add(DefaultNodeMetric.fromPath(path)); - } catch (IllegalArgumentException e) { - try { - result.add(DseNodeMetric.fromPath(path)); - } catch (IllegalArgumentException e1) { - LOG.warn("[{}] Unknown node metric {}, skipping", logPrefix, path); - } - } - } - return Collections.unmodifiableSet(result); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java deleted file mode 100644 index c07d1b136af..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java 
+++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.concurrent.TimeUnit; - -/** - * Note about profiles names: they are included to keep the possibility to break up metrics per - * profile in the future, but right now the default updater implementations ignore them. The driver - * internals provide a profile name when it makes sense and is practical; in other cases, it passes - * {@code null}. 
- */ -public interface MetricUpdater { - - void incrementCounter(MetricT metric, @Nullable String profileName, long amount); - - default void incrementCounter(MetricT metric, @Nullable String profileName) { - incrementCounter(metric, profileName, 1); - } - - // note: currently unused - void updateHistogram(MetricT metric, @Nullable String profileName, long value); - - void markMeter(MetricT metric, @Nullable String profileName, long amount); - - default void markMeter(MetricT metric, @Nullable String profileName) { - markMeter(metric, profileName, 1); - } - - void updateTimer(MetricT metric, @Nullable String profileName, long duration, TimeUnit unit); - - boolean isEnabled(MetricT metric, @Nullable String profileName); - - void clearMetrics(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java deleted file mode 100644 index 6440b79fb75..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import java.util.Optional; - -public interface MetricsFactory { - - Optional getMetrics(); - - /** @return the unique instance for this session (this must return the same object every time). */ - SessionMetricUpdater getSessionUpdater(); - - NodeMetricUpdater newNodeUpdater(Node node); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java deleted file mode 100644 index 93d003f0a03..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.NodeMetric; - -public interface NodeMetricUpdater extends MetricUpdater {} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactory.java deleted file mode 100644 index 59ebd3d314b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactory.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import java.util.List; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class NoopMetricsFactory implements MetricsFactory { - - private static final Logger LOG = LoggerFactory.getLogger(NoopMetricsFactory.class); - - @SuppressWarnings("unused") - public NoopMetricsFactory(DriverContext context) { - String logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - List enabledSessionMetrics = - config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED); - List enabledNodeMetrics = - config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED); - if (!enabledSessionMetrics.isEmpty() || !enabledNodeMetrics.isEmpty()) { - LOG.warn( - "[{}] Some session-level or node-level metrics were enabled, " - + "but NoopMetricsFactory is being used: all metrics will be empty", - logPrefix); - } - } - - @Override - public Optional getMetrics() { - return Optional.empty(); - } - - @Override - public SessionMetricUpdater getSessionUpdater() { - return NoopSessionMetricUpdater.INSTANCE; - } - - @Override - public NodeMetricUpdater newNodeUpdater(Node node) { - return NoopNodeMetricUpdater.INSTANCE; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java deleted file mode 100644 index 8d216990331..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class NoopNodeMetricUpdater implements NodeMetricUpdater { - - public static NoopNodeMetricUpdater INSTANCE = new NoopNodeMetricUpdater(); - - private NoopNodeMetricUpdater() {} - - @Override - public void incrementCounter(NodeMetric metric, String profileName, long amount) { - // nothing to do - } - - @Override - public void updateHistogram(NodeMetric metric, String profileName, long value) { - // nothing to do - } - - @Override - public void markMeter(NodeMetric metric, String profileName, long amount) { - // nothing to do - } - - @Override - public void updateTimer(NodeMetric metric, String profileName, long duration, TimeUnit unit) { - // nothing to do - } - - @Override - public boolean isEnabled(NodeMetric metric, String profileName) { - // since methods don't do anything, return false - return false; - } - - @Override 
- public void clearMetrics() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java deleted file mode 100644 index 7099a8ddcac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class NoopSessionMetricUpdater implements SessionMetricUpdater { - - public static NoopSessionMetricUpdater INSTANCE = new NoopSessionMetricUpdater(); - - private NoopSessionMetricUpdater() {} - - @Override - public void incrementCounter(SessionMetric metric, String profileName, long amount) { - // nothing to do - } - - @Override - public void updateHistogram(SessionMetric metric, String profileName, long value) { - // nothing to do - } - - @Override - public void markMeter(SessionMetric metric, String profileName, long amount) { - // nothing to do - } - - @Override - public void updateTimer(SessionMetric metric, String profileName, long duration, TimeUnit unit) { - // nothing to do - } - - @Override - public boolean isEnabled(SessionMetric metric, String profileName) { - // since methods don't do anything, return false - return false; - } - - @Override - public void clearMetrics() {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java deleted file mode 100644 index b7fc51dd134..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.SessionMetric; - -public interface SessionMetricUpdater extends MetricUpdater {} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGenerator.java deleted file mode 100644 index 393651929c9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGenerator.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; - -/** - * A {@link MetricIdGenerator} that generates metric identifiers using a combination of names and - * tags. - * - *

Session metric identifiers contain a name starting with "session." and ending with the metric - * path, and a tag with the key "session" and the value of the current session name. - * - *

Node metric identifiers contain a name starting with "nodes." and ending with the metric path, - * and two tags: one with the key "session" and the value of the current session name, the other - * with the key "node" and the value of the current node endpoint. - */ -public class TaggingMetricIdGenerator implements MetricIdGenerator { - - private final String sessionName; - private final String sessionPrefix; - private final String nodePrefix; - - @SuppressWarnings("unused") - public TaggingMetricIdGenerator(DriverContext context) { - sessionName = context.getSessionName(); - String prefix = - Objects.requireNonNull( - context - .getConfig() - .getDefaultProfile() - .getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")); - sessionPrefix = prefix.isEmpty() ? "session." : prefix + ".session."; - nodePrefix = prefix.isEmpty() ? "nodes." : prefix + ".nodes."; - } - - @NonNull - @Override - public MetricId sessionMetricId(@NonNull SessionMetric metric) { - return new DefaultMetricId( - sessionPrefix + metric.getPath(), ImmutableMap.of("session", sessionName)); - } - - @NonNull - @Override - public MetricId nodeMetricId(@NonNull Node node, @NonNull NodeMetric metric) { - return new DefaultMetricId( - nodePrefix + metric.getPath(), - ImmutableMap.of("session", sessionName, "node", node.getEndPoint().toString())); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java deleted file mode 100644 index dffc23c4c8f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Locale; - -public class CpuInfo { - - /* Copied from equivalent op in jnr.ffi.Platform. We have to have this here as it has to be defined - * before its (multiple) uses in determineCpu() */ - private static final Locale LOCALE = Locale.ENGLISH; - - /* The remainder of this class is largely based on jnr.ffi.Platform in jnr-ffi version 2.1.10. - * We copy it manually here in order to avoid introducing an extra dependency merely for the sake of - * evaluating some system properties. - * - * jnr-ffi copyright notice follows: - * - * Copyright (C) 2008-2010 Wayne Meissner - * - * This file is part of the JNR project. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - /** The supported CPU architectures. 
*/ - public enum Cpu { - /* - * Note The names of the enum values are used in other parts of the - * code to determine where to find the native stub library. Do NOT rename. - */ - - /** 32 bit legacy Intel */ - I386, - - /** 64 bit AMD (aka EM64T/X64) */ - X86_64, - - /** 32 bit Power PC */ - PPC, - - /** 64 bit Power PC */ - PPC64, - - /** 64 bit Power PC little endian */ - PPC64LE, - - /** 32 bit Sun sparc */ - SPARC, - - /** 64 bit Sun sparc */ - SPARCV9, - - /** IBM zSeries S/390 */ - S390X, - - /** 32 bit MIPS (used by nestedvm) */ - MIPS32, - - /** 32 bit ARM */ - ARM, - - /** 64 bit ARM */ - AARCH64, - - /** - * Unknown CPU architecture. A best effort will be made to infer architecture specific values - * such as address and long size. - */ - UNKNOWN; - - @Override - public String toString() { - return name().toLowerCase(LOCALE); - } - } - - public static Cpu determineCpu() { - String archString = System.getProperty("os.arch"); - if (equalsIgnoreCase("x86", archString) - || equalsIgnoreCase("i386", archString) - || equalsIgnoreCase("i86pc", archString) - || equalsIgnoreCase("i686", archString)) { - return Cpu.I386; - } else if (equalsIgnoreCase("x86_64", archString) || equalsIgnoreCase("amd64", archString)) { - return Cpu.X86_64; - } else if (equalsIgnoreCase("ppc", archString) || equalsIgnoreCase("powerpc", archString)) { - return Cpu.PPC; - } else if (equalsIgnoreCase("ppc64", archString) || equalsIgnoreCase("powerpc64", archString)) { - if ("little".equals(System.getProperty("sun.cpu.endian"))) { - return Cpu.PPC64LE; - } - return Cpu.PPC64; - } else if (equalsIgnoreCase("ppc64le", archString) - || equalsIgnoreCase("powerpc64le", archString)) { - return Cpu.PPC64LE; - } else if (equalsIgnoreCase("s390", archString) || equalsIgnoreCase("s390x", archString)) { - return Cpu.S390X; - } else if (equalsIgnoreCase("aarch64", archString)) { - return Cpu.AARCH64; - } else if (equalsIgnoreCase("arm", archString) || equalsIgnoreCase("armv7l", archString)) { - return 
Cpu.ARM; - } - - // Try to find by lookup up in the CPU list - for (Cpu cpu : Cpu.values()) { - if (equalsIgnoreCase(cpu.name(), archString)) { - return cpu; - } - } - - return Cpu.UNKNOWN; - } - - private static boolean equalsIgnoreCase(String s1, String s2) { - return s1.equalsIgnoreCase(s2) - || s1.toUpperCase(LOCALE).equals(s2.toUpperCase(LOCALE)) - || s1.toLowerCase(LOCALE).equals(s2.toLowerCase(LOCALE)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/EmptyLibc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/EmptyLibc.java deleted file mode 100644 index 5b57a01564c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/EmptyLibc.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Optional; - -/** A no-op NativeImpl implementation; useful if we can't load one of the others */ -public class EmptyLibc implements Libc { - - @Override - public boolean available() { - return false; - } - - @Override - public Optional gettimeofday() { - return Optional.empty(); - } - - @Override - public Optional getpid() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalGetpid.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalGetpid.java deleted file mode 100644 index fc9dd8d50c7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalGetpid.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Collections; -import java.util.List; -import org.graalvm.nativeimage.c.CContext; -import org.graalvm.nativeimage.c.function.CFunction; - -@CContext(GraalGetpid.Directives.class) -public class GraalGetpid { - - static class Directives implements CContext.Directives { - - @Override - public List getHeaderFiles() { - - return Collections.singletonList(""); - } - } - - @CFunction - public static native int getpid(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalLibc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalLibc.java deleted file mode 100644 index a6535c2c653..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalLibc.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Locale; -import java.util.Optional; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class GraalLibc implements Libc { - - private static final Logger LOG = LoggerFactory.getLogger(GraalLibc.class); - - private static final Locale LOCALE = Locale.ENGLISH; - - private static final String MAC_PLATFORM_STR = "mac".toLowerCase(LOCALE); - private static final String DARWIN_PLATFORM_STR = "darwin".toLowerCase(LOCALE); - private static final String LINUX_PLATFORM_STR = "linux".toLowerCase(LOCALE); - - private final boolean available = checkAvailability(); - - /* This method is adapted from of jnr.ffi.Platform.determineOS() in jnr-ffi version 2.1.10. **/ - private boolean checkPlatform() { - - String osName = System.getProperty("os.name").split(" ", -1)[0]; - String compareStr = osName.toLowerCase(Locale.ENGLISH); - return compareStr.startsWith(MAC_PLATFORM_STR) - || compareStr.startsWith(DARWIN_PLATFORM_STR) - || compareStr.startsWith(LINUX_PLATFORM_STR); - } - - private boolean checkAvailability() { - - if (!checkPlatform()) { - return false; - } - - try { - getpidRaw(); - } catch (Throwable t) { - - LOG.debug("Error calling getpid()", t); - return false; - } - - try { - gettimeofdayRaw(); - } catch (Throwable t) { - - LOG.debug("Error calling gettimeofday()", t); - return false; - } - - return true; - } - - @Override - public boolean available() { - return this.available; - } - - /* Substrate includes a substitution for Linux + Darwin which redefines System.nanoTime() to use - * gettimeofday() (unless platform-specific higher-res clocks are available, which is even better). */ - @Override - public Optional gettimeofday() { - return this.available ? Optional.of(gettimeofdayRaw()) : Optional.empty(); - } - - private long gettimeofdayRaw() { - return Math.round(System.nanoTime() / 1_000d); - } - - @Override - public Optional getpid() { - return this.available ? 
Optional.of(getpidRaw()) : Optional.empty(); - } - - private int getpidRaw() { - return GraalGetpid.getpid(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibc.java deleted file mode 100644 index 25236dee837..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibc.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Optional; -import java.util.function.Consumer; -import jnr.posix.POSIX; -import jnr.posix.POSIXFactory; -import jnr.posix.Timeval; -import jnr.posix.util.DefaultPOSIXHandler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class JnrLibc implements Libc { - - private static final Logger LOG = LoggerFactory.getLogger(JnrLibc.class); - - private final Optional posix; - - public JnrLibc() { - - this.posix = loadPosix(); - } - - @Override - public Optional gettimeofday() { - - return this.posix.flatMap(this::gettimeofdayImpl); - } - - @Override - public Optional getpid() { - - return this.posix.map(POSIX::getpid); - } - - @Override - public boolean available() { - return this.posix.isPresent(); - } - - private Optional loadPosix() { - - try { - return Optional.of(POSIXFactory.getPOSIX(new DefaultPOSIXHandler(), true)) - .flatMap(p -> catchAll(p, posix -> posix.getpid(), "Error calling getpid()")) - .flatMap(p -> catchAll(p, this::gettimeofdayImpl, "Error calling gettimeofday()")); - } catch (Throwable t) { - LOG.debug("Error loading POSIX", t); - return Optional.empty(); - } - } - - private Optional catchAll(POSIX posix, Consumer fn, String debugStr) { - try { - fn.accept(posix); - return Optional.of(posix); - } catch (Throwable t) { - - LOG.debug(debugStr, t); - return Optional.empty(); - } - } - - private Optional gettimeofdayImpl(POSIX posix) { - - Timeval tv = posix.allocateTimeval(); - int rv = posix.gettimeofday(tv); - if (rv != 0) { - LOG.debug("Expected 0 return value from gettimeofday(), observed " + rv); - return Optional.empty(); - } - return Optional.of(tv.sec() * 1_000_000 + tv.usec()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibcSubstitution.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibcSubstitution.java deleted file mode 100644 index 532001498f4..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibcSubstitution.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import java.util.Optional; - -/** - * Add an explicit Graal substitution for {@link JnrLibc}. If we don't implement something like this - * the analysis done at Graal native image build time will discover the jnr-posix references in - * JnrLibc even though they won't be used at runtime. By default jnr-ffi (used by jnr-posix to do - * it's work) will use {@link ClassLoader#defineClass(String, byte[], int, int)} which isn't - * supported by Graal. This behaviour can be changed with a system property but the cleanest - * solution is simply to remove the references to jnr-posix code via a Graal substitution. 
- */ -@TargetClass(JnrLibc.class) -@Substitute -final class JnrLibcSubstitution implements Libc { - - @Substitute - public JnrLibcSubstitution() {} - - @Substitute - @Override - public boolean available() { - return false; - } - - @Substitute - @Override - public Optional gettimeofday() { - return Optional.empty(); - } - - @Substitute - @Override - public Optional getpid() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Libc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/Libc.java deleted file mode 100644 index f3bda6a8c88..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Libc.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Optional; - -public interface Libc { - - /* Maintained to allow Native.isXAvailable() functionality without trying to make a native call if - * the underlying support _is_ available. 
*/ - boolean available(); - - Optional gettimeofday(); - - Optional getpid(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java deleted file mode 100644 index e292914bb4b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** A gateway to perform system calls. */ -public class Native { - - private static final Logger LOG = LoggerFactory.getLogger(Native.class); - - private static class LibcLoader { - - /* These values come from Graal's imageinfo API which aims to offer the ability to detect - * when we're in the Graal build/run time via system props. The maintainers of Graal have - * agreed that this API will not change over time. We reference these props as literals - * to avoid introducing a dependency on Graal code for non-Graal users here. 
*/ - private static final String GRAAL_STATUS_PROP = "org.graalvm.nativeimage.imagecode"; - private static final String GRAAL_BUILDTIME_STATUS = "buildtime"; - private static final String GRAAL_RUNTIME_STATUS = "runtime"; - - public Libc load() { - try { - if (isGraal()) { - LOG.info("Using Graal-specific native functions"); - return new GraalLibc(); - } - return new JnrLibc(); - } catch (Throwable t) { - LOG.info( - "Unable to load JNR native implementation. This could be normal if JNR is excluded from the classpath", - t); - return new EmptyLibc(); - } - } - - private boolean isGraal() { - - String val = System.getProperty(GRAAL_STATUS_PROP); - return val != null - && (val.equals(GRAAL_RUNTIME_STATUS) || val.equalsIgnoreCase(GRAAL_BUILDTIME_STATUS)); - } - } - - private static final Libc LIBC = new LibcLoader().load(); - private static final CpuInfo.Cpu CPU = CpuInfo.determineCpu(); - - private static final String NATIVE_CALL_ERR_MSG = "Native call failed or was not available"; - - /** Whether {@link Native#currentTimeMicros()} is available on this system. */ - public static boolean isCurrentTimeMicrosAvailable() { - return LIBC.available(); - } - - /** - * The current time in microseconds, as returned by libc.gettimeofday(); can only be used if - * {@link #isCurrentTimeMicrosAvailable()} is true. - */ - public static long currentTimeMicros() { - return LIBC.gettimeofday().orElseThrow(() -> new IllegalStateException(NATIVE_CALL_ERR_MSG)); - } - - public static boolean isGetProcessIdAvailable() { - return LIBC.available(); - } - - public static int getProcessId() { - return LIBC.getpid().orElseThrow(() -> new IllegalStateException(NATIVE_CALL_ERR_MSG)); - } - - /** - * Returns the current processor architecture the JVM is running on. This value should match up to - * what's returned by jnr-ffi's Platform.getCPU() method. - * - * @return the current processor architecture. 
- */ - public static String getCpu() { - return CPU.toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java deleted file mode 100644 index 6b7d06045bd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java +++ /dev/null @@ -1,585 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.ClusterNameMismatchException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.DriverChannelOptions; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.Reconnection; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import 
com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The channel pool maintains a set of {@link DriverChannel} instances connected to a given node. - * - *

It allows clients to obtain a channel to execute their requests. - * - *

If one or more channels go down, a reconnection process starts in order to replace them; it - * runs until the channel count is back to its intended target. - */ -@ThreadSafe -public class ChannelPool implements AsyncAutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(ChannelPool.class); - - /** - * Initializes a new pool. - * - *

The returned completion stage will complete when all the underlying channels have finished - * their initialization. If one or more channels fail, a reconnection will be started immediately. - * Note that this method succeeds even if all channels fail, so you might get a pool that has no - * channels (i.e. {@link #next()} return {@code null}) and is reconnecting. - */ - public static CompletionStage init( - Node node, - CqlIdentifier keyspaceName, - NodeDistance distance, - InternalDriverContext context, - String sessionLogPrefix) { - ChannelPool pool = new ChannelPool(node, keyspaceName, distance, context, sessionLogPrefix); - return pool.connect(); - } - - // This is read concurrently, but only mutated on adminExecutor (by methods in SingleThreaded) - @VisibleForTesting final ChannelSet channels = new ChannelSet(); - - private final Node node; - private final CqlIdentifier initialKeyspaceName; - private final EventExecutor adminExecutor; - private final String sessionLogPrefix; - private final String logPrefix; - private final SingleThreaded singleThreaded; - private volatile boolean invalidKeyspace; - - private ChannelPool( - Node node, - CqlIdentifier keyspaceName, - NodeDistance distance, - InternalDriverContext context, - String sessionLogPrefix) { - this.node = node; - this.initialKeyspaceName = keyspaceName; - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.sessionLogPrefix = sessionLogPrefix; - this.logPrefix = sessionLogPrefix + "|" + node.getEndPoint(); - this.singleThreaded = new SingleThreaded(keyspaceName, distance, context); - } - - private CompletionStage connect() { - RunOrSchedule.on(adminExecutor, singleThreaded::connect); - return singleThreaded.connectFuture; - } - - public Node getNode() { - return node; - } - - /** - * The keyspace with which the pool was initialized (therefore a constant, it's not updated if the - * keyspace is switched later). 
- */ - public CqlIdentifier getInitialKeyspaceName() { - return initialKeyspaceName; - } - - /** - * Whether all channels failed due to an invalid keyspace. This is only used at initialization. We - * don't make the decision to close the pool here yet, that's done at the session level. - */ - public boolean isInvalidKeyspace() { - return invalidKeyspace; - } - - /** - * @return the channel that has the most available stream ids. This is called on the direct - * request path, and we want to avoid complex check-then-act semantics; therefore this might - * race and return a channel that is already closed, or {@code null}. In those cases, it is up - * to the caller to fail fast and move to the next node. - */ - public DriverChannel next() { - return channels.next(); - } - - /** @return the number of active channels in the pool. */ - public int size() { - return channels.size(); - } - - /** @return the number of available stream ids on all channels in the pool. */ - public int getAvailableIds() { - return channels.getAvailableIds(); - } - - /** - * @return the number of requests currently executing on all channels in this pool (including - * {@link #getOrphanedIds() orphaned ids}). - */ - public int getInFlight() { - return channels.getInFlight(); - } - - /** - * @return the number of stream ids for requests in all channels in this pool that have either - * timed out or been cancelled, but for which we can't release the stream id because a request - * might still come from the server. - */ - public int getOrphanedIds() { - return channels.getOrphanedIds(); - } - - /** - * Sets a new distance for the node this pool belongs to. This method returns immediately, the new - * distance will be set asynchronously. - * - * @param newDistance the new distance to set. - */ - public void resize(NodeDistance newDistance) { - RunOrSchedule.on(adminExecutor, () -> singleThreaded.resize(newDistance)); - } - - /** - * Changes the keyspace name on all the channels in this pool. - * - *

Note that this is not called directly by the user, but happens only on a SetKeyspace - * response after a successful "USE ..." query, so the name should be valid. If the keyspace - * switch fails on any channel, that channel is closed and a reconnection is started. - */ - public CompletionStage setKeyspace(CqlIdentifier newKeyspaceName) { - return RunOrSchedule.on(adminExecutor, () -> singleThreaded.setKeyspace(newKeyspaceName)); - } - - public void reconnectNow() { - RunOrSchedule.on(adminExecutor, singleThreaded::reconnectNow); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::close); - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::forceClose); - return singleThreaded.closeFuture; - } - - /** Holds all administration tasks, that are confined to the admin executor. 
*/ - private class SingleThreaded { - - private final DriverConfig config; - private final ChannelFactory channelFactory; - private final EventBus eventBus; - // The channels that are currently connecting - private final List> pendingChannels = new ArrayList<>(); - private final Set closingChannels = new HashSet<>(); - private final Reconnection reconnection; - private final Object configListenerKey; - - private NodeDistance distance; - private int wantedCount; - private final CompletableFuture connectFuture = new CompletableFuture<>(); - private boolean isConnecting; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean isClosing; - private CompletableFuture setKeyspaceFuture; - - private CqlIdentifier keyspaceName; - - private SingleThreaded( - CqlIdentifier keyspaceName, NodeDistance distance, InternalDriverContext context) { - this.keyspaceName = keyspaceName; - this.config = context.getConfig(); - this.distance = distance; - this.wantedCount = getConfiguredSize(distance); - this.channelFactory = context.getChannelFactory(); - this.eventBus = context.getEventBus(); - ReconnectionPolicy reconnectionPolicy = context.getReconnectionPolicy(); - this.reconnection = - new Reconnection( - logPrefix, - adminExecutor, - () -> reconnectionPolicy.newNodeSchedule(node), - this::addMissingChannels, - () -> eventBus.fire(ChannelEvent.reconnectionStarted(node)), - () -> eventBus.fire(ChannelEvent.reconnectionStopped(node))); - this.configListenerKey = - eventBus.register( - ConfigChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onConfigChanged)); - } - - private void connect() { - assert adminExecutor.inEventLoop(); - if (isConnecting) { - return; - } - isConnecting = true; - CompletionStage initialChannels = - addMissingChannels() - .thenApply( - allConnected -> { - if (!allConnected) { - reconnection.start(); - } - return ChannelPool.this; - }); - CompletableFutures.completeFrom(initialChannels, connectFuture); - } - - private 
CompletionStage addMissingChannels() { - assert adminExecutor.inEventLoop(); - // We always wait for all attempts to succeed or fail before scheduling a reconnection - assert pendingChannels.isEmpty(); - - int missing = wantedCount - channels.size(); - LOG.debug("[{}] Trying to create {} missing channels", logPrefix, missing); - DriverChannelOptions options = - DriverChannelOptions.builder() - .withKeyspace(keyspaceName) - .withOwnerLogPrefix(sessionLogPrefix) - .build(); - for (int i = 0; i < missing; i++) { - CompletionStage channelFuture = channelFactory.connect(node, options); - pendingChannels.add(channelFuture); - } - return CompletableFutures.allDone(pendingChannels) - .thenApplyAsync(this::onAllConnected, adminExecutor); - } - - private boolean onAllConnected(@SuppressWarnings("unused") Void v) { - assert adminExecutor.inEventLoop(); - Throwable fatalError = null; - int invalidKeyspaceErrors = 0; - for (CompletionStage pendingChannel : pendingChannels) { - CompletableFuture future = pendingChannel.toCompletableFuture(); - assert future.isDone(); - if (future.isCompletedExceptionally()) { - Throwable error = CompletableFutures.getFailed(future); - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter( - error instanceof AuthenticationException - ? DefaultNodeMetric.AUTHENTICATION_ERRORS - : DefaultNodeMetric.CONNECTION_INIT_ERRORS, - null); - if (error instanceof ClusterNameMismatchException - || error instanceof UnsupportedProtocolVersionException) { - // This will likely be thrown by all channels, but finish the loop cleanly - fatalError = error; - } else if (error instanceof AuthenticationException) { - // Always warn because this is most likely something the operator needs to fix. - // Keep going to reconnect if it can be fixed without bouncing the client. 
- Loggers.warnWithException(LOG, "[{}] Authentication error", logPrefix, error); - } else if (error instanceof InvalidKeyspaceException) { - invalidKeyspaceErrors += 1; - } else { - if (config - .getDefaultProfile() - .getBoolean(DefaultDriverOption.CONNECTION_WARN_INIT_ERROR)) { - Loggers.warnWithException( - LOG, "[{}] Error while opening new channel", logPrefix, error); - } else { - LOG.debug("[{}] Error while opening new channel", logPrefix, error); - } - } - } else { - DriverChannel channel = CompletableFutures.getCompleted(future); - if (isClosing) { - LOG.debug( - "[{}] New channel added ({}) but the pool was closed, closing it", - logPrefix, - channel); - channel.forceClose(); - } else { - LOG.debug("[{}] New channel added {}", logPrefix, channel); - channels.add(channel); - eventBus.fire(ChannelEvent.channelOpened(node)); - channel - .closeStartedFuture() - .addListener( - f -> - adminExecutor - .submit(() -> onChannelCloseStarted(channel)) - .addListener(UncaughtExceptions::log)); - channel - .closeFuture() - .addListener( - f -> - adminExecutor - .submit(() -> onChannelClosed(channel)) - .addListener(UncaughtExceptions::log)); - } - } - } - // If all channels failed, assume the keyspace is wrong - invalidKeyspace = - invalidKeyspaceErrors > 0 && invalidKeyspaceErrors == pendingChannels.size(); - - pendingChannels.clear(); - - if (fatalError != null) { - Loggers.warnWithException( - LOG, - "[{}] Fatal error while initializing pool, forcing the node down", - logPrefix, - fatalError); - // Note: getBroadcastRpcAddress() can only be empty for the control node (and not for modern - // C* versions anyway). If we already have a control connection open to that node, it's - // impossible to get a protocol version or cluster name mismatch error while creating the - // pool, so it's safe to ignore this case. 
- node.getBroadcastRpcAddress() - .ifPresent(address -> eventBus.fire(TopologyEvent.forceDown(address))); - // Don't bother continuing, the pool will get shut down soon anyway - return true; - } - - shrinkIfTooManyChannels(); // Can happen if the pool was shrinked during the reconnection - - int currentCount = channels.size(); - LOG.debug( - "[{}] Reconnection attempt complete, {}/{} channels", - logPrefix, - currentCount, - wantedCount); - // Stop reconnecting if we have the wanted count - return currentCount >= wantedCount; - } - - private void onChannelCloseStarted(DriverChannel channel) { - assert adminExecutor.inEventLoop(); - if (!isClosing) { - LOG.debug("[{}] Channel {} started graceful shutdown", logPrefix, channel); - channels.remove(channel); - closingChannels.add(channel); - eventBus.fire(ChannelEvent.channelClosed(node)); - reconnection.start(); - } - } - - private void onChannelClosed(DriverChannel channel) { - assert adminExecutor.inEventLoop(); - if (!isClosing) { - // Either it was closed abruptly and was still in the live set, or it was an orderly - // shutdown and it had moved to the closing set. 
- if (channels.remove(channel)) { - LOG.debug("[{}] Lost channel {}", logPrefix, channel); - eventBus.fire(ChannelEvent.channelClosed(node)); - reconnection.start(); - } else { - LOG.debug("[{}] Channel {} completed graceful shutdown", logPrefix, channel); - closingChannels.remove(channel); - } - } - } - - private void resize(NodeDistance newDistance) { - assert adminExecutor.inEventLoop(); - distance = newDistance; - int newChannelCount = getConfiguredSize(newDistance); - if (newChannelCount > wantedCount) { - LOG.debug("[{}] Growing ({} => {} channels)", logPrefix, wantedCount, newChannelCount); - wantedCount = newChannelCount; - reconnection.start(); - } else if (newChannelCount < wantedCount) { - LOG.debug("[{}] Shrinking ({} => {} channels)", logPrefix, wantedCount, newChannelCount); - wantedCount = newChannelCount; - if (!reconnection.isRunning()) { - shrinkIfTooManyChannels(); - } // else it will be handled at the end of the reconnection attempt - } - } - - private void shrinkIfTooManyChannels() { - assert adminExecutor.inEventLoop(); - int extraCount = channels.size() - wantedCount; - if (extraCount > 0) { - LOG.debug("[{}] Closing {} extra channels", logPrefix, extraCount); - Set toRemove = Sets.newHashSetWithExpectedSize(extraCount); - for (DriverChannel channel : channels) { - toRemove.add(channel); - if (--extraCount == 0) { - break; - } - } - for (DriverChannel channel : toRemove) { - channels.remove(channel); - channel.close(); - eventBus.fire(ChannelEvent.channelClosed(node)); - } - } - } - - private void onConfigChanged(@SuppressWarnings("unused") ConfigChangeEvent event) { - assert adminExecutor.inEventLoop(); - // resize re-reads the pool size from the configuration and does nothing if it hasn't changed, - // which is exactly what we want. 
- resize(distance); - } - - private CompletionStage setKeyspace(CqlIdentifier newKeyspaceName) { - assert adminExecutor.inEventLoop(); - if (setKeyspaceFuture != null && !setKeyspaceFuture.isDone()) { - return CompletableFutures.failedFuture( - new IllegalStateException( - "Can't call setKeyspace while a keyspace switch is already in progress")); - } - keyspaceName = newKeyspaceName; - setKeyspaceFuture = new CompletableFuture<>(); - - // Switch the keyspace on all live channels. - // We can read the size before iterating because mutations are confined to this thread: - int toSwitch = channels.size(); - if (toSwitch == 0) { - setKeyspaceFuture.complete(null); - } else { - AtomicInteger remaining = new AtomicInteger(toSwitch); - for (DriverChannel channel : channels) { - channel - .setKeyspace(newKeyspaceName) - .addListener( - f -> { - // Don't handle errors: if a channel fails to switch the keyspace, it closes - if (remaining.decrementAndGet() == 0) { - setKeyspaceFuture.complete(null); - } - }); - } - } - - // pending channels were scheduled with the old keyspace name, ensure they eventually switch - for (CompletionStage channelFuture : pendingChannels) { - // errors are swallowed here, this is fine because a setkeyspace error will close the - // channel, so it will eventually get reported - channelFuture.thenAccept(channel -> channel.setKeyspace(newKeyspaceName)); - } - - return setKeyspaceFuture; - } - - private void reconnectNow() { - assert adminExecutor.inEventLoop(); - // Don't force because if the reconnection is stopped, it means either we have enough channels - // or the pool is shutting down. 
- reconnection.reconnectNow(false); - } - - private void close() { - assert adminExecutor.inEventLoop(); - if (isClosing) { - return; - } - isClosing = true; - - // If an attempt was in progress right now, it might open new channels but they will be - // handled in onAllConnected - reconnection.stop(); - - eventBus.unregister(configListenerKey, ConfigChangeEvent.class); - - // Close all channels, the pool future completes when all the channels futures have completed - int toClose = closingChannels.size() + channels.size(); - if (toClose == 0) { - closeFuture.complete(null); - } else { - AtomicInteger remaining = new AtomicInteger(toClose); - GenericFutureListener> channelCloseListener = - f -> { - if (!f.isSuccess()) { - Loggers.warnWithException(LOG, "[{}] Error closing channel", logPrefix, f.cause()); - } - if (remaining.decrementAndGet() == 0) { - closeFuture.complete(null); - } - }; - for (DriverChannel channel : channels) { - eventBus.fire(ChannelEvent.channelClosed(node)); - channel.close().addListener(channelCloseListener); - } - for (DriverChannel channel : closingChannels) { - // don't fire the close event, onChannelCloseStarted() already did it - channel.closeFuture().addListener(channelCloseListener); - } - } - } - - private void forceClose() { - assert adminExecutor.inEventLoop(); - if (!isClosing) { - close(); - } - for (DriverChannel channel : channels) { - channel.forceClose(); - } - for (DriverChannel channel : closingChannels) { - channel.forceClose(); - } - } - - private int getConfiguredSize(NodeDistance distance) { - return config - .getDefaultProfile() - .getInt( - (distance == NodeDistance.LOCAL) - ? 
DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE - : DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java deleted file mode 100644 index b854f4c326c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.pool; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -/** Just a level of indirection to make testing easier. 
*/ -@ThreadSafe -public class ChannelPoolFactory { - public CompletionStage init( - Node node, - CqlIdentifier keyspaceName, - NodeDistance distance, - InternalDriverContext context, - String sessionLogPrefix) { - return ChannelPool.init(node, keyspaceName, distance, context, sessionLogPrefix); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java deleted file mode 100644 index b02e15819d9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; -import java.util.Iterator; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Concurrent structure used to store the channels of a pool. - * - *

Its write semantics are similar to "copy-on-write" JDK collections, selection operations are - * expected to vastly outnumber mutations. - */ -@ThreadSafe -class ChannelSet implements Iterable { - - private static final Logger LOG = LoggerFactory.getLogger(ChannelSet.class); - /** - * The maximum number of iterations in the busy wait loop in {@link #next()} when there are - * multiple channels. This is a backstop to protect against thread starvation, in practice we've - * never observed more than 3 iterations in tests. - */ - private static final int MAX_ITERATIONS = 50; - - private volatile DriverChannel[] channels; - private final ReentrantLock lock = new ReentrantLock(); // must be held when mutating the array - - ChannelSet() { - this.channels = new DriverChannel[] {}; - } - - void add(DriverChannel toAdd) { - Preconditions.checkNotNull(toAdd); - lock.lock(); - try { - assert indexOf(channels, toAdd) < 0; - DriverChannel[] newChannels = Arrays.copyOf(channels, channels.length + 1); - newChannels[newChannels.length - 1] = toAdd; - channels = newChannels; - } finally { - lock.unlock(); - } - } - - boolean remove(DriverChannel toRemove) { - Preconditions.checkNotNull(toRemove); - lock.lock(); - try { - int index = indexOf(channels, toRemove); - if (index < 0) { - return false; - } else { - DriverChannel[] newChannels = new DriverChannel[channels.length - 1]; - int newI = 0; - for (int i = 0; i < channels.length; i++) { - if (i != index) { - newChannels[newI] = channels[i]; - newI += 1; - } - } - channels = newChannels; - return true; - } - } finally { - lock.unlock(); - } - } - - /** @return null if the set is empty or all are full */ - DriverChannel next() { - DriverChannel[] snapshot = this.channels; - switch (snapshot.length) { - case 0: - return null; - case 1: - DriverChannel onlyChannel = snapshot[0]; - return onlyChannel.preAcquireId() ? 
onlyChannel : null; - default: - for (int i = 0; i < MAX_ITERATIONS; i++) { - DriverChannel best = null; - int bestScore = 0; - for (DriverChannel channel : snapshot) { - int score = channel.getAvailableIds(); - if (score > bestScore) { - bestScore = score; - best = channel; - } - } - if (best == null) { - return null; - } else if (best.preAcquireId()) { - return best; - } - } - LOG.trace("Could not select a channel after {} iterations", MAX_ITERATIONS); - return null; - } - } - - /** @return the number of available stream ids on all channels in this channel set. */ - int getAvailableIds() { - int availableIds = 0; - DriverChannel[] snapshot = this.channels; - for (DriverChannel channel : snapshot) { - availableIds += channel.getAvailableIds(); - } - return availableIds; - } - - /** - * @return the number of requests currently executing on all channels in this channel set - * (including {@link #getOrphanedIds() orphaned ids}). - */ - int getInFlight() { - int inFlight = 0; - DriverChannel[] snapshot = this.channels; - for (DriverChannel channel : snapshot) { - inFlight += channel.getInFlight(); - } - return inFlight; - } - - /** - * @return the number of stream ids for requests in all channels in this channel set that have - * either timed out or been cancelled, but for which we can't release the stream id because a - * request might still come from the server. 
- */ - int getOrphanedIds() { - int orphanedIds = 0; - DriverChannel[] snapshot = this.channels; - for (DriverChannel channel : snapshot) { - orphanedIds += channel.getOrphanedIds(); - } - return orphanedIds; - } - - int size() { - return this.channels.length; - } - - @NonNull - @Override - public Iterator iterator() { - return Iterators.forArray(this.channels); - } - - private static int indexOf(DriverChannel[] channels, DriverChannel key) { - for (int i = 0; i < channels.length; i++) { - if (channels[i] == key) { - return i; - } - } - return -1; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressors.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressors.java deleted file mode 100644 index 74270caef91..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressors.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.protocol.internal.Compressor; -import io.netty.buffer.ByteBuf; -import java.util.Locale; - -/** - * Provides a single entry point to create compressor instances in the driver. - * - *

Note that this class also serves as a convenient target for GraalVM substitutions, see {@link - * CompressorSubstitutions}. - */ -public class BuiltInCompressors { - - public static Compressor newInstance(String name, DriverContext context) { - switch (name.toLowerCase(Locale.ROOT)) { - case "lz4": - return new Lz4Compressor(context); - case "snappy": - return new SnappyCompressor(context); - case "none": - return Compressor.none(); - default: - throw new IllegalArgumentException( - String.format( - "Unsupported compression algorithm '%s' (from configuration option %s)", - name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java deleted file mode 100644 index 95e6be07434..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.Compressor; -import io.netty.buffer.ByteBuf; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public abstract class ByteBufCompressor implements Compressor { - - @Override - public ByteBuf compress(ByteBuf uncompressed) { - return uncompressed.isDirect() - ? compressDirect(uncompressed, true) - : compressHeap(uncompressed, true); - } - - @Override - public ByteBuf compressWithoutLength(ByteBuf uncompressed) { - return uncompressed.isDirect() - ? compressDirect(uncompressed, false) - : compressHeap(uncompressed, false); - } - - protected abstract ByteBuf compressDirect(ByteBuf input, boolean prependWithUncompressedLength); - - protected abstract ByteBuf compressHeap(ByteBuf input, boolean prependWithUncompressedLength); - - @Override - public ByteBuf decompress(ByteBuf compressed) { - return decompressWithoutLength(compressed, readUncompressedLength(compressed)); - } - - protected abstract int readUncompressedLength(ByteBuf compressed); - - @Override - public ByteBuf decompressWithoutLength(ByteBuf compressed, int uncompressedLength) { - return compressed.isDirect() - ? decompressDirect(compressed, uncompressedLength) - : decompressHeap(compressed, uncompressedLength); - } - - protected abstract ByteBuf decompressDirect(ByteBuf input, int uncompressedLength); - - protected abstract ByteBuf decompressHeap(ByteBuf input, int uncompressedLength); - - protected static ByteBuffer inputNioBuffer(ByteBuf buf) { - // Using internalNioBuffer(...) as we only hold the reference in this method and so can - // reduce Object allocations. - int index = buf.readerIndex(); - int len = buf.readableBytes(); - return buf.nioBufferCount() == 1 - ? 
buf.internalNioBuffer(index, len) - : buf.nioBuffer(index, len); - } - - protected static ByteBuffer outputNioBuffer(ByteBuf buf) { - int index = buf.writerIndex(); - int len = buf.writableBytes(); - return buf.nioBufferCount() == 1 - ? buf.internalNioBuffer(index, len) - : buf.nioBuffer(index, len); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java deleted file mode 100644 index 1371009f989..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.CompositeByteBuf; -import io.netty.util.CharsetUtil; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.zip.CRC32; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ByteBufPrimitiveCodec implements PrimitiveCodec { - - private final ByteBufAllocator allocator; - - public ByteBufPrimitiveCodec(ByteBufAllocator allocator) { - this.allocator = allocator; - } - - @Override - public ByteBuf allocate(int size) { - return allocator.ioBuffer(size, size); - } - - @Override - public void release(ByteBuf toRelease) { - toRelease.release(); - } - - @Override - public int sizeOf(ByteBuf toMeasure) { - return toMeasure.readableBytes(); - } - - @Override - public ByteBuf concat(ByteBuf left, ByteBuf right) { - if (!left.isReadable()) { - return right.duplicate(); - } else if (!right.isReadable()) { - return left.duplicate(); - } else { - CompositeByteBuf c = allocator.compositeBuffer(2); - c.addComponents(left, right); - // c.readerIndex() is 0, which is the first readable byte in left - c.writerIndex( - left.writerIndex() - left.readerIndex() + right.writerIndex() - right.readerIndex()); - return c; - } - } - - @Override - public void markReaderIndex(ByteBuf source) { - source.markReaderIndex(); - } - - @Override - public void resetReaderIndex(ByteBuf source) { - source.resetReaderIndex(); - } - - @Override - public byte readByte(ByteBuf source) { - return source.readByte(); - } - - @Override - public int readInt(ByteBuf source) { - return source.readInt(); - } - - @Override - public int readInt(ByteBuf source, int offset) { - return source.getInt(source.readerIndex() + offset); - } - - @Override - public InetAddress readInetAddr(ByteBuf source) { - 
int length = readByte(source) & 0xFF; - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return newInetAddress(bytes); - } - - @Override - public long readLong(ByteBuf source) { - return source.readLong(); - } - - @Override - public int readUnsignedShort(ByteBuf source) { - return source.readUnsignedShort(); - } - - @Override - public ByteBuffer readBytes(ByteBuf source) { - int length = readInt(source); - if (length < 0) return null; - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return ByteBuffer.wrap(bytes); - } - - @Override - public byte[] readShortBytes(ByteBuf source) { - try { - int length = readUnsignedShort(source); - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return bytes; - } catch (IndexOutOfBoundsException e) { - throw new IllegalArgumentException( - "Not enough bytes to read a byte array preceded by its 2 bytes length"); - } - } - - @Override - public String readString(ByteBuf source) { - int length = readUnsignedShort(source); - return readString(source, length); - } - - @Override - public String readLongString(ByteBuf source) { - int length = readInt(source); - return readString(source, length); - } - - @Override - public ByteBuf readRetainedSlice(ByteBuf source, int sliceLength) { - return source.readRetainedSlice(sliceLength); - } - - @Override - public void updateCrc(ByteBuf source, CRC32 crc) { - crc.update(source.internalNioBuffer(source.readerIndex(), source.readableBytes())); - } - - @Override - public void writeByte(byte b, ByteBuf dest) { - dest.writeByte(b); - } - - @Override - public void writeInt(int i, ByteBuf dest) { - dest.writeInt(i); - } - - @Override - public void writeInetAddr(InetAddress inetAddr, ByteBuf dest) { - byte[] bytes = inetAddr.getAddress(); - writeByte((byte) bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeLong(long l, ByteBuf dest) { - dest.writeLong(l); - } - - @Override - public void writeUnsignedShort(int i, ByteBuf dest) { - 
dest.writeShort(i); - } - - @Override - public void writeString(String s, ByteBuf dest) { - byte[] bytes = s.getBytes(CharsetUtil.UTF_8); - writeUnsignedShort(bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeLongString(String s, ByteBuf dest) { - byte[] bytes = s.getBytes(CharsetUtil.UTF_8); - writeInt(bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeBytes(ByteBuffer bytes, ByteBuf dest) { - if (bytes == null) { - writeInt(-1, dest); - } else { - writeInt(bytes.remaining(), dest); - dest.writeBytes(bytes.duplicate()); - } - } - - @Override - public void writeBytes(byte[] bytes, ByteBuf dest) { - if (bytes == null) { - writeInt(-1, dest); - } else { - writeInt(bytes.length, dest); - dest.writeBytes(bytes); - } - } - - @Override - public void writeShortBytes(byte[] bytes, ByteBuf dest) { - writeUnsignedShort(bytes.length, dest); - dest.writeBytes(bytes); - } - - private static String readString(ByteBuf source, int length) { - try { - String str = source.toString(source.readerIndex(), length, CharsetUtil.UTF_8); - source.readerIndex(source.readerIndex() + length); - return str; - } catch (IndexOutOfBoundsException e) { - throw new IllegalArgumentException( - "Not enough bytes to read an UTF-8 serialized string of size " + length, e); - } - } - - private InetAddress newInetAddress(byte[] bytes) { - try { - return InetAddress.getByAddress(bytes); - } catch (UnknownHostException e) { - // Per the Javadoc, the only way this can happen is if the length is illegal - throw new IllegalArgumentException( - String.format("Invalid address length: %d (%s)", bytes.length, Arrays.toString(bytes))); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufSegmentBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufSegmentBuilder.java deleted file mode 100644 index 9b112559aab..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufSegmentBuilder.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.SegmentBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.util.ArrayList; -import java.util.List; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@NotThreadSafe -public class ByteBufSegmentBuilder extends SegmentBuilder { - - private static final Logger LOG = LoggerFactory.getLogger(ByteBufSegmentBuilder.class); - - private final ChannelHandlerContext context; - 
private final String logPrefix; - - public ByteBufSegmentBuilder( - @NonNull ChannelHandlerContext context, - @NonNull PrimitiveCodec primitiveCodec, - @NonNull FrameCodec frameCodec, - @NonNull String logPrefix) { - super(primitiveCodec, frameCodec); - this.context = context; - this.logPrefix = logPrefix; - } - - @Override - @NonNull - protected ChannelPromise mergeStates(@NonNull List framePromises) { - if (framePromises.size() == 1) { - return framePromises.get(0); - } - // We concatenate multiple frames into one segment. When the segment is written, all the frames - // are written. - ChannelPromise segmentPromise = context.newPromise(); - ImmutableList dependents = ImmutableList.copyOf(framePromises); - segmentPromise.addListener( - future -> { - if (future.isSuccess()) { - for (ChannelPromise framePromise : dependents) { - framePromise.setSuccess(); - } - } else { - Throwable cause = future.cause(); - for (ChannelPromise framePromise : dependents) { - framePromise.setFailure(cause); - } - } - }); - return segmentPromise; - } - - @Override - @NonNull - protected List splitState(@NonNull ChannelPromise framePromise, int sliceCount) { - // We split one frame into multiple slices. When all slices are written, the frame is written. 
- List slicePromises = new ArrayList<>(sliceCount); - for (int i = 0; i < sliceCount; i++) { - slicePromises.add(context.newPromise()); - } - GenericFutureListener> sliceListener = - new SliceWriteListener(framePromise, slicePromises); - for (int i = 0; i < sliceCount; i++) { - slicePromises.get(i).addListener(sliceListener); - } - return slicePromises; - } - - @Override - protected void processSegment( - @NonNull Segment segment, @NonNull ChannelPromise segmentPromise) { - context.write(segment, segmentPromise); - } - - @Override - protected void onLargeFrameSplit(@NonNull Frame frame, int frameLength, int sliceCount) { - LOG.trace( - "[{}] Frame {} is too large ({} > {}), splitting into {} segments", - logPrefix, - frame.streamId, - frameLength, - Segment.MAX_PAYLOAD_LENGTH, - sliceCount); - } - - @Override - protected void onSegmentFull( - @NonNull Frame frame, int frameLength, int currentPayloadLength, int currentFrameCount) { - LOG.trace( - "[{}] Current self-contained segment is full ({}/{} bytes, {} frames), processing now", - logPrefix, - currentPayloadLength, - Segment.MAX_PAYLOAD_LENGTH, - currentFrameCount); - } - - @Override - protected void onSmallFrameAdded( - @NonNull Frame frame, int frameLength, int currentPayloadLength, int currentFrameCount) { - LOG.trace( - "[{}] Added frame {} to current self-contained segment " - + "(bringing it to {}/{} bytes, {} frames)", - logPrefix, - frame.streamId, - currentPayloadLength, - Segment.MAX_PAYLOAD_LENGTH, - currentFrameCount); - } - - @Override - protected void onLastSegmentFlushed(int currentPayloadLength, int currentFrameCount) { - LOG.trace( - "[{}] Flushing last self-contained segment ({}/{} bytes, {} frames)", - logPrefix, - currentPayloadLength, - Segment.MAX_PAYLOAD_LENGTH, - currentFrameCount); - } - - @NotThreadSafe - static class SliceWriteListener implements GenericFutureListener> { - - private final ChannelPromise parentPromise; - private final List slicePromises; - - // All slices are written to 
the same channel, and the segment is built from the Flusher which - // also runs on the same event loop, so we don't need synchronization. - private int remainingSlices; - - SliceWriteListener(@NonNull ChannelPromise parentPromise, List slicePromises) { - this.parentPromise = parentPromise; - this.slicePromises = slicePromises; - this.remainingSlices = slicePromises.size(); - } - - @Override - public void operationComplete(@NonNull Future future) { - if (!parentPromise.isDone()) { - if (future.isSuccess()) { - remainingSlices -= 1; - if (remainingSlices == 0) { - parentPromise.setSuccess(); - } - } else { - // If any slice fails, we can immediately mark the whole frame as failed: - parentPromise.setFailure(future.cause()); - // Cancel any remaining slice, Netty will not send the bytes. - for (ChannelPromise slicePromise : slicePromises) { - slicePromise.cancel(/*Netty ignores this*/ false); - } - } - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoder.java deleted file mode 100644 index 03125bd33a5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoder.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.connection.CrcMismatchException; -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.SegmentCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import java.nio.ByteOrder; -import net.jcip.annotations.NotThreadSafe; - -/** - * Decodes {@link Segment}s from a stream of bytes. - * - *

This works like a regular length-field-based decoder, but we override {@link - * #getUnadjustedFrameLength} to handle two peculiarities: the length is encoded on 17 bits, and we - * also want to check the header CRC before we use it. So we parse the whole segment header ahead of - * time, and store it until we're ready to build the segment. - */ -@NotThreadSafe -public class BytesToSegmentDecoder extends LengthFieldBasedFrameDecoder { - - private final SegmentCodec segmentCodec; - private SegmentCodec.Header header; - - public BytesToSegmentDecoder(@NonNull SegmentCodec segmentCodec) { - super( - // max length (Netty wants this to be the overall length including everything): - segmentCodec.headerLength() - + SegmentCodec.CRC24_LENGTH - + Segment.MAX_PAYLOAD_LENGTH - + SegmentCodec.CRC32_LENGTH, - // offset and size of the "length" field: that's the whole header - 0, - segmentCodec.headerLength() + SegmentCodec.CRC24_LENGTH, - // length adjustment: add the trailing CRC to the declared length - SegmentCodec.CRC32_LENGTH, - // bytes to skip: the header (we've already parsed it while reading the length) - segmentCodec.headerLength() + SegmentCodec.CRC24_LENGTH); - this.segmentCodec = segmentCodec; - } - - @Override - protected Object decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { - try { - ByteBuf payloadAndCrc = (ByteBuf) super.decode(ctx, in); - if (payloadAndCrc == null) { - return null; - } else { - assert header != null; - try { - Segment segment = segmentCodec.decode(header, payloadAndCrc); - header = null; - return segment; - } catch (com.datastax.oss.protocol.internal.CrcMismatchException e) { - throw new CrcMismatchException(e.getMessage()); - } - } - } catch (Exception e) { - // Don't hold on to a stale header if we failed to decode the rest of the segment - header = null; - throw e; - } - } - - @Override - protected long getUnadjustedFrameLength(ByteBuf buffer, int offset, int length, ByteOrder order) { - // The parent class calls this 
repeatedly for the same "frame" if there weren't enough - // accumulated bytes the first time. Only decode the header the first time: - if (header == null) { - try { - header = segmentCodec.decodeHeader(buffer.slice(offset, length)); - } catch (com.datastax.oss.protocol.internal.CrcMismatchException e) { - throw new CrcMismatchException(e.getMessage()); - } - } - return header.payloadLength; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/CompressorSubstitutions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/CompressorSubstitutions.java deleted file mode 100644 index 8a551a039db..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/CompressorSubstitutions.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static com.datastax.oss.driver.internal.core.util.Dependency.LZ4; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; -import com.datastax.oss.protocol.internal.Compressor; -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import io.netty.buffer.ByteBuf; -import java.util.Locale; -import java.util.function.BooleanSupplier; - -/** - * Handles GraalVM substitutions for compressors: LZ4 is only supported if we can find the native - * library in the classpath, and Snappy is never supported. - * - *

When a compressor is not supported, we delete its class, and modify {@link - * BuiltInCompressors#newInstance(String, DriverContext)} to throw an error if the user attempts to - * configure it. - */ -@SuppressWarnings("unused") -public class CompressorSubstitutions { - - @TargetClass(value = BuiltInCompressors.class, onlyWith = Lz4Present.class) - public static final class BuiltInCompressorsLz4Only { - @Substitute - public static Compressor newInstance(String name, DriverContext context) { - switch (name.toLowerCase(Locale.ROOT)) { - case "lz4": - return new Lz4Compressor(context); - case "snappy": - throw new UnsupportedOperationException( - "Snappy compression is not supported for native images"); - case "none": - return Compressor.none(); - default: - throw new IllegalArgumentException( - String.format( - "Unsupported compression algorithm '%s' (from configuration option %s)", - name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); - } - } - } - - @TargetClass(value = BuiltInCompressors.class, onlyWith = Lz4Missing.class) - public static final class NoBuiltInCompressors { - @Substitute - public static Compressor newInstance(String name, DriverContext context) { - switch (name.toLowerCase(Locale.ROOT)) { - case "lz4": - throw new UnsupportedOperationException( - "This native image was not built with support for LZ4 compression"); - case "snappy": - throw new UnsupportedOperationException( - "Snappy compression is not supported for native images"); - case "none": - return Compressor.none(); - default: - throw new IllegalArgumentException( - String.format( - "Unsupported compression algorithm '%s' (from configuration option %s)", - name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); - } - } - } - - public static class Lz4Present implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return GraalDependencyChecker.isPresent(LZ4); - } - } - - public static class Lz4Missing extends Lz4Present { - @Override - public boolean 
getAsBoolean() { - return !super.getAsBoolean(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java deleted file mode 100644 index 20816ba581b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import io.netty.handler.codec.TooLongFrameException; -import java.util.Collections; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@NotThreadSafe -public class FrameDecoder extends LengthFieldBasedFrameDecoder { - private static final Logger LOG = LoggerFactory.getLogger(FrameDecoder.class); - - // Where the length of the frame is located in the payload - private static final int LENGTH_FIELD_OFFSET = 5; - private static final int LENGTH_FIELD_LENGTH = 4; - - private final FrameCodec frameCodec; - private boolean isFirstResponse; - - public FrameDecoder(FrameCodec frameCodec, int maxFrameLengthInBytes) { - super(maxFrameLengthInBytes, LENGTH_FIELD_OFFSET, LENGTH_FIELD_LENGTH, 0, 0, true); - this.frameCodec = frameCodec; - } - - @Override - protected Object decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { - int startIndex = in.readerIndex(); - if (isFirstResponse) { - isFirstResponse = false; - - // Must read at least the protocol v1/v2 header (see below) - if (in.readableBytes() < 8) { - return null; - } - // Special case for obsolete protocol versions (< v3): the length field is at a different - // position, so we can't delegate to super.decode() which would read the wrong length. 
- int protocolVersion = (int) in.getByte(startIndex) & 0b0111_1111; - if (protocolVersion < 3) { - int streamId = in.getByte(startIndex + 2); - int length = in.getInt(startIndex + 4); - // We don't need a full-blown decoder, just to signal the protocol error. So discard the - // incoming data and spoof a server-side protocol error. - if (in.readableBytes() < 8 + length) { - return null; // keep reading until we can discard the whole message at once - } else { - in.readerIndex(startIndex + 8 + length); - } - return Frame.forResponse( - protocolVersion, - streamId, - null, - Frame.NO_PAYLOAD, - Collections.emptyList(), - new Error( - ProtocolConstants.ErrorCode.PROTOCOL_ERROR, - "Invalid or unsupported protocol version")); - } - } - - try { - ByteBuf buffer = (ByteBuf) super.decode(ctx, in); - return (buffer == null) - ? null // did not receive whole frame yet, keep reading - : frameCodec.decode(buffer); - } catch (Exception e) { - // If decoding failed, try to read at least the stream id, so that the error can be - // propagated to the client request matching that id (otherwise we have to fail all - // pending requests on this channel) - int streamId; - try { - streamId = in.getShort(startIndex + 2); - } catch (Exception e1) { - // Should never happen, super.decode does not return a non-null buffer until the length - // field has been read, and the stream id comes before - Loggers.warnWithException(LOG, "Unexpected error while reading stream id", e1); - streamId = -1; - } - if (e instanceof TooLongFrameException) { - // Translate the Netty error to our own type - e = new FrameTooLongException(ctx.channel().remoteAddress(), e.getMessage()); - } - throw new FrameDecodingException(streamId, e); - } - } - - @Override - protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) { - return buffer.slice(index, length); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java deleted file mode 100644 index c209f3f263b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import io.netty.handler.codec.DecoderException; - -/** - * Wraps an error while decoding an incoming protocol frame. - * - *

This is only used internally, never exposed to the client. - */ -public class FrameDecodingException extends DecoderException { - public final int streamId; - - public FrameDecodingException(int streamId, Throwable cause) { - super("Error decoding frame for streamId " + streamId, cause); - this.streamId = streamId; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java deleted file mode 100644 index 6504ab29728..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToMessageEncoder; -import java.util.List; -import net.jcip.annotations.ThreadSafe; - -@ChannelHandler.Sharable -@ThreadSafe -public class FrameEncoder extends MessageToMessageEncoder { - - private final FrameCodec frameCodec; - private final int maxFrameLength; - - public FrameEncoder(FrameCodec frameCodec, int maxFrameLength) { - super(Frame.class); - this.frameCodec = frameCodec; - this.maxFrameLength = maxFrameLength; - } - - @Override - protected void encode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception { - ByteBuf buffer = frameCodec.encode(frame); - int actualLength = buffer.readableBytes(); - if (actualLength > maxFrameLength) { - throw new FrameTooLongException( - ctx.channel().remoteAddress(), - String.format("Outgoing frame length exceeds %d: %d", maxFrameLength, actualLength)); - } - out.add(buffer); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameToSegmentEncoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameToSegmentEncoder.java deleted file mode 100644 index 46c872f4adc..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameToSegmentEncoder.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelOutboundHandlerAdapter; -import io.netty.channel.ChannelPromise; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class FrameToSegmentEncoder extends ChannelOutboundHandlerAdapter { - - private final PrimitiveCodec primitiveCodec; - private final FrameCodec frameCodec; - private final String logPrefix; - - private ByteBufSegmentBuilder segmentBuilder; - - public FrameToSegmentEncoder( - @NonNull PrimitiveCodec primitiveCodec, - @NonNull FrameCodec frameCodec, - @NonNull String logPrefix) { - this.primitiveCodec = primitiveCodec; - this.frameCodec = frameCodec; - this.logPrefix = logPrefix; - } - - @Override - public void handlerAdded(@NonNull ChannelHandlerContext ctx) { - segmentBuilder = new ByteBufSegmentBuilder(ctx, primitiveCodec, frameCodec, logPrefix); - } - - @Override - public void write( - @NonNull ChannelHandlerContext ctx, @NonNull Object msg, @NonNull ChannelPromise promise) - throws Exception { - if (msg instanceof Frame) { - segmentBuilder.addFrame(((Frame) msg), 
promise); - } else { - super.write(ctx, msg, promise); - } - } - - @Override - public void flush(@NonNull ChannelHandlerContext ctx) throws Exception { - segmentBuilder.flush(); - super.flush(ctx); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java deleted file mode 100644 index d376cefc216..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static com.datastax.oss.driver.internal.core.util.Dependency.LZ4; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import io.netty.buffer.ByteBuf; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; -import net.jpountz.lz4.LZ4Compressor; -import net.jpountz.lz4.LZ4Factory; -import net.jpountz.lz4.LZ4FastDecompressor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class Lz4Compressor extends ByteBufCompressor { - - private static final Logger LOG = LoggerFactory.getLogger(Lz4Compressor.class); - - private final LZ4Compressor compressor; - private final LZ4FastDecompressor decompressor; - - public Lz4Compressor(DriverContext context) { - this(context.getSessionName()); - } - - @VisibleForTesting - Lz4Compressor(String sessionName) { - if (DefaultDependencyChecker.isPresent(LZ4)) { - LZ4Factory lz4Factory = LZ4Factory.fastestInstance(); - LOG.info("[{}] Using {}", sessionName, lz4Factory.toString()); - this.compressor = lz4Factory.fastCompressor(); - this.decompressor = lz4Factory.fastDecompressor(); - } else { - throw new IllegalStateException( - "Could not find the LZ4 library on the classpath " - + "(the driver declares it as an optional dependency, " - + "so you need to declare it explicitly)"); - } - } - - @Override - public String algorithm() { - return "lz4"; - } - - @Override - protected ByteBuf compressDirect(ByteBuf input, boolean prependWithUncompressedLength) { - int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); - // If the input is direct we will allocate a direct output buffer as well as this will allow us - // to use LZ4Compressor.compress and so eliminate memory copies. 
- ByteBuf output = - input.alloc().directBuffer((prependWithUncompressedLength ? 4 : 0) + maxCompressedLength); - try { - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - if (prependWithUncompressedLength) { - output.writeInt(in.remaining()); - } - - ByteBuffer out = outputNioBuffer(output); - int written = - compressor.compress( - in, in.position(), in.remaining(), out, out.position(), out.remaining()); - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + written); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw e; - } - return output; - } - - @Override - protected ByteBuf compressHeap(ByteBuf input, boolean prependWithUncompressedLength) { - int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); - - // Not a direct buffer so use byte arrays... - int inOffset = input.arrayOffset() + input.readerIndex(); - byte[] in = input.array(); - int len = input.readableBytes(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and - // so can eliminate the overhead of allocate a new byte[]. - ByteBuf output = - input.alloc().heapBuffer((prependWithUncompressedLength ? 4 : 0) + maxCompressedLength); - try { - if (prependWithUncompressedLength) { - output.writeInt(len); - } - // calculate the correct offset. - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int written = compressor.compress(in, inOffset, len, out, offset); - - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + written); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. 
- output.release(); - throw e; - } - return output; - } - - @Override - protected int readUncompressedLength(ByteBuf compressed) { - return compressed.readInt(); - } - - @Override - protected ByteBuf decompressDirect(ByteBuf input, int uncompressedLength) { - // If the input is direct we will allocate a direct output buffer as well as this will allow us - // to use LZ4Compressor.decompress and so eliminate memory copies. - int readable = input.readableBytes(); - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - ByteBuf output = input.alloc().directBuffer(uncompressedLength); - try { - ByteBuffer out = outputNioBuffer(output); - int read = decompressor.decompress(in, in.position(), out, out.position(), out.remaining()); - if (read != readable) { - throw new IllegalArgumentException("Compressed lengths mismatch"); - } - - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + uncompressedLength); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw e; - } - return output; - } - - @Override - protected ByteBuf decompressHeap(ByteBuf input, int uncompressedLength) { - // Not a direct buffer so use byte arrays... - byte[] in = input.array(); - int len = input.readableBytes(); - int inOffset = input.arrayOffset() + input.readerIndex(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and - // so can eliminate the overhead of allocate a new byte[]. 
- ByteBuf output = input.alloc().heapBuffer(uncompressedLength); - try { - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int read = decompressor.decompress(in, inOffset, out, offset, uncompressedLength); - if (read != len) { - throw new IllegalArgumentException("Compressed lengths mismatch"); - } - - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + uncompressedLength); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw e; - } - return output; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToBytesEncoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToBytesEncoder.java deleted file mode 100644 index c7845545df4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToBytesEncoder.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.SegmentCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToMessageEncoder; -import java.util.List; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -@ChannelHandler.Sharable -public class SegmentToBytesEncoder extends MessageToMessageEncoder> { - - private final SegmentCodec segmentCodec; - - public SegmentToBytesEncoder(@NonNull SegmentCodec segmentCodec) { - this.segmentCodec = segmentCodec; - } - - @Override - protected void encode( - @NonNull ChannelHandlerContext ctx, - @NonNull Segment segment, - @NonNull List out) { - segmentCodec.encode(segment, out); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoder.java deleted file mode 100644 index b15a17bb87f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoder.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.Segment; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.CompositeByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToMessageDecoder; -import java.util.ArrayList; -import java.util.List; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Converts the segments decoded by {@link BytesToSegmentDecoder} into legacy frames understood by - * the rest of the driver. - */ -@NotThreadSafe -public class SegmentToFrameDecoder extends MessageToMessageDecoder> { - - private static final Logger LOG = LoggerFactory.getLogger(SegmentToFrameDecoder.class); - - private static final int UNKNOWN_LENGTH = Integer.MIN_VALUE; - - private final FrameCodec frameCodec; - private final String logPrefix; - - // Accumulated state when we are reading a sequence of slices - private int targetLength = UNKNOWN_LENGTH; - private final List accumulatedSlices = new ArrayList<>(); - private int accumulatedLength; - - public SegmentToFrameDecoder(@NonNull FrameCodec frameCodec, @NonNull String logPrefix) { - this.logPrefix = logPrefix; - this.frameCodec = frameCodec; - } - - @Override - protected void decode( - @NonNull ChannelHandlerContext ctx, - @NonNull Segment segment, - @NonNull List out) { - if (segment.isSelfContained) { - decodeSelfContained(segment, out); - } else { - decodeSlice(segment, ctx.alloc(), out); - } - } - - private void decodeSelfContained(Segment segment, List out) { - ByteBuf payload = segment.payload; - int frameCount = 0; - try { - do { - Frame frame = 
frameCodec.decode(payload); - LOG.trace( - "[{}] Decoded response frame {} from self-contained segment", - logPrefix, - frame.streamId); - out.add(frame); - frameCount += 1; - } while (payload.isReadable()); - } finally { - payload.release(); - } - LOG.trace("[{}] Done processing self-contained segment ({} frames)", logPrefix, frameCount); - } - - private void decodeSlice(Segment segment, ByteBufAllocator allocator, List out) { - assert targetLength != UNKNOWN_LENGTH ^ (accumulatedSlices.isEmpty() && accumulatedLength == 0); - ByteBuf slice = segment.payload; - if (targetLength == UNKNOWN_LENGTH) { - // First slice, read ahead to find the target length - targetLength = FrameCodec.V3_ENCODED_HEADER_SIZE + frameCodec.decodeBodySize(slice); - } - accumulatedSlices.add(slice); - accumulatedLength += slice.readableBytes(); - int accumulatedSlicesSize = accumulatedSlices.size(); - LOG.trace( - "[{}] Decoded slice {}, {}/{} bytes", - logPrefix, - accumulatedSlicesSize, - accumulatedLength, - targetLength); - assert accumulatedLength <= targetLength; - if (accumulatedLength == targetLength) { - // We've received enough data to reassemble the whole message - CompositeByteBuf encodedFrame = allocator.compositeBuffer(accumulatedSlicesSize); - encodedFrame.addComponents(true, accumulatedSlices); - Frame frame; - try { - frame = frameCodec.decode(encodedFrame); - } finally { - encodedFrame.release(); - // Reset our state - targetLength = UNKNOWN_LENGTH; - accumulatedSlices.clear(); - accumulatedLength = 0; - } - LOG.trace( - "[{}] Decoded response frame {} from {} slices", - logPrefix, - frame.streamId, - accumulatedSlicesSize); - out.add(frame); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java deleted file mode 100644 index 21165d808b9..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import com.datastax.oss.driver.internal.core.util.Dependency; -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; -import org.xerial.snappy.Snappy; - -/** - * @implNote The Snappy protocol already encodes the uncompressed length in the compressed payload, - * so {@link #compress(ByteBuf)} and {@link #compressWithoutLength(ByteBuf)} produce the same - * output for this compressor. The corresponding parameters {@code - * prependWithUncompressedLength} and {@code uncompressedLength} are ignored by their respective - * methods. 
- */ -@ThreadSafe -public class SnappyCompressor extends ByteBufCompressor { - - public SnappyCompressor(@SuppressWarnings("unused") DriverContext context) { - if (!DefaultDependencyChecker.isPresent(Dependency.SNAPPY)) { - throw new IllegalStateException( - "Could not find the Snappy library on the classpath " - + "(the driver declares it as an optional dependency, " - + "so you need to declare it explicitly)"); - } - } - - @Override - public String algorithm() { - return "snappy"; - } - - @Override - protected ByteBuf compressDirect( - ByteBuf input, /*ignored*/ boolean prependWithUncompressedLength) { - int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); - // If the input is direct we will allocate a direct output buffer as well as this will allow us - // to use Snappy.compress(ByteBuffer, ByteBuffer) and so eliminate memory copies. - ByteBuf output = input.alloc().directBuffer(maxCompressedLength); - try { - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - ByteBuffer out = outputNioBuffer(output); - int written = Snappy.compress(in, out); - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + written); - return output; - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw new RuntimeException(e); - } - } - - @Override - protected ByteBuf compressHeap(ByteBuf input, /*ignored*/ boolean prependWithUncompressedLength) { - int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); - int inOffset = input.arrayOffset() + input.readerIndex(); - byte[] in = input.array(); - int len = input.readableBytes(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and - // so can eliminate the overhead of allocate a new byte[]. 
- ByteBuf output = input.alloc().heapBuffer(maxCompressedLength); - try { - // Calculate the correct offset. - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int written = Snappy.compress(in, inOffset, len, out, offset); - - // Increase the writerIndex with the written bytes. - output.writerIndex(output.writerIndex() + written); - return output; - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw new RuntimeException(e); - } - } - - @Override - protected int readUncompressedLength(ByteBuf compressed) { - // Since compress methods don't actually prepend with a length, we have nothing to read here. - // Return a bogus length (it will be ignored by the decompress methods, so the actual value - // doesn't matter). - return -1; - } - - @Override - protected ByteBuf decompressDirect(ByteBuf input, /*ignored*/ int uncompressedLength) { - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - ByteBuf output = null; - try { - if (!Snappy.isValidCompressedBuffer(in)) { - throw new IllegalArgumentException( - "Provided frame does not appear to be Snappy compressed"); - } - // If the input is direct we will allocate a direct output buffer as well as this will allow - // us to use Snappy.compress(ByteBuffer, ByteBuffer) and so eliminate memory copies. - output = input.alloc().directBuffer(Snappy.uncompressedLength(in)); - ByteBuffer out = outputNioBuffer(output); - - int size = Snappy.uncompress(in, out); - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + size); - return output; - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. 
- if (output != null) { - output.release(); - } - throw new RuntimeException(e); - } - } - - @Override - protected ByteBuf decompressHeap(ByteBuf input, /*ignored*/ int uncompressedLength) { - // Not a direct buffer so use byte arrays... - int inOffset = input.arrayOffset() + input.readerIndex(); - byte[] in = input.array(); - int len = input.readableBytes(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - ByteBuf output = null; - try { - if (!Snappy.isValidCompressedBuffer(in, inOffset, len)) { - throw new IllegalArgumentException( - "Provided frame does not appear to be Snappy compressed"); - } - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and - // so can eliminate the overhead of allocate a new byte[]. - output = input.alloc().heapBuffer(Snappy.uncompressedLength(in, inOffset, len)); - // Calculate the correct offset. - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int written = Snappy.uncompress(in, inOffset, len, out, offset); - - // Increase the writerIndex with the written bytes. - output.writerIndex(output.writerIndex() + written); - return output; - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. - if (output != null) { - output.release(); - } - throw new RuntimeException(e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java deleted file mode 100644 index 05da030eec3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** Specialization of the native protocol layer for the driver, based on Netty. */ -package com.datastax.oss.driver.internal.core.protocol; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryPolicy.java deleted file mode 100644 index dbf534459a3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryPolicy.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.retry; - -import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH_LOG; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.SIMPLE; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.UNLOGGED_BATCH; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A retry policy that sometimes retries with a lower consistency level than the one initially - * requested. - * - *

BEWARE: this policy may retry queries using a lower consistency level than the one - * initially requested. By doing so, it may break consistency guarantees. In other words, if you use - * this retry policy, there are cases (documented below) where a read at {@code QUORUM} may - * not see a preceding write at {@code QUORUM}. Furthermore, this policy doesn't always respect - * datacenter locality; for example, it may downgrade {@code LOCAL_QUORUM} to {@code ONE}, and thus - * could accidentally send a write that was intended for the local datacenter to another - * datacenter.Do not use this policy unless you have understood the cases where this can happen and - * are ok with that. - * - *

This policy implements the same retries than the {@link DefaultRetryPolicy} policy. But on top - * of that, it also retries in the following cases: - * - *

    - *
  • On a read timeout: if the number of replicas that responded is greater than one, but lower - * than is required by the requested consistency level, the operation is retried at a lower - * consistency level. - *
  • On a write timeout: if the operation is a {@code WriteType.UNLOGGED_BATCH} and at least one - * replica acknowledged the write, the operation is retried at a lower consistency level. - * Furthermore, for other operations, if at least one replica acknowledged the write, the - * timeout is ignored. - *
  • On an unavailable exception: if at least one replica is alive, the operation is retried at - * a lower consistency level. - *
- * - * The lower consistency level to use for retries is determined by the following rules: - * - *
    - *
  • if more than 3 replicas responded, use {@code THREE}. - *
  • if 1, 2 or 3 replicas responded, use the corresponding level {@code ONE}, {@code TWO} or - * {@code THREE}. - *
- * - * Note that if the initial consistency level was {@code EACH_QUORUM}, Cassandra returns the number - * of live replicas in the datacenter that failed to reach consistency, not the overall - * number in the cluster. Therefore if this number is 0, we still retry at {@code ONE}, on the - * assumption that a host may still be up in another datacenter. - * - *

The reasoning behind this retry policy is the following one. If, based on the information the - * Cassandra coordinator node returns, retrying the operation with the initially requested - * consistency has a chance to succeed, do it. Otherwise, if based on this information, we know that - * the initially requested consistency level cannot be achieved currently, then: - * - *

    - *
  • For writes, ignore the exception (thus silently failing the consistency requirement) if we - * know the write has been persisted on at least one replica. - *
  • For reads, try reading at a lower consistency level (thus silently failing the consistency - * requirement). - *
- * - * In other words, this policy implements the idea that if the requested consistency level cannot be - * achieved, the next best thing for writes is to make sure the data is persisted, and that reading - * something is better than reading nothing, even if there is a risk of reading stale data. - */ -public class ConsistencyDowngradingRetryPolicy implements RetryPolicy { - - private static final Logger LOG = - LoggerFactory.getLogger(ConsistencyDowngradingRetryPolicy.class); - - @VisibleForTesting - public static final String VERDICT_ON_READ_TIMEOUT = - "[{}] Verdict on read timeout (consistency: {}, required responses: {}, " - + "received responses: {}, data retrieved: {}, retries: {}): {}"; - - @VisibleForTesting - public static final String VERDICT_ON_WRITE_TIMEOUT = - "[{}] Verdict on write timeout (consistency: {}, write type: {}, " - + "required acknowledgments: {}, received acknowledgments: {}, retries: {}): {}"; - - @VisibleForTesting - public static final String VERDICT_ON_UNAVAILABLE = - "[{}] Verdict on unavailable exception (consistency: {}, " - + "required replica: {}, alive replica: {}, retries: {}): {}"; - - @VisibleForTesting - public static final String VERDICT_ON_ABORTED = - "[{}] Verdict on aborted request (type: {}, message: '{}', retries: {}): {}"; - - @VisibleForTesting - public static final String VERDICT_ON_ERROR = - "[{}] Verdict on node error (type: {}, message: '{}', retries: {}): {}"; - - private final String logPrefix; - - @SuppressWarnings("unused") - public ConsistencyDowngradingRetryPolicy( - @NonNull DriverContext context, @NonNull String profileName) { - this(context.getSessionName() + "|" + profileName); - } - - public ConsistencyDowngradingRetryPolicy(@NonNull String logPrefix) { - this.logPrefix = logPrefix; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry. If less replicas responded than - * required by the consistency level (but at least one replica did respond), the operation is - * retried at a lower consistency level. If enough replicas responded but data was not retrieved, - * the operation is retried with the initial consistency level. Otherwise, an exception is thrown. - */ - @Override - public RetryVerdict onReadTimeoutVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - RetryVerdict verdict; - if (retryCount != 0) { - verdict = RetryVerdict.RETHROW; - } else if (cl.isSerial()) { - // CAS reads are not all that useful in terms of visibility of the writes since CAS write - // supports the normal consistency levels on the committing phase. So the main use case for - // CAS reads is probably for when you've timed out on a CAS write and want to make sure what - // happened. Downgrading in that case would be always wrong so we just special-case to - // rethrow. - verdict = RetryVerdict.RETHROW; - } else if (received < blockFor) { - verdict = maybeDowngrade(received, cl); - } else if (!dataPresent) { - // Retry with same CL since this usually means that enough replica are alive to satisfy the - // consistency but the coordinator picked a dead one for data retrieval, not having detected - // that replica as dead yet. - verdict = RetryVerdict.RETRY_SAME; - } else { - // This usually means a digest mismatch, in which case it's pointless to retry since - // the inconsistency has to be repaired first. - verdict = RetryVerdict.RETHROW; - } - if (LOG.isTraceEnabled()) { - LOG.trace( - VERDICT_ON_READ_TIMEOUT, - logPrefix, - cl, - blockFor, - received, - dataPresent, - retryCount, - verdict); - } - return verdict; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry. If {@code writeType == - * WriteType.BATCH_LOG}, the write is retried with the initial consistency level. If {@code - * writeType == WriteType.UNLOGGED_BATCH} and at least one replica acknowledged, the write is - * retried with a lower consistency level (with unlogged batch, a write timeout can always - * mean that part of the batch haven't been persisted at all, even if {@code receivedAcks > 0}). - * For other write types ({@code WriteType.SIMPLE} and {@code WriteType.BATCH}), if we know the - * write has been persisted on at least one replica, we ignore the exception. Otherwise, an - * exception is thrown. - */ - @Override - public RetryVerdict onWriteTimeoutVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - RetryVerdict verdict; - if (retryCount != 0) { - verdict = RetryVerdict.RETHROW; - } else if (SIMPLE.equals(writeType) || BATCH.equals(writeType)) { - // Since we provide atomicity, if at least one replica acknowledged the write, - // there is no point in retrying - verdict = received > 0 ? RetryVerdict.IGNORE : RetryVerdict.RETHROW; - } else if (UNLOGGED_BATCH.equals(writeType)) { - // Since only part of the batch could have been persisted, - // retry with whatever consistency should allow to persist all - verdict = maybeDowngrade(received, cl); - } else if (BATCH_LOG.equals(writeType)) { - verdict = RetryVerdict.RETRY_SAME; - } else { - verdict = RetryVerdict.RETHROW; - } - if (LOG.isTraceEnabled()) { - LOG.trace( - VERDICT_ON_WRITE_TIMEOUT, - logPrefix, - cl, - writeType, - blockFor, - received, - retryCount, - verdict); - } - return verdict; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry. If at least one replica is known to be - * alive, the operation is retried at a lower consistency level. - */ - @Override - public RetryVerdict onUnavailableVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - RetryVerdict verdict; - if (retryCount != 0) { - verdict = RetryVerdict.RETHROW; - } else if (cl.isSerial()) { - // JAVA-764: if the requested consistency level is serial, it means that the - // operation failed at the paxos phase of a LWT. - // Retry on the next host, on the assumption that the initial coordinator could be - // network-isolated. - verdict = RetryVerdict.RETRY_NEXT; - } else { - verdict = maybeDowngrade(alive, cl); - } - if (LOG.isTraceEnabled()) { - LOG.trace(VERDICT_ON_UNAVAILABLE, logPrefix, cl, required, alive, retryCount, verdict); - } - return verdict; - } - - @Override - public RetryVerdict onRequestAbortedVerdict( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - RetryVerdict verdict = - error instanceof ClosedConnectionException || error instanceof HeartbeatException - ? RetryVerdict.RETRY_NEXT - : RetryVerdict.RETHROW; - if (LOG.isTraceEnabled()) { - LOG.trace( - VERDICT_ON_ABORTED, - logPrefix, - error.getClass().getSimpleName(), - error.getMessage(), - retryCount, - verdict); - } - return verdict; - } - - @Override - public RetryVerdict onErrorResponseVerdict( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - RetryVerdict verdict = - error instanceof WriteFailureException || error instanceof ReadFailureException - ? 
RetryVerdict.RETHROW - : RetryVerdict.RETRY_NEXT; - if (LOG.isTraceEnabled()) { - LOG.trace( - VERDICT_ON_ERROR, - logPrefix, - error.getClass().getSimpleName(), - error.getMessage(), - retryCount, - verdict); - } - return verdict; - } - - @Override - @Deprecated - public RetryDecision onReadTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - throw new UnsupportedOperationException("onReadTimeout"); - } - - @Override - @Deprecated - public RetryDecision onWriteTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - throw new UnsupportedOperationException("onWriteTimeout"); - } - - @Override - @Deprecated - public RetryDecision onUnavailable( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - throw new UnsupportedOperationException("onUnavailable"); - } - - @Override - @Deprecated - public RetryDecision onRequestAborted( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - throw new UnsupportedOperationException("onRequestAborted"); - } - - @Override - @Deprecated - public RetryDecision onErrorResponse( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - throw new UnsupportedOperationException("onErrorResponse"); - } - - @Override - public void close() {} - - private RetryVerdict maybeDowngrade(int alive, ConsistencyLevel current) { - if (alive >= 3) { - return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.THREE); - } - if (alive == 2) { - return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.TWO); - } - // JAVA-1005: EACH_QUORUM does not report a global number of alive replicas - // so even if we get 0 alive replicas, there might be a node up in some other datacenter - if (alive == 1 || current.getProtocolCode() == 
ConsistencyLevel.EACH_QUORUM.getProtocolCode()) { - return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.ONE); - } - return RetryVerdict.RETHROW; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryVerdict.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryVerdict.java deleted file mode 100644 index d78f80c7354..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryVerdict.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.retry; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -public class ConsistencyDowngradingRetryVerdict implements RetryVerdict { - - private final ConsistencyLevel consistencyLevel; - - public ConsistencyDowngradingRetryVerdict(@NonNull ConsistencyLevel consistencyLevel) { - this.consistencyLevel = consistencyLevel; - } - - @NonNull - @Override - public RetryDecision getRetryDecision() { - return RetryDecision.RETRY_SAME; - } - - @NonNull - @Override - public RequestT getRetryRequest(@NonNull RequestT previous) { - if (previous instanceof Statement) { - Statement statement = (Statement) previous; - @SuppressWarnings("unchecked") - RequestT toRetry = (RequestT) statement.setConsistencyLevel(consistencyLevel); - return toRetry; - } - return previous; - } - - @Override - public String toString() { - return getRetryDecision() + " at consistency " + consistencyLevel; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java deleted file mode 100644 index 8cea1a564b5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.retry; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default retry policy. - * - *

This is a very conservative implementation: it triggers a maximum of one retry per request, - * and only in cases that have a high chance of success (see the method javadocs for detailed - * explanations of each case). - * - *

To activate this policy, modify the {@code advanced.retry-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.retry-policy {
- *     class = DefaultRetryPolicy
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class DefaultRetryPolicy implements RetryPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultRetryPolicy.class); - - @VisibleForTesting - public static final String RETRYING_ON_READ_TIMEOUT = - "[{}] Retrying on read timeout on same host (consistency: {}, required responses: {}, " - + "received responses: {}, data retrieved: {}, retries: {})"; - - @VisibleForTesting - public static final String RETRYING_ON_WRITE_TIMEOUT = - "[{}] Retrying on write timeout on same host (consistency: {}, write type: {}, " - + "required acknowledgments: {}, received acknowledgments: {}, retries: {})"; - - @VisibleForTesting - public static final String RETRYING_ON_UNAVAILABLE = - "[{}] Retrying on unavailable exception on next host (consistency: {}, " - + "required replica: {}, alive replica: {}, retries: {})"; - - @VisibleForTesting - public static final String RETRYING_ON_ABORTED = - "[{}] Retrying on aborted request on next host (retries: {})"; - - @VisibleForTesting - public static final String RETRYING_ON_ERROR = - "[{}] Retrying on node error on next host (retries: {})"; - - private final String logPrefix; - - public DefaultRetryPolicy(DriverContext context, String profileName) { - this.logPrefix = (context != null ? context.getSessionName() : null) + "|" + profileName; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry (to the same node), and only if enough - * replicas had responded to the read request but data was not retrieved amongst those. That - * usually means that enough replicas are alive to satisfy the consistency, but the coordinator - * picked a dead one for data retrieval, not having detected that replica as dead yet. The - * reasoning is that by the time we get the timeout, the dead replica will likely have been - * detected as dead and the retry has a high chance of success. - * - *

Otherwise, the exception is rethrown. - */ - @Override - @Deprecated - public RetryDecision onReadTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - - RetryDecision decision = - (retryCount == 0 && received >= blockFor && !dataPresent) - ? RetryDecision.RETRY_SAME - : RetryDecision.RETHROW; - - if (decision == RetryDecision.RETRY_SAME && LOG.isTraceEnabled()) { - LOG.trace(RETRYING_ON_READ_TIMEOUT, logPrefix, cl, blockFor, received, false, retryCount); - } - - return decision; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry (to the same node), and only for a - * {@code WriteType.BATCH_LOG} write. The reasoning is that the coordinator tries to write the - * distributed batch log against a small subset of nodes in the local datacenter; a timeout - * usually means that none of these nodes were alive but the coordinator hadn't detected them as - * dead yet. By the time we get the timeout, the dead nodes will likely have been detected as - * dead, and the retry has thus a high chance of success. - * - *

Otherwise, the exception is rethrown. - */ - @Override - @Deprecated - public RetryDecision onWriteTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - - RetryDecision decision = - (retryCount == 0 && writeType == DefaultWriteType.BATCH_LOG) - ? RetryDecision.RETRY_SAME - : RetryDecision.RETHROW; - - if (decision == RetryDecision.RETRY_SAME && LOG.isTraceEnabled()) { - LOG.trace( - RETRYING_ON_WRITE_TIMEOUT, logPrefix, cl, writeType, blockFor, received, retryCount); - } - return decision; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry, to the next node in the query plan. The - * rationale is that the first coordinator might have been network-isolated from all other nodes - * (thinking they're down), but still able to communicate with the client; in that case, retrying - * on the same host has almost no chance of success, but moving to the next host might solve the - * issue. - * - *

Otherwise, the exception is rethrown. - */ - @Override - @Deprecated - public RetryDecision onUnavailable( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - - RetryDecision decision = (retryCount == 0) ? RetryDecision.RETRY_NEXT : RetryDecision.RETHROW; - - if (decision == RetryDecision.RETRY_NEXT && LOG.isTraceEnabled()) { - LOG.trace(RETRYING_ON_UNAVAILABLE, logPrefix, cl, required, alive, retryCount); - } - - return decision; - } - - /** - * {@inheritDoc} - * - *

This implementation retries on the next node if the connection was closed, and rethrows - * (assuming a driver bug) in all other cases. - */ - @Override - @Deprecated - public RetryDecision onRequestAborted( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - - RetryDecision decision = - (error instanceof ClosedConnectionException || error instanceof HeartbeatException) - ? RetryDecision.RETRY_NEXT - : RetryDecision.RETHROW; - - if (decision == RetryDecision.RETRY_NEXT && LOG.isTraceEnabled()) { - LOG.trace(RETRYING_ON_ABORTED, logPrefix, retryCount, error); - } - - return decision; - } - - /** - * {@inheritDoc} - * - *

This implementation rethrows read and write failures, and retries other errors on the next - * node. - */ - @Override - @Deprecated - public RetryDecision onErrorResponse( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - - RetryDecision decision = - (error instanceof ReadFailureException || error instanceof WriteFailureException) - ? RetryDecision.RETHROW - : RetryDecision.RETRY_NEXT; - - if (decision == RetryDecision.RETRY_NEXT && LOG.isTraceEnabled()) { - LOG.trace(RETRYING_ON_ERROR, logPrefix, retryCount, error); - } - - return decision; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryVerdict.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryVerdict.java deleted file mode 100644 index e74651e30de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryVerdict.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.retry; - -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import edu.umd.cs.findbugs.annotations.NonNull; - -public class DefaultRetryVerdict implements RetryVerdict { - - private final RetryDecision decision; - - public DefaultRetryVerdict(@NonNull RetryDecision decision) { - this.decision = decision; - } - - @NonNull - @Override - public RetryDecision getRetryDecision() { - return decision; - } - - @Override - public String toString() { - return getRetryDecision().name(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java deleted file mode 100644 index 7abe49a98c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.servererrors; - -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultWriteTypeRegistry implements WriteTypeRegistry { - - private static final ImmutableList values = - ImmutableList.builder().add(DefaultWriteType.values()).build(); - - @Override - public WriteType fromName(String name) { - return DefaultWriteType.valueOf(name); - } - - @Override - public ImmutableList getValues() { - return values; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java deleted file mode 100644 index 537c3922f0f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.servererrors; - -import com.datastax.oss.driver.api.core.servererrors.WriteType; - -public interface WriteTypeRegistry { - WriteType fromName(String name); - - /** @return all the values known to this driver instance. */ - Iterable getValues(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessors.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessors.java deleted file mode 100644 index dc6e6a295a1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessors.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import static com.datastax.oss.driver.internal.core.util.Dependency.REACTIVE_STREAMS; -import static com.datastax.oss.driver.internal.core.util.Dependency.TINKERPOP; - -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestSyncProcessor; -import com.datastax.dse.driver.internal.core.cql.continuous.reactive.ContinuousCqlRequestReactiveProcessor; -import com.datastax.dse.driver.internal.core.cql.reactive.CqlRequestReactiveProcessor; -import com.datastax.dse.driver.internal.core.graph.GraphRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.graph.GraphRequestSyncProcessor; -import com.datastax.dse.driver.internal.core.graph.GraphSupportChecker; -import com.datastax.dse.driver.internal.core.graph.reactive.ReactiveGraphRequestProcessor; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestSyncProcessor; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class BuiltInRequestProcessors { - - private static final Logger LOG = LoggerFactory.getLogger(BuiltInRequestProcessors.class); - - public static List> createDefaultProcessors(DefaultDriverContext context) { - List> processors = new ArrayList<>(); - addBasicProcessors(processors, context); - if (DefaultDependencyChecker.isPresent(TINKERPOP)) { - addGraphProcessors(context, processors); - } else { - LOG.debug("Tinkerpop was not found on the 
classpath: graph extensions will not be available"); - } - if (DefaultDependencyChecker.isPresent(REACTIVE_STREAMS)) { - addReactiveProcessors(processors); - } else { - LOG.debug( - "Reactive Streams was not found on the classpath: reactive extensions will not be available"); - } - if (DefaultDependencyChecker.isPresent(REACTIVE_STREAMS) - && DefaultDependencyChecker.isPresent(TINKERPOP)) { - addGraphReactiveProcessors(context, processors); - } - return processors; - } - - public static void addBasicProcessors( - List> processors, DefaultDriverContext context) { - // regular requests (sync and async) - CqlRequestAsyncProcessor cqlRequestAsyncProcessor = new CqlRequestAsyncProcessor(); - CqlRequestSyncProcessor cqlRequestSyncProcessor = - new CqlRequestSyncProcessor(cqlRequestAsyncProcessor); - processors.add(cqlRequestAsyncProcessor); - processors.add(cqlRequestSyncProcessor); - - // prepare requests (sync and async) - CqlPrepareAsyncProcessor cqlPrepareAsyncProcessor = - new CqlPrepareAsyncProcessor(Optional.of(context)); - CqlPrepareSyncProcessor cqlPrepareSyncProcessor = - new CqlPrepareSyncProcessor(cqlPrepareAsyncProcessor); - processors.add(cqlPrepareAsyncProcessor); - processors.add(cqlPrepareSyncProcessor); - - // continuous requests (sync and async) - ContinuousCqlRequestAsyncProcessor continuousCqlRequestAsyncProcessor = - new ContinuousCqlRequestAsyncProcessor(); - ContinuousCqlRequestSyncProcessor continuousCqlRequestSyncProcessor = - new ContinuousCqlRequestSyncProcessor(continuousCqlRequestAsyncProcessor); - processors.add(continuousCqlRequestAsyncProcessor); - processors.add(continuousCqlRequestSyncProcessor); - } - - public static void addGraphProcessors( - DefaultDriverContext context, List> processors) { - GraphRequestAsyncProcessor graphRequestAsyncProcessor = - new GraphRequestAsyncProcessor(context, new GraphSupportChecker()); - GraphRequestSyncProcessor graphRequestSyncProcessor = - new GraphRequestSyncProcessor(graphRequestAsyncProcessor); - 
processors.add(graphRequestAsyncProcessor); - processors.add(graphRequestSyncProcessor); - } - - public static void addReactiveProcessors(List> processors) { - CqlRequestReactiveProcessor cqlRequestReactiveProcessor = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()); - ContinuousCqlRequestReactiveProcessor continuousCqlRequestReactiveProcessor = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()); - processors.add(cqlRequestReactiveProcessor); - processors.add(continuousCqlRequestReactiveProcessor); - } - - public static void addGraphReactiveProcessors( - DefaultDriverContext context, List> processors) { - ReactiveGraphRequestProcessor reactiveGraphRequestProcessor = - new ReactiveGraphRequestProcessor( - new GraphRequestAsyncProcessor(context, new GraphSupportChecker())); - processors.add(reactiveGraphRequestProcessor); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessorsSubstitutions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessorsSubstitutions.java deleted file mode 100644 index b8bca431228..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessorsSubstitutions.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import static com.datastax.oss.driver.internal.core.util.Dependency.REACTIVE_STREAMS; -import static com.datastax.oss.driver.internal.core.util.Dependency.TINKERPOP; - -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import java.util.ArrayList; -import java.util.List; -import java.util.function.BooleanSupplier; - -@SuppressWarnings("unused") -public class BuiltInRequestProcessorsSubstitutions { - - @TargetClass(value = BuiltInRequestProcessors.class, onlyWith = GraphMissingReactiveMissing.class) - public static final class BuiltInRequestProcessorsGraphMissingReactiveMissing { - - @Substitute - public static List> createDefaultProcessors( - DefaultDriverContext context) { - List> processors = new ArrayList<>(); - BuiltInRequestProcessors.addBasicProcessors(processors, context); - return processors; - } - } - - @TargetClass(value = BuiltInRequestProcessors.class, onlyWith = GraphMissingReactivePresent.class) - public static final class BuiltInRequestProcessorsGraphMissingReactivePresent { - - @Substitute - public static List> createDefaultProcessors( - DefaultDriverContext context) { - List> processors = new ArrayList<>(); - BuiltInRequestProcessors.addBasicProcessors(processors, context); - BuiltInRequestProcessors.addReactiveProcessors(processors); - return processors; - } - } - - 
@TargetClass(value = BuiltInRequestProcessors.class, onlyWith = GraphPresentReactiveMissing.class) - public static final class BuiltInRequestProcessorsGraphPresentReactiveMissing { - - @Substitute - public static List> createDefaultProcessors( - DefaultDriverContext context) { - List> processors = new ArrayList<>(); - BuiltInRequestProcessors.addBasicProcessors(processors, context); - BuiltInRequestProcessors.addGraphProcessors(context, processors); - return processors; - } - } - - public static class GraphMissingReactiveMissing implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return !GraalDependencyChecker.isPresent(TINKERPOP) - && !GraalDependencyChecker.isPresent(REACTIVE_STREAMS); - } - } - - public static class GraphMissingReactivePresent implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return !GraalDependencyChecker.isPresent(TINKERPOP) - && GraalDependencyChecker.isPresent(REACTIVE_STREAMS); - } - } - - public static class GraphPresentReactiveMissing implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return GraalDependencyChecker.isPresent(TINKERPOP) - && !GraalDependencyChecker.isPresent(REACTIVE_STREAMS); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java deleted file mode 100644 index b795c30fce7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java +++ /dev/null @@ -1,711 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.LifecycleListener; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager.RefreshSchemaResult; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metadata.NodeStateManager; -import 
com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.util.concurrent.EventExecutor; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The session implementation. - * - *

It maintains a {@link ChannelPool} to each node that the {@link LoadBalancingPolicy} set to a - * non-ignored distance. It listens for distance events and node state events, in order to adjust - * the pools accordingly. - * - *

It executes requests by: - * - *

    - *
  • picking the appropriate processor to convert the request into a protocol message. - *
  • getting a query plan from the load balancing policy - *
  • trying to send the message on each pool, in the order of the query plan - *
- */ -@ThreadSafe -public class DefaultSession implements CqlSession { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultSession.class); - - private static final AtomicInteger INSTANCE_COUNT = new AtomicInteger(); - - public static CompletionStage init( - InternalDriverContext context, Set contactPoints, CqlIdentifier keyspace) { - return new DefaultSession(context, contactPoints).init(keyspace); - } - - private final InternalDriverContext context; - private final EventExecutor adminExecutor; - private final String logPrefix; - private final SingleThreaded singleThreaded; - private final MetadataManager metadataManager; - private final RequestProcessorRegistry processorRegistry; - private final PoolManager poolManager; - private final SessionMetricUpdater metricUpdater; - - private DefaultSession(InternalDriverContext context, Set contactPoints) { - int instanceCount = INSTANCE_COUNT.incrementAndGet(); - int threshold = - context.getConfig().getDefaultProfile().getInt(DefaultDriverOption.SESSION_LEAK_THRESHOLD); - LOG.debug( - "Creating new session {} ({} live instances)", context.getSessionName(), instanceCount); - if (threshold > 0 && instanceCount > threshold) { - LOG.warn( - "You have too many session instances: {} active, expected less than {} " - + "(see '{}' in the configuration)", - instanceCount, - threshold, - DefaultDriverOption.SESSION_LEAK_THRESHOLD.getPath()); - } - - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - try { - this.context = context; - this.singleThreaded = new SingleThreaded(context, contactPoints); - this.metadataManager = context.getMetadataManager(); - this.processorRegistry = context.getRequestProcessorRegistry(); - this.poolManager = context.getPoolManager(); - this.metricUpdater = context.getMetricsFactory().getSessionUpdater(); - } catch (Throwable t) { - LOG.debug( - "Error creating session {} ({} live instances)", - 
context.getSessionName(), - INSTANCE_COUNT.decrementAndGet()); - // Rethrow but make sure we release any resources allocated by Netty. At this stage there are - // no scheduled tasks on the event loops so getNow() won't block. - try { - context.getNettyOptions().onClose().getNow(); - } catch (Throwable suppressed) { - Loggers.warnWithException( - LOG, - "[{}] Error while closing NettyOptions " - + "(suppressed because we're already handling an init failure)", - logPrefix, - suppressed); - } - throw t; - } - } - - private CompletionStage init(CqlIdentifier keyspace) { - RunOrSchedule.on(adminExecutor, () -> singleThreaded.init(keyspace)); - return singleThreaded.initFuture; - } - - @NonNull - @Override - public String getName() { - return context.getSessionName(); - } - - @NonNull - @Override - public Metadata getMetadata() { - return metadataManager.getMetadata(); - } - - @Override - public boolean isSchemaMetadataEnabled() { - return metadataManager.isSchemaEnabled(); - } - - @NonNull - @Override - public CompletionStage setSchemaMetadataEnabled(@Nullable Boolean newValue) { - return metadataManager.setSchemaEnabled(newValue); - } - - @NonNull - @Override - public CompletionStage refreshSchemaAsync() { - return metadataManager - .refreshSchema(null, true, true) - .thenApply(RefreshSchemaResult::getMetadata); - } - - @NonNull - @Override - public CompletionStage checkSchemaAgreementAsync() { - return context.getTopologyMonitor().checkSchemaAgreement(); - } - - @NonNull - @Override - public DriverContext getContext() { - return context; - } - - @NonNull - @Override - public Optional getKeyspace() { - return Optional.ofNullable(poolManager.getKeyspace()); - } - - @NonNull - @Override - public Optional getMetrics() { - return context.getMetricsFactory().getMetrics(); - } - - /** - * INTERNAL USE ONLY -- switches the session to a new keyspace. - * - *

This is called by the driver when a {@code USE} query is successfully executed through the - * session. Calling it from anywhere else is highly discouraged, as an invalid keyspace would - * wreak havoc (close all connections and make the session unusable). - */ - @NonNull - public CompletionStage setKeyspace(@NonNull CqlIdentifier newKeyspace) { - return poolManager.setKeyspace(newKeyspace); - } - - @NonNull - public Map getPools() { - return poolManager.getPools(); - } - - @Nullable - @Override - public ResultT execute( - @NonNull RequestT request, @NonNull GenericType resultType) { - RequestProcessor processor = - processorRegistry.processorFor(request, resultType); - return isClosed() - ? processor.newFailure(new IllegalStateException("Session is closed")) - : processor.process(request, this, context, logPrefix); - } - - @Nullable - public DriverChannel getChannel(@NonNull Node node, @NonNull String logPrefix) { - ChannelPool pool = poolManager.getPools().get(node); - if (pool == null) { - LOG.trace("[{}] No pool to {}, skipping", logPrefix, node); - return null; - } else { - DriverChannel channel = pool.next(); - if (channel == null) { - LOG.trace("[{}] Pool returned no channel for {}, skipping", logPrefix, node); - return null; - } else if (channel.closeFuture().isDone()) { - LOG.trace("[{}] Pool returned closed connection to {}, skipping", logPrefix, node); - return null; - } else { - return channel; - } - } - } - - @NonNull - public ConcurrentMap getRepreparePayloads() { - return poolManager.getRepreparePayloads(); - } - - @NonNull - public SessionMetricUpdater getMetricUpdater() { - return metricUpdater; - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - return closeSafely(singleThreaded::close); - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return closeSafely(singleThreaded::forceClose); - } - - 
private CompletionStage closeSafely(Runnable action) { - // Protect against getting closed twice: with the default NettyOptions, closing shuts down - // adminExecutor, so we don't want to call RunOrSchedule the second time. - if (!singleThreaded.closeFuture.isDone()) { - try { - RunOrSchedule.on(adminExecutor, action); - } catch (RejectedExecutionException e) { - // Checking the future is racy, there is still a tiny window that could get us here. - LOG.warn( - "[{}] Ignoring terminated executor. " - + "This generally happens if you close the session multiple times concurrently, " - + "and can be safely ignored if the close() call returns normally.", - logPrefix, - e); - } - } - return singleThreaded.closeFuture; - } - - private class SingleThreaded { - - private final InternalDriverContext context; - private final Set initialContactPoints; - private final NodeStateManager nodeStateManager; - private final SchemaListenerNotifier schemaListenerNotifier; - private final CompletableFuture initFuture = new CompletableFuture<>(); - private boolean initWasCalled; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean closeWasCalled; - private boolean forceCloseWasCalled; - - private SingleThreaded(InternalDriverContext context, Set contactPoints) { - this.context = context; - this.nodeStateManager = new NodeStateManager(context); - this.initialContactPoints = contactPoints; - this.schemaListenerNotifier = - new SchemaListenerNotifier( - context.getSchemaChangeListener(), context.getEventBus(), adminExecutor); - context - .getEventBus() - .register( - NodeStateEvent.class, RunOrSchedule.on(adminExecutor, this::onNodeStateChanged)); - CompletableFutures.propagateCancellation( - this.initFuture, context.getTopologyMonitor().initFuture()); - } - - private void init(CqlIdentifier keyspace) { - assert adminExecutor.inEventLoop(); - if (initWasCalled) { - return; - } - initWasCalled = true; - LOG.debug("[{}] Starting initialization", 
logPrefix); - - // Eagerly fetch user-facing policies right now, no need to start opening connections if - // something is wrong in the configuration. - try { - context.getLoadBalancingPolicies(); - context.getRetryPolicies(); - context.getSpeculativeExecutionPolicies(); - context.getReconnectionPolicy(); - context.getAddressTranslator(); - context.getNodeStateListener(); - context.getSchemaChangeListener(); - context.getRequestTracker(); - context.getRequestThrottler(); - context.getAuthProvider(); - context.getSslHandlerFactory(); - context.getTimestampGenerator(); - } catch (Throwable error) { - RunOrSchedule.on(adminExecutor, this::closePolicies); - context - .getNettyOptions() - .onClose() - .addListener( - f -> { - if (!f.isSuccess()) { - Loggers.warnWithException( - LOG, - "[{}] Error while closing NettyOptions " - + "(suppressed because we're already handling an init failure)", - logPrefix, - f.cause()); - } - initFuture.completeExceptionally(error); - }); - LOG.debug( - "Error initializing new session {} ({} live instances)", - context.getSessionName(), - INSTANCE_COUNT.decrementAndGet()); - return; - } - - closeFuture.whenComplete( - (v, error) -> - LOG.debug( - "Closing session {} ({} live instances)", - context.getSessionName(), - INSTANCE_COUNT.decrementAndGet())); - - MetadataManager metadataManager = context.getMetadataManager(); - metadataManager.addContactPoints(initialContactPoints); - context - .getTopologyMonitor() - .init() - .thenCompose(v -> metadataManager.refreshNodes()) - .thenCompose(v -> checkProtocolVersion()) - .thenCompose(v -> initialSchemaRefresh()) - .thenCompose(v -> initializePools(keyspace)) - .whenComplete( - (v, error) -> { - if (error == null) { - LOG.debug("[{}] Initialization complete, ready", logPrefix); - notifyListeners(); - initFuture.complete(DefaultSession.this); - } else { - LOG.debug("[{}] Initialization failed, force closing", logPrefix, error); - forceCloseAsync() - .whenComplete( - (v1, error1) -> { - if (error1 
!= null) { - error.addSuppressed(error1); - } - initFuture.completeExceptionally(error); - }); - } - }); - } - - private CompletionStage checkProtocolVersion() { - try { - boolean protocolWasForced = - context.getConfig().getDefaultProfile().isDefined(DefaultDriverOption.PROTOCOL_VERSION); - if (!protocolWasForced) { - ProtocolVersion currentVersion = context.getProtocolVersion(); - ProtocolVersion bestVersion = - context - .getProtocolVersionRegistry() - .highestCommon(metadataManager.getMetadata().getNodes().values()); - if (bestVersion.getCode() < currentVersion.getCode()) { - LOG.info( - "[{}] Negotiated protocol version {} for the initial contact point, " - + "but other nodes only support {}, downgrading", - logPrefix, - currentVersion, - bestVersion); - context.getChannelFactory().setProtocolVersion(bestVersion); - - // Note that, with the default topology monitor, the control connection is already - // connected with currentVersion at this point. This doesn't really matter because none - // of the control queries use any protocol-dependent feature. - // Keep going as-is, the control connection might switch to the "correct" version later - // if it reconnects to another node. 
- } else if (bestVersion.getCode() > currentVersion.getCode()) { - LOG.info( - "[{}] Negotiated protocol version {} for the initial contact point, " - + "but cluster seems to support {}, keeping the negotiated version", - logPrefix, - currentVersion, - bestVersion); - } - } - return CompletableFuture.completedFuture(null); - } catch (Throwable throwable) { - return CompletableFutures.failedFuture(throwable); - } - } - - private CompletionStage initialSchemaRefresh() { - try { - return metadataManager - .refreshSchema(null, false, true) - .exceptionally( - error -> { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema during initialization, " - + "proceeding without schema metadata", - logPrefix, - error); - return null; - }); - } catch (Throwable throwable) { - return CompletableFutures.failedFuture(throwable); - } - } - - private CompletionStage initializePools(CqlIdentifier keyspace) { - try { - nodeStateManager.markInitialized(); - context.getLoadBalancingPolicyWrapper().init(); - context.getConfigLoader().onDriverInit(context); - return poolManager.init(keyspace); - } catch (Throwable throwable) { - return CompletableFutures.failedFuture(throwable); - } - } - - private void notifyListeners() { - for (LifecycleListener lifecycleListener : context.getLifecycleListeners()) { - try { - lifecycleListener.onSessionReady(); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Error while notifying {} of session ready", - logPrefix, - lifecycleListener, - t); - } - } - try { - context.getNodeStateListener().onSessionReady(DefaultSession.this); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Error while notifying {} of session ready", - logPrefix, - context.getNodeStateListener(), - t); - } - try { - schemaListenerNotifier.onSessionReady(DefaultSession.this); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Error while notifying {} of session ready", - logPrefix, - 
schemaListenerNotifier, - t); - } - try { - context.getRequestTracker().onSessionReady(DefaultSession.this); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Error while notifying {} of session ready", - logPrefix, - context.getRequestTracker(), - t); - } - } - - private void onNodeStateChanged(NodeStateEvent event) { - assert adminExecutor.inEventLoop(); - DefaultNode node = event.node; - if (node == null) { - LOG.debug( - "[{}] Node for this event was removed, ignoring state change: {}", logPrefix, event); - } else if (event.newState == null) { - context.getNodeStateListener().onRemove(node); - } else if (event.oldState == null && event.newState == NodeState.UNKNOWN) { - context.getNodeStateListener().onAdd(node); - } else if (event.newState == NodeState.UP) { - context.getNodeStateListener().onUp(node); - } else if (event.newState == NodeState.DOWN || event.newState == NodeState.FORCED_DOWN) { - context.getNodeStateListener().onDown(node); - } - } - - private void close() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - LOG.debug("[{}] Starting shutdown", logPrefix); - - closePolicies(); - - // clear metrics to prevent memory leak - for (Node n : metadataManager.getMetadata().getNodes().values()) { - NodeMetricUpdater updater = ((DefaultNode) n).getMetricUpdater(); - if (updater != null) updater.clearMetrics(); - } - - if (metricUpdater != null) metricUpdater.clearMetrics(); - - List> childrenCloseStages = new ArrayList<>(); - for (AsyncAutoCloseable closeable : internalComponentsToClose()) { - childrenCloseStages.add(closeable.closeAsync()); - } - CompletableFutures.whenAllDone( - childrenCloseStages, () -> onChildrenClosed(childrenCloseStages), adminExecutor); - } - - private void forceClose() { - assert adminExecutor.inEventLoop(); - if (forceCloseWasCalled) { - return; - } - forceCloseWasCalled = true; - LOG.debug( - "[{}] Starting forced shutdown (was {}closed before)", - 
logPrefix, - (closeWasCalled ? "" : "not ")); - - // clear metrics to prevent memory leak - for (Node n : metadataManager.getMetadata().getNodes().values()) { - NodeMetricUpdater updater = ((DefaultNode) n).getMetricUpdater(); - if (updater != null) updater.clearMetrics(); - } - - if (metricUpdater != null) metricUpdater.clearMetrics(); - - if (closeWasCalled) { - // onChildrenClosed has already been scheduled - for (AsyncAutoCloseable closeable : internalComponentsToClose()) { - closeable.forceCloseAsync(); - } - } else { - closePolicies(); - List> childrenCloseStages = new ArrayList<>(); - for (AsyncAutoCloseable closeable : internalComponentsToClose()) { - childrenCloseStages.add(closeable.forceCloseAsync()); - } - CompletableFutures.whenAllDone( - childrenCloseStages, () -> onChildrenClosed(childrenCloseStages), adminExecutor); - } - } - - private void onChildrenClosed(List> childrenCloseStages) { - assert adminExecutor.inEventLoop(); - for (CompletionStage stage : childrenCloseStages) { - warnIfFailed(stage); - } - context - .getNettyOptions() - .onClose() - .addListener( - f -> { - if (!f.isSuccess()) { - closeFuture.completeExceptionally(f.cause()); - } else { - closeFuture.complete(null); - } - }); - } - - private void warnIfFailed(CompletionStage stage) { - CompletableFuture future = stage.toCompletableFuture(); - assert future.isDone(); - if (future.isCompletedExceptionally()) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while closing", - logPrefix, - CompletableFutures.getFailed(future)); - } - } - - private void closePolicies() { - // This is a bit tricky: we might be closing the session because of an initialization error. - // This error might have been triggered by a policy failing to initialize. If we try to access - // the policy here to close it, it will fail again. So make sure we ignore that error and - // proceed to close the other policies. 
- List policies = new ArrayList<>(); - for (Supplier supplier : - ImmutableList.>of( - context::getReconnectionPolicy, - context::getLoadBalancingPolicyWrapper, - context::getAddressTranslator, - context::getConfigLoader, - context::getNodeStateListener, - context::getSchemaChangeListener, - context::getRequestTracker, - context::getRequestThrottler, - context::getTimestampGenerator)) { - try { - policies.add(supplier.get()); - } catch (Throwable t) { - // Assume the policy had failed to initialize, and we don't need to close it => ignore - } - } - try { - context.getAuthProvider().ifPresent(policies::add); - } catch (Throwable t) { - // ignore - } - try { - context.getSslHandlerFactory().ifPresent(policies::add); - } catch (Throwable t) { - // ignore - } - try { - policies.addAll(context.getRetryPolicies().values()); - } catch (Throwable t) { - // ignore - } - try { - policies.addAll(context.getSpeculativeExecutionPolicies().values()); - } catch (Throwable t) { - // ignore - } - policies.addAll(context.getLifecycleListeners()); - - // Finally we have a list of all the policies that initialized successfully, close them: - for (AutoCloseable policy : policies) { - try { - policy.close(); - } catch (Throwable t) { - Loggers.warnWithException(LOG, "[{}] Error while closing {}", logPrefix, policy, t); - } - } - } - - private List internalComponentsToClose() { - ImmutableList.Builder components = - ImmutableList.builder() - .add(poolManager, nodeStateManager, metadataManager); - - // Same as closePolicies(): make sure we don't trigger errors by accessing context components - // that had failed to initialize: - try { - components.add(context.getTopologyMonitor()); - } catch (Throwable t) { - // ignore - } - try { - components.add(context.getControlConnection()); - } catch (Throwable t) { - // ignore - } - return components.build(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java deleted file mode 100644 index 661be017461..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java +++ /dev/null @@ -1,536 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.DistanceEvent; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.MapMaker; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.WeakHashMap; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; 
-import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Maintains the connection pools of a session. - * - *

Logically this belongs to {@link DefaultSession}, but it's extracted here in order to be - * accessible from the context (notably for metrics). - */ -@ThreadSafe -public class PoolManager implements AsyncAutoCloseable { - - private static final Logger LOG = LoggerFactory.getLogger(PoolManager.class); - - // This is read concurrently, but only updated from adminExecutor - private volatile CqlIdentifier keyspace; - - private final ConcurrentMap pools = - new ConcurrentHashMap<>( - 16, - 0.75f, - // the map will only be updated from adminExecutor - 1); - - // The raw data to reprepare requests on the fly, if we hit a node that doesn't have them in - // its cache. - // This is raw protocol-level data, as opposed to the actual instances returned to the client - // (e.g. DefaultPreparedStatement) which are handled at the protocol level (e.g. - // CqlPrepareAsyncProcessor). We keep the two separate to avoid introducing a dependency from the - // session to a particular processor implementation. 
- private final ConcurrentMap repreparePayloads; - - private final String logPrefix; - private final EventExecutor adminExecutor; - private final DriverExecutionProfile config; - private final SingleThreaded singleThreaded; - - public PoolManager(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.config = context.getConfig().getDefaultProfile(); - this.singleThreaded = new SingleThreaded(context); - - if (config.getBoolean(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, true)) { - LOG.debug("[{}] Prepared statements cache configured to use weak values", logPrefix); - this.repreparePayloads = new MapMaker().weakValues().makeMap(); - } else { - LOG.debug("[{}] Prepared statements cache configured to use strong values", logPrefix); - this.repreparePayloads = new MapMaker().makeMap(); - } - } - - public CompletionStage init(CqlIdentifier keyspace) { - RunOrSchedule.on(adminExecutor, () -> singleThreaded.init(keyspace)); - return singleThreaded.initFuture; - } - - public CqlIdentifier getKeyspace() { - return keyspace; - } - - public CompletionStage setKeyspace(CqlIdentifier newKeyspace) { - CqlIdentifier oldKeyspace = this.keyspace; - if (Objects.equals(oldKeyspace, newKeyspace)) { - return CompletableFuture.completedFuture(null); - } - if (config.getBoolean(DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE)) { - LOG.warn( - "[{}] Detected a keyspace change at runtime ({} => {}). " - + "This is an anti-pattern that should be avoided in production " - + "(see '{}' in the configuration).", - logPrefix, - (oldKeyspace == null) ? 
"" : oldKeyspace.asInternal(), - newKeyspace.asInternal(), - DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE.getPath()); - } - this.keyspace = newKeyspace; - CompletableFuture result = new CompletableFuture<>(); - RunOrSchedule.on(adminExecutor, () -> singleThreaded.setKeyspace(newKeyspace, result)); - return result; - } - - public Map getPools() { - return pools; - } - - public ConcurrentMap getRepreparePayloads() { - return repreparePayloads; - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::close); - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::forceClose); - return singleThreaded.closeFuture; - } - - private class SingleThreaded { - - private final InternalDriverContext context; - private final ChannelPoolFactory channelPoolFactory; - private final CompletableFuture initFuture = new CompletableFuture<>(); - private boolean initWasCalled; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean closeWasCalled; - private boolean forceCloseWasCalled; - private final Object distanceListenerKey; - private final ReplayingEventFilter distanceEventFilter = - new ReplayingEventFilter<>(this::processDistanceEvent); - private final Object stateListenerKey; - private final ReplayingEventFilter stateEventFilter = - new ReplayingEventFilter<>(this::processStateEvent); - private final Object topologyListenerKey; - // The pools that we have opened but have not finished initializing yet - private final Map> pending = new HashMap<>(); - // If we receive events while a pool is initializing, the last one is stored here - private final Map pendingDistanceEvents = new WeakHashMap<>(); - private final Map pendingStateEvents = new WeakHashMap<>(); - - private 
SingleThreaded(InternalDriverContext context) { - this.context = context; - this.channelPoolFactory = context.getChannelPoolFactory(); - this.distanceListenerKey = - context - .getEventBus() - .register( - DistanceEvent.class, RunOrSchedule.on(adminExecutor, this::onDistanceEvent)); - this.stateListenerKey = - context - .getEventBus() - .register(NodeStateEvent.class, RunOrSchedule.on(adminExecutor, this::onStateEvent)); - this.topologyListenerKey = - context - .getEventBus() - .register( - TopologyEvent.class, RunOrSchedule.on(adminExecutor, this::onTopologyEvent)); - } - - private void init(CqlIdentifier keyspace) { - assert adminExecutor.inEventLoop(); - if (initWasCalled) { - return; - } - initWasCalled = true; - - LOG.debug("[{}] Starting initialization", logPrefix); - - PoolManager.this.keyspace = keyspace; - - // Make sure we don't miss any event while the pools are initializing - distanceEventFilter.start(); - stateEventFilter.start(); - - Collection nodes = context.getMetadataManager().getMetadata().getNodes().values(); - List> poolStages = new ArrayList<>(nodes.size()); - for (Node node : nodes) { - NodeDistance distance = node.getDistance(); - if (distance == NodeDistance.IGNORED) { - LOG.debug("[{}] Skipping {} because it is IGNORED", logPrefix, node); - } else if (node.getState() == NodeState.FORCED_DOWN) { - LOG.debug("[{}] Skipping {} because it is FORCED_DOWN", logPrefix, node); - } else { - LOG.debug("[{}] Creating a pool for {}", logPrefix, node); - poolStages.add(channelPoolFactory.init(node, keyspace, distance, context, logPrefix)); - } - } - CompletableFutures.whenAllDone(poolStages, () -> this.onPoolsInit(poolStages), adminExecutor); - } - - private void onPoolsInit(List> poolStages) { - assert adminExecutor.inEventLoop(); - LOG.debug("[{}] All pools have finished initializing", logPrefix); - // We will only propagate an invalid keyspace error if all pools get it - boolean allInvalidKeyspaces = poolStages.size() > 0; - for (CompletionStage 
poolStage : poolStages) { - // Note: pool init always succeeds - ChannelPool pool = CompletableFutures.getCompleted(poolStage.toCompletableFuture()); - boolean invalidKeyspace = pool.isInvalidKeyspace(); - if (invalidKeyspace) { - LOG.debug("[{}] Pool to {} reports an invalid keyspace", logPrefix, pool.getNode()); - } - allInvalidKeyspaces &= invalidKeyspace; - pools.put(pool.getNode(), pool); - } - if (allInvalidKeyspaces) { - initFuture.completeExceptionally( - new InvalidKeyspaceException("Invalid keyspace " + keyspace.asCql(true))); - forceClose(); - } else { - LOG.debug("[{}] Initialization complete, ready", logPrefix); - initFuture.complete(null); - distanceEventFilter.markReady(); - stateEventFilter.markReady(); - } - } - - private void onDistanceEvent(DistanceEvent event) { - assert adminExecutor.inEventLoop(); - distanceEventFilter.accept(event); - } - - private void onStateEvent(NodeStateEvent event) { - assert adminExecutor.inEventLoop(); - stateEventFilter.accept(event); - } - - private void processDistanceEvent(DistanceEvent event) { - assert adminExecutor.inEventLoop(); - // no need to check closeWasCalled, because we stop listening for events one closed - DefaultNode node = event.node; - NodeDistance newDistance = event.distance; - if (pending.containsKey(node)) { - pendingDistanceEvents.put(node, event); - } else if (newDistance == NodeDistance.IGNORED) { - ChannelPool pool = pools.remove(node); - if (pool != null) { - LOG.debug("[{}] {} became IGNORED, destroying pool", logPrefix, node); - pool.closeAsync() - .exceptionally( - error -> { - Loggers.warnWithException(LOG, "[{}] Error closing pool", logPrefix, error); - return null; - }); - } - } else { - NodeState state = node.getState(); - if (state == NodeState.FORCED_DOWN) { - LOG.warn( - "[{}] {} became {} but it is FORCED_DOWN, ignoring", logPrefix, node, newDistance); - return; - } - ChannelPool pool = pools.get(node); - if (pool == null) { - LOG.debug( - "[{}] {} became {} and no pool found, 
initializing it", logPrefix, node, newDistance); - CompletionStage poolFuture = - channelPoolFactory.init(node, keyspace, newDistance, context, logPrefix); - pending.put(node, poolFuture); - poolFuture - .thenAcceptAsync(this::onPoolInitialized, adminExecutor) - .exceptionally(UncaughtExceptions::log); - } else { - LOG.debug("[{}] {} became {}, resizing it", logPrefix, node, newDistance); - pool.resize(newDistance); - } - } - } - - private void processStateEvent(NodeStateEvent event) { - assert adminExecutor.inEventLoop(); - // no need to check closeWasCalled, because we stop listening for events once closed - DefaultNode node = event.node; - NodeState oldState = event.oldState; - NodeState newState = event.newState; - if (pending.containsKey(node)) { - pendingStateEvents.put(node, event); - } else if (newState == null || newState == NodeState.FORCED_DOWN) { - ChannelPool pool = pools.remove(node); - if (pool != null) { - LOG.debug( - "[{}] {} was {}, destroying pool", - logPrefix, - node, - newState == null ? 
"removed" : newState.name()); - pool.closeAsync() - .exceptionally( - error -> { - Loggers.warnWithException(LOG, "[{}] Error closing pool", logPrefix, error); - return null; - }); - } - } else if (oldState == NodeState.FORCED_DOWN - && newState == NodeState.UP - && node.getDistance() != NodeDistance.IGNORED) { - LOG.debug("[{}] {} was forced back UP, initializing pool", logPrefix, node); - createOrReconnectPool(node); - } - } - - private void onTopologyEvent(TopologyEvent event) { - assert adminExecutor.inEventLoop(); - if (event.type == TopologyEvent.Type.SUGGEST_UP) { - context - .getMetadataManager() - .getMetadata() - .findNode(event.broadcastRpcAddress) - .ifPresent( - node -> { - if (node.getDistance() != NodeDistance.IGNORED) { - LOG.debug( - "[{}] Received a SUGGEST_UP event for {}, reconnecting pool now", - logPrefix, - node); - ChannelPool pool = pools.get(node); - if (pool != null) { - pool.reconnectNow(); - } - } - }); - } - } - - private void createOrReconnectPool(Node node) { - ChannelPool pool = pools.get(node); - if (pool == null) { - CompletionStage poolFuture = - channelPoolFactory.init(node, keyspace, node.getDistance(), context, logPrefix); - pending.put(node, poolFuture); - poolFuture - .thenAcceptAsync(this::onPoolInitialized, adminExecutor) - .exceptionally(UncaughtExceptions::log); - } else { - pool.reconnectNow(); - } - } - - private void onPoolInitialized(ChannelPool pool) { - assert adminExecutor.inEventLoop(); - Node node = pool.getNode(); - if (closeWasCalled) { - LOG.debug( - "[{}] Session closed while a pool to {} was initializing, closing it", logPrefix, node); - pool.forceCloseAsync(); - } else { - LOG.debug("[{}] New pool to {} initialized", logPrefix, node); - if (Objects.equals(keyspace, pool.getInitialKeyspaceName())) { - reprepareStatements(pool); - } else { - // The keyspace changed while the pool was being initialized, switch it now. 
- pool.setKeyspace(keyspace) - .handleAsync( - (result, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, "Error while switching keyspace to " + keyspace, error); - } - reprepareStatements(pool); - return null; - }, - adminExecutor); - } - } - } - - private void reprepareStatements(ChannelPool pool) { - assert adminExecutor.inEventLoop(); - if (config.getBoolean(DefaultDriverOption.REPREPARE_ENABLED)) { - new ReprepareOnUp( - logPrefix + "|" + pool.getNode().getEndPoint(), - pool, - adminExecutor, - repreparePayloads, - context, - () -> RunOrSchedule.on(adminExecutor, () -> onPoolReady(pool))) - .start(); - } else { - LOG.debug("[{}] Reprepare on up is disabled, skipping", logPrefix); - onPoolReady(pool); - } - } - - private void onPoolReady(ChannelPool pool) { - assert adminExecutor.inEventLoop(); - Node node = pool.getNode(); - pending.remove(node); - pools.put(node, pool); - DistanceEvent distanceEvent = pendingDistanceEvents.remove(node); - NodeStateEvent stateEvent = pendingStateEvents.remove(node); - if (stateEvent != null && stateEvent.newState == NodeState.FORCED_DOWN) { - LOG.debug( - "[{}] Received {} while the pool was initializing, processing it now", - logPrefix, - stateEvent); - processStateEvent(stateEvent); - } else if (distanceEvent != null) { - LOG.debug( - "[{}] Received {} while the pool was initializing, processing it now", - logPrefix, - distanceEvent); - processDistanceEvent(distanceEvent); - } - } - - private void setKeyspace(CqlIdentifier newKeyspace, CompletableFuture doneFuture) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - doneFuture.complete(null); - return; - } - LOG.debug("[{}] Switching to keyspace {}", logPrefix, newKeyspace); - List> poolReadyFutures = Lists.newArrayListWithCapacity(pools.size()); - for (ChannelPool pool : pools.values()) { - poolReadyFutures.add(pool.setKeyspace(newKeyspace)); - } - CompletableFutures.completeFrom(CompletableFutures.allDone(poolReadyFutures), doneFuture); - 
} - - private void close() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - LOG.debug("[{}] Starting shutdown", logPrefix); - - // Stop listening for events - context.getEventBus().unregister(distanceListenerKey, DistanceEvent.class); - context.getEventBus().unregister(stateListenerKey, NodeStateEvent.class); - context.getEventBus().unregister(topologyListenerKey, TopologyEvent.class); - - List> closePoolStages = new ArrayList<>(pools.size()); - for (ChannelPool pool : pools.values()) { - closePoolStages.add(pool.closeAsync()); - } - CompletableFutures.whenAllDone( - closePoolStages, () -> onAllPoolsClosed(closePoolStages), adminExecutor); - } - - private void forceClose() { - assert adminExecutor.inEventLoop(); - if (forceCloseWasCalled) { - return; - } - forceCloseWasCalled = true; - LOG.debug( - "[{}] Starting forced shutdown (was {}closed before)", - logPrefix, - (closeWasCalled ? "" : "not ")); - - if (closeWasCalled) { - for (ChannelPool pool : pools.values()) { - pool.forceCloseAsync(); - } - } else { - List> closePoolStages = new ArrayList<>(pools.size()); - for (ChannelPool pool : pools.values()) { - closePoolStages.add(pool.forceCloseAsync()); - } - CompletableFutures.whenAllDone( - closePoolStages, () -> onAllPoolsClosed(closePoolStages), adminExecutor); - } - } - - private void onAllPoolsClosed(List> closePoolStages) { - assert adminExecutor.inEventLoop(); - Throwable firstError = null; - for (CompletionStage closePoolStage : closePoolStages) { - CompletableFuture closePoolFuture = closePoolStage.toCompletableFuture(); - assert closePoolFuture.isDone(); - if (closePoolFuture.isCompletedExceptionally()) { - Throwable error = CompletableFutures.getFailed(closePoolFuture); - if (firstError == null) { - firstError = error; - } else { - firstError.addSuppressed(error); - } - } - } - if (firstError != null) { - closeFuture.completeExceptionally(firstError); - } else { - LOG.debug("[{}] Shutdown 
complete", logPrefix); - closeFuture.complete(null); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java deleted file mode 100644 index ee979473fd1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlRequestHandler; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.util.concurrent.EventExecutor; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.HashSet; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Ensures that a newly added or restarted node knows all the prepared statements created from this - * driver instance. - * - *

See the comments in {@code reference.conf} for more explanations about this process. If any - * prepare request fail, we ignore the error because it will be retried on the fly (see {@link - * CqlRequestHandler}). - * - *

Logically this code belongs to {@link DefaultSession}, but it was extracted for modularity and - * testability. - */ -@ThreadSafe -class ReprepareOnUp { - - private static final Logger LOG = LoggerFactory.getLogger(ReprepareOnUp.class); - private static final Query QUERY_SERVER_IDS = - new Query("SELECT prepared_id FROM system.prepared_statements"); - - private final String logPrefix; - private final ChannelPool pool; - private final EventExecutor adminExecutor; - private final Map repreparePayloads; - private final Runnable whenPrepared; - private final boolean checkSystemTable; - private final int maxStatements; - private final int maxParallelism; - private final Duration timeout; - private final RequestThrottler throttler; - private final SessionMetricUpdater metricUpdater; - - // After the constructor, everything happens on adminExecutor, so these fields do not need any - // synchronization. - private Set serverKnownIds; - private Queue toReprepare; - private int runningWorkers; - - ReprepareOnUp( - String logPrefix, - ChannelPool pool, - EventExecutor adminExecutor, - Map repreparePayloads, - InternalDriverContext context, - Runnable whenPrepared) { - - this.logPrefix = logPrefix; - this.pool = pool; - this.adminExecutor = adminExecutor; - this.repreparePayloads = repreparePayloads; - this.whenPrepared = whenPrepared; - this.throttler = context.getRequestThrottler(); - - DriverConfig config = context.getConfig(); - this.checkSystemTable = - config.getDefaultProfile().getBoolean(DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE); - this.timeout = config.getDefaultProfile().getDuration(DefaultDriverOption.REPREPARE_TIMEOUT); - this.maxStatements = - config.getDefaultProfile().getInt(DefaultDriverOption.REPREPARE_MAX_STATEMENTS); - this.maxParallelism = - config.getDefaultProfile().getInt(DefaultDriverOption.REPREPARE_MAX_PARALLELISM); - - this.metricUpdater = context.getMetricsFactory().getSessionUpdater(); - } - - void start() { - if 
(repreparePayloads.isEmpty()) { - LOG.debug("[{}] No statements to reprepare, done", logPrefix); - whenPrepared.run(); - } else { - // Check log level because ConcurrentMap.size is not a constant operation - if (LOG.isDebugEnabled()) { - LOG.debug( - "[{}] {} statements to reprepare on newly added/up node", - logPrefix, - repreparePayloads.size()); - } - if (checkSystemTable) { - LOG.debug("[{}] Checking which statements the server knows about", logPrefix); - queryAsync(QUERY_SERVER_IDS, Collections.emptyMap(), "QUERY system.prepared_statements") - .whenCompleteAsync(this::gatherServerIds, adminExecutor); - } else { - LOG.debug( - "[{}] {} is disabled, repreparing directly", - logPrefix, - DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE.getPath()); - RunOrSchedule.on( - adminExecutor, - () -> { - serverKnownIds = Collections.emptySet(); - gatherPayloadsToReprepare(); - }); - } - } - } - - private void gatherServerIds(AdminResult rows, Throwable error) { - assert adminExecutor.inEventLoop(); - if (serverKnownIds == null) { - serverKnownIds = new HashSet<>(); - } - if (error != null) { - LOG.debug( - "[{}] Error querying system.prepared_statements ({}), proceeding without server ids", - logPrefix, - error.toString()); - gatherPayloadsToReprepare(); - } else { - for (AdminRow row : rows) { - serverKnownIds.add(row.getByteBuffer("prepared_id")); - } - if (rows.hasNextPage()) { - LOG.debug("[{}] system.prepared_statements has more pages", logPrefix); - rows.nextPage().whenCompleteAsync(this::gatherServerIds, adminExecutor); - } else { - LOG.debug("[{}] Gathered {} server ids, proceeding", logPrefix, serverKnownIds.size()); - gatherPayloadsToReprepare(); - } - } - } - - private void gatherPayloadsToReprepare() { - assert adminExecutor.inEventLoop(); - toReprepare = new ArrayDeque<>(); - for (RepreparePayload payload : repreparePayloads.values()) { - if (serverKnownIds.contains(payload.id)) { - LOG.trace( - "[{}] Skipping statement {} because it is already known to the 
server", - logPrefix, - Bytes.toHexString(payload.id)); - } else { - if (maxStatements > 0 && toReprepare.size() == maxStatements) { - LOG.debug( - "[{}] Limiting number of statements to reprepare to {} as configured, " - + "but there are more", - logPrefix, - maxStatements); - break; - } else { - toReprepare.add(payload); - } - } - } - if (toReprepare.isEmpty()) { - LOG.debug( - "[{}] No statements to reprepare that are not known by the server already, done", - logPrefix); - whenPrepared.run(); - } else { - startWorkers(); - } - } - - private void startWorkers() { - assert adminExecutor.inEventLoop(); - runningWorkers = Math.min(maxParallelism, toReprepare.size()); - LOG.debug( - "[{}] Repreparing {} statements with {} parallel workers", - logPrefix, - toReprepare.size(), - runningWorkers); - for (int i = 0; i < runningWorkers; i++) { - startWorker(); - } - } - - private void startWorker() { - assert adminExecutor.inEventLoop(); - if (toReprepare.isEmpty()) { - runningWorkers -= 1; - if (runningWorkers == 0) { - LOG.debug("[{}] All workers finished, done", logPrefix); - whenPrepared.run(); - } - } else { - RepreparePayload payload = toReprepare.poll(); - prepareAsync( - new Prepare( - payload.query, (payload.keyspace == null ? 
null : payload.keyspace.asInternal())), - payload.customPayload) - .handleAsync( - (result, error) -> { - // Don't log, AdminRequestHandler does already - startWorker(); - return null; - }, - adminExecutor); - } - } - - @VisibleForTesting - protected CompletionStage queryAsync( - Message message, Map customPayload, String debugString) { - DriverChannel channel = pool.next(); - if (channel == null) { - return CompletableFutures.failedFuture( - new BusyConnectionException("Found no channel to execute reprepare query")); - } else { - ThrottledAdminRequestHandler reprepareHandler = - ThrottledAdminRequestHandler.query( - channel, - false, - message, - customPayload, - timeout, - throttler, - metricUpdater, - logPrefix, - debugString); - return reprepareHandler.start(); - } - } - - @VisibleForTesting - protected CompletionStage prepareAsync( - Message message, Map customPayload) { - DriverChannel channel = pool.next(); - if (channel == null) { - return CompletableFutures.failedFuture( - new BusyConnectionException("Found no channel to execute reprepare query")); - } else { - ThrottledAdminRequestHandler reprepareHandler = - ThrottledAdminRequestHandler.prepare( - channel, false, message, customPayload, timeout, throttler, metricUpdater, logPrefix); - return reprepareHandler.start(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java deleted file mode 100644 index 7c4b10442a7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.cql.DefaultPreparedStatement; -import com.datastax.oss.protocol.internal.request.Prepare; -import java.nio.ByteBuffer; -import java.util.Map; -import net.jcip.annotations.Immutable; - -/** - * The information that's necessary to reprepare an already prepared statement, in case we hit a - * node that doesn't have it in its cache. - * - *

Make sure the object that's returned to the client (e.g. {@link DefaultPreparedStatement} for - * CQL statements) keeps a reference to this. - */ -@Immutable -public class RepreparePayload { - public final ByteBuffer id; - public final String query; - - /** The keyspace that is set independently from the query string (see CASSANDRA-10145) */ - public final CqlIdentifier keyspace; - - public final Map customPayload; - - public RepreparePayload( - ByteBuffer id, String query, CqlIdentifier keyspace, Map customPayload) { - this.id = id; - this.query = query; - this.keyspace = keyspace; - this.customPayload = customPayload; - } - - public Prepare toMessage() { - return new Prepare(query, keyspace == null ? null : keyspace.asInternal()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java deleted file mode 100644 index 49599667d70..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; - -/** - * Handles a type of request in the driver. - * - *

By default, the driver supports CQL {@link Statement queries} and {@link PrepareRequest - * preparation requests}. New processors can be plugged in to handle new types of requests. - * - * @param the type of request accepted. - * @param the type of result when a request is processed. - */ -public interface RequestProcessor { - - /** - * Whether the processor can produce the given result from the given request. - * - *

Processors will be tried in the order they were registered. The first processor for which - * this method returns true will be used. - */ - boolean canProcess(Request request, GenericType resultType); - - /** Processes the given request, producing a result. */ - ResultT process( - RequestT request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix); - - /** Builds a failed result to directly report the given error. */ - ResultT newFailure(RuntimeException error); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java deleted file mode 100644 index b993365d201..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class RequestProcessorRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(RequestProcessorRegistry.class); - - private final String logPrefix; - // Effectively immutable: the contents are never modified after construction - private final RequestProcessor[] processors; - - public RequestProcessorRegistry(String logPrefix, RequestProcessor... processors) { - this.logPrefix = logPrefix; - this.processors = processors; - } - - public RequestProcessor processorFor( - RequestT request, GenericType resultType) { - - for (RequestProcessor processor : processors) { - if (processor.canProcess(request, resultType)) { - LOG.trace("[{}] Using {} to process {}", logPrefix, processor, request); - // The cast is safe provided that the processor implements canProcess correctly - @SuppressWarnings("unchecked") - RequestProcessor result = - (RequestProcessor) processor; - return result; - } else { - LOG.trace("[{}] {} cannot process {}, trying next", logPrefix, processor, request); - } - } - throw new IllegalArgumentException("No request processor found for " + request); - } - - /** This creates a defensive copy on every call, do not overuse. 
*/ - public Iterable> getProcessors() { - return ImmutableList.copyOf(processors); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java deleted file mode 100644 index 51ba4d30624..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.metadata.schema.events.AggregateChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.FunctionChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.KeyspaceChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TableChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.ViewChangeEvent; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import io.netty.util.concurrent.EventExecutor; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class SchemaListenerNotifier { - - private final SchemaChangeListener listener; - private final EventExecutor adminExecutor; - - // It is technically possible that a schema change could happen in the middle of session - // initialization. Don't forward events in this case, it would likely do more harm than good if a - // listener implementation doesn't expect it. 
- private boolean sessionReady; - - SchemaListenerNotifier( - SchemaChangeListener listener, EventBus eventBus, EventExecutor adminExecutor) { - this.listener = listener; - this.adminExecutor = adminExecutor; - - // No need to unregister at shutdown, this component has the same lifecycle as the cluster - eventBus.register( - AggregateChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onAggregateChangeEvent)); - eventBus.register( - FunctionChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onFunctionChangeEvent)); - eventBus.register( - KeyspaceChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onKeyspaceChangeEvent)); - eventBus.register( - TableChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onTableChangeEvent)); - eventBus.register( - TypeChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onTypeChangeEvent)); - eventBus.register( - ViewChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onViewChangeEvent)); - } - - void onSessionReady(Session session) { - RunOrSchedule.on( - adminExecutor, - () -> { - sessionReady = true; - listener.onSessionReady(session); - }); - } - - private void onAggregateChangeEvent(AggregateChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onAggregateCreated(event.newAggregate); - break; - case UPDATED: - listener.onAggregateUpdated(event.newAggregate, event.oldAggregate); - break; - case DROPPED: - listener.onAggregateDropped(event.oldAggregate); - break; - } - } - } - - private void onFunctionChangeEvent(FunctionChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onFunctionCreated(event.newFunction); - break; - case UPDATED: - listener.onFunctionUpdated(event.newFunction, event.oldFunction); - break; - case DROPPED: - listener.onFunctionDropped(event.oldFunction); - break; - } - } - } - - private void 
onKeyspaceChangeEvent(KeyspaceChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onKeyspaceCreated(event.newKeyspace); - break; - case UPDATED: - listener.onKeyspaceUpdated(event.newKeyspace, event.oldKeyspace); - break; - case DROPPED: - listener.onKeyspaceDropped(event.oldKeyspace); - break; - } - } - } - - private void onTableChangeEvent(TableChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onTableCreated(event.newTable); - break; - case UPDATED: - listener.onTableUpdated(event.newTable, event.oldTable); - break; - case DROPPED: - listener.onTableDropped(event.oldTable); - break; - } - } - } - - private void onTypeChangeEvent(TypeChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onUserDefinedTypeCreated(event.newType); - break; - case UPDATED: - listener.onUserDefinedTypeUpdated(event.newType, event.oldType); - break; - case DROPPED: - listener.onUserDefinedTypeDropped(event.oldType); - break; - } - } - } - - private void onViewChangeEvent(ViewChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onViewCreated(event.newView); - break; - case UPDATED: - listener.onViewUpdated(event.newView, event.oldView); - break; - case DROPPED: - listener.onViewDropped(event.oldView); - break; - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java deleted file mode 100644 index 1a1270b41c8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * 
or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Optional; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -/** - * Utility class to wrap a session. - * - *

This will typically be used to mix in a convenience interface from a 3rd-party extension: - * - *

{@code
- * class ReactiveSessionWrapper extends SessionWrapper implements ReactiveSession {
- *   public ReactiveSessionWrapper(Session delegate) {
- *     super(delegate);
- *   }
- * }
- * }
- */ -@ThreadSafe -public class SessionWrapper implements Session { - - private final Session delegate; - - public SessionWrapper(@NonNull Session delegate) { - this.delegate = delegate; - } - - @NonNull - public Session getDelegate() { - return delegate; - } - - @NonNull - @Override - public String getName() { - return delegate.getName(); - } - - @NonNull - @Override - public Metadata getMetadata() { - return delegate.getMetadata(); - } - - @Override - public boolean isSchemaMetadataEnabled() { - return delegate.isSchemaMetadataEnabled(); - } - - @NonNull - @Override - public CompletionStage setSchemaMetadataEnabled(@Nullable Boolean newValue) { - return delegate.setSchemaMetadataEnabled(newValue); - } - - @NonNull - @Override - public CompletionStage refreshSchemaAsync() { - return delegate.refreshSchemaAsync(); - } - - @NonNull - @Override - public CompletionStage checkSchemaAgreementAsync() { - return delegate.checkSchemaAgreementAsync(); - } - - @NonNull - @Override - public DriverContext getContext() { - return delegate.getContext(); - } - - @NonNull - @Override - public Optional getKeyspace() { - return delegate.getKeyspace(); - } - - @NonNull - @Override - public Optional getMetrics() { - return delegate.getMetrics(); - } - - @Nullable - @Override - public ResultT execute( - @NonNull RequestT request, @NonNull GenericType resultType) { - return delegate.execute(request, resultType); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return delegate.closeFuture(); - } - - @NonNull - @Override - public CompletionStage closeAsync() { - return delegate.closeAsync(); - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return delegate.forceCloseAsync(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java deleted file 
mode 100644 index 8146c5b113a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Deque; -import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A request throttler that limits the number of concurrent requests. 
- * - *

To activate this throttler, modify the {@code advanced.throttler} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.throttler {
- *     class = ConcurrencyLimitingRequestThrottler
- *     max-concurrent-requests = 10000
- *     max-queue-size = 10000
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ConcurrencyLimitingRequestThrottler implements RequestThrottler { - - private static final Logger LOG = - LoggerFactory.getLogger(ConcurrencyLimitingRequestThrottler.class); - - private final String logPrefix; - private final int maxConcurrentRequests; - private final int maxQueueSize; - private final AtomicInteger concurrentRequests = new AtomicInteger(0); - // CLQ is not O(1) for size(), as it forces a full iteration of the queue. So, we track - // the size of the queue explicitly. - private final Deque queue = new ConcurrentLinkedDeque<>(); - private final AtomicInteger queueSize = new AtomicInteger(0); - private volatile boolean closed = false; - - public ConcurrencyLimitingRequestThrottler(DriverContext context) { - this.logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.maxConcurrentRequests = - config.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS); - this.maxQueueSize = config.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE); - LOG.debug( - "[{}] Initializing with maxConcurrentRequests = {}, maxQueueSize = {}", - logPrefix, - maxConcurrentRequests, - maxQueueSize); - } - - @Override - public void register(@NonNull Throttled request) { - if (closed) { - LOG.trace("[{}] Rejecting request after shutdown", logPrefix); - fail(request, "The session is shutting down"); - return; - } - - // Implementation note: Technically the "concurrent requests" or "queue size" - // could read transiently over the limit, but the queue itself will never grow - // beyond the limit since we always check for that condition and revert if - // over-limit. We do this instead of a CAS-loop to avoid the potential loop. 
- - // If no backlog exists AND we get capacity, we can execute immediately - if (queueSize.get() == 0) { - // Take a claim first, and then check if we are OK to proceed - int newConcurrent = concurrentRequests.incrementAndGet(); - if (newConcurrent <= maxConcurrentRequests) { - LOG.trace("[{}] Starting newly registered request", logPrefix); - request.onThrottleReady(false); - return; - } else { - // We exceeded the limit, decrement the count and fall through to the queuing logic - concurrentRequests.decrementAndGet(); - } - } - - // If we have a backlog, or we failed to claim capacity, try to enqueue - int newQueueSize = queueSize.incrementAndGet(); - if (newQueueSize <= maxQueueSize) { - LOG.trace("[{}] Enqueuing request", logPrefix); - queue.offer(request); - - // Double-check that we were still supposed to be enqueued; it is possible - // that the session was closed while we were enqueuing, it's also possible - // that it is right now removing the request, so we need to check both - if (closed) { - if (queue.remove(request)) { - queueSize.decrementAndGet(); - LOG.trace("[{}] Rejecting late request after shutdown", logPrefix); - fail(request, "The session is shutting down"); - } - } - } else { - LOG.trace("[{}] Rejecting request because of full queue", logPrefix); - queueSize.decrementAndGet(); - fail( - request, - String.format( - "The session has reached its maximum capacity " - + "(concurrent requests: %d, queue size: %d)", - maxConcurrentRequests, maxQueueSize)); - } - } - - @Override - public void signalSuccess(@NonNull Throttled request) { - Throttled nextRequest = onRequestDoneAndDequeNext(); - if (nextRequest != null) { - nextRequest.onThrottleReady(true); - } - } - - @Override - public void signalError(@NonNull Throttled request, @NonNull Throwable error) { - signalSuccess(request); // not treated differently - } - - @Override - public void signalTimeout(@NonNull Throttled request) { - Throttled nextRequest = null; - if (!closed) { - if 
(queue.remove(request)) { // The request timed out before it was active - queueSize.decrementAndGet(); - LOG.trace("[{}] Removing timed out request from the queue", logPrefix); - } else { - nextRequest = onRequestDoneAndDequeNext(); - } - } - - if (nextRequest != null) { - nextRequest.onThrottleReady(true); - } - } - - @Override - public void signalCancel(@NonNull Throttled request) { - Throttled nextRequest = null; - if (!closed) { - if (queue.remove(request)) { // The request has been cancelled before it was active - queueSize.decrementAndGet(); - LOG.trace("[{}] Removing cancelled request from the queue", logPrefix); - } else { - nextRequest = onRequestDoneAndDequeNext(); - } - } - - if (nextRequest != null) { - nextRequest.onThrottleReady(true); - } - } - - @Nullable - private Throttled onRequestDoneAndDequeNext() { - if (!closed) { - Throttled nextRequest = queue.poll(); - if (nextRequest == null) { - concurrentRequests.decrementAndGet(); - } else { - queueSize.decrementAndGet(); - LOG.trace("[{}] Starting dequeued request", logPrefix); - return nextRequest; - } - } - - // no next task was dequeued - return null; - } - - @Override - public void close() { - closed = true; - - LOG.debug("[{}] Rejecting {} queued requests after shutdown", logPrefix, queueSize.get()); - Throttled request; - while ((request = queue.poll()) != null) { - queueSize.decrementAndGet(); - fail(request, "The session is shutting down"); - } - } - - public int getQueueSize() { - return queueSize.get(); - } - - @VisibleForTesting - int getConcurrentRequests() { - return concurrentRequests.get(); - } - - @VisibleForTesting - Deque getQueue() { - return queue; - } - - private static void fail(Throttled request, String message) { - request.onThrottleFailure(new RequestThrottlingException(message)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java deleted file mode 100644 index 9a25059caef..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -/** A thin wrapper around {@link System#nanoTime()}, to simplify testing. */ -interface NanoClock { - long nanoTime(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java deleted file mode 100644 index 2210e4b26f1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import net.jcip.annotations.ThreadSafe; - -/** - * A request throttler that does not enforce any kind of limitation: requests are always executed - * immediately. - * - *

To activate this throttler, modify the {@code advanced.throttler} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.throttler {
- *     class = PassThroughRequestThrottler
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class PassThroughRequestThrottler implements RequestThrottler { - - @SuppressWarnings("unused") - public PassThroughRequestThrottler(DriverContext context) { - // nothing to do - } - - @Override - public void register(@NonNull Throttled request) { - request.onThrottleReady(false); - } - - @Override - public void signalSuccess(@NonNull Throttled request) { - // nothing to do - } - - @Override - public void signalError(@NonNull Throttled request, @NonNull Throwable error) { - // nothing to do - } - - @Override - public void signalTimeout(@NonNull Throttled request) { - // nothing to do - } - - @Override - public void signalCancel(@NonNull Throttled request) { - // nothing to do - } - - @Override - public void close() throws IOException { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java deleted file mode 100644 index 03a693dc0fe..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A request throttler that limits the rate of requests per second. - * - *

To activate this throttler, modify the {@code advanced.throttler} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.throttler {
- *     class = RateLimitingRequestThrottler
- *     max-requests-per-second = 10000
- *     max-queue-size = 10000
- *     drain-interval = 10 milliseconds
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class RateLimitingRequestThrottler implements RequestThrottler { - - private static final Logger LOG = LoggerFactory.getLogger(RateLimitingRequestThrottler.class); - - private final String logPrefix; - private final NanoClock clock; - private final int maxRequestsPerSecond; - private final int maxQueueSize; - private final long drainIntervalNanos; - private final EventExecutor scheduler; - - private final ReentrantLock lock = new ReentrantLock(); - - @GuardedBy("lock") - private long lastUpdateNanos; - - @GuardedBy("lock") - private int storedPermits; - - @GuardedBy("lock") - private final Deque queue = new ArrayDeque<>(); - - @GuardedBy("lock") - private boolean closed; - - @SuppressWarnings("unused") - public RateLimitingRequestThrottler(DriverContext context) { - this(context, System::nanoTime); - } - - @VisibleForTesting - RateLimitingRequestThrottler(DriverContext context, NanoClock clock) { - this.logPrefix = context.getSessionName(); - this.clock = clock; - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - - this.maxRequestsPerSecond = - config.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND); - this.maxQueueSize = config.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE); - Duration drainInterval = - config.getDuration(DefaultDriverOption.REQUEST_THROTTLER_DRAIN_INTERVAL); - this.drainIntervalNanos = drainInterval.toNanos(); - - this.lastUpdateNanos = clock.nanoTime(); - // Start with one second worth of permits to avoid delaying initial requests - this.storedPermits = maxRequestsPerSecond; - - this.scheduler = - ((InternalDriverContext) context).getNettyOptions().adminEventExecutorGroup().next(); - - LOG.debug( - "[{}] Initializing with maxRequestsPerSecond = {}, maxQueueSize = {}, drainInterval = {}", - logPrefix, - maxRequestsPerSecond, - maxQueueSize, - drainInterval); - } - - 
@Override - public void register(@NonNull Throttled request) { - long now = clock.nanoTime(); - lock.lock(); - try { - if (closed) { - LOG.trace("[{}] Rejecting request after shutdown", logPrefix); - fail(request, "The session is shutting down"); - } else if (queue.isEmpty() && acquire(now, 1) == 1) { - LOG.trace("[{}] Starting newly registered request", logPrefix); - request.onThrottleReady(false); - } else if (queue.size() < maxQueueSize) { - LOG.trace("[{}] Enqueuing request", logPrefix); - if (queue.isEmpty()) { - scheduler.schedule(this::drain, drainIntervalNanos, TimeUnit.NANOSECONDS); - } - queue.add(request); - } else { - LOG.trace("[{}] Rejecting request because of full queue", logPrefix); - fail( - request, - String.format( - "The session has reached its maximum capacity " - + "(requests/s: %d, queue size: %d)", - maxRequestsPerSecond, maxQueueSize)); - } - } finally { - lock.unlock(); - } - } - - // Runs periodically when the queue is not empty. It tries to dequeue as much as possible while - // staying under the target rate. If it does not completely drain the queue, it reschedules - // itself. 
- private void drain() { - assert scheduler.inEventLoop(); - long now = clock.nanoTime(); - lock.lock(); - try { - if (closed || queue.isEmpty()) { - return; - } - int toDequeue = acquire(now, queue.size()); - LOG.trace("[{}] Dequeuing {}/{} elements", logPrefix, toDequeue, queue.size()); - for (int i = 0; i < toDequeue; i++) { - LOG.trace("[{}] Starting dequeued request", logPrefix); - queue.poll().onThrottleReady(true); - } - if (!queue.isEmpty()) { - LOG.trace( - "[{}] {} elements remaining in queue, rescheduling drain task", - logPrefix, - queue.size()); - scheduler.schedule(this::drain, drainIntervalNanos, TimeUnit.NANOSECONDS); - } - } finally { - lock.unlock(); - } - } - - @Override - public void signalSuccess(@NonNull Throttled request) { - // nothing to do - } - - @Override - public void signalError(@NonNull Throttled request, @NonNull Throwable error) { - // nothing to do - } - - @Override - public void signalTimeout(@NonNull Throttled request) { - lock.lock(); - try { - if (!closed && queue.remove(request)) { // The request timed out before it was active - LOG.trace("[{}] Removing timed out request from the queue", logPrefix); - } - } finally { - lock.unlock(); - } - } - - @Override - public void signalCancel(@NonNull Throttled request) { - lock.lock(); - try { - if (!closed && queue.remove(request)) { // The request has been cancelled before it was active - LOG.trace("[{}] Removing cancelled request from the queue", logPrefix); - } - } finally { - lock.unlock(); - } - } - - @Override - public void close() { - lock.lock(); - try { - closed = true; - LOG.debug("[{}] Rejecting {} queued requests after shutdown", logPrefix, queue.size()); - for (Throttled request : queue) { - fail(request, "The session is shutting down"); - } - } finally { - lock.unlock(); - } - } - - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private int acquire(long currentTimeNanos, int wantedPermits) { - assert lock.isHeldByCurrentThread() && 
!closed; - - long elapsedNanos = currentTimeNanos - lastUpdateNanos; - - if (elapsedNanos >= 1_000_000_000) { - // created more than the max, so whatever was stored, the sum will be capped to the max - storedPermits = maxRequestsPerSecond; - lastUpdateNanos = currentTimeNanos; - } else if (elapsedNanos > 0) { - int createdPermits = (int) (elapsedNanos * maxRequestsPerSecond / 1_000_000_000); - if (createdPermits > 0) { - // Only reset interval if we've generated permits, otherwise we might continually reset - // before we get the chance to generate anything. - lastUpdateNanos = currentTimeNanos; - } - storedPermits = Math.min(storedPermits + createdPermits, maxRequestsPerSecond); - } - - int returned = (storedPermits >= wantedPermits) ? wantedPermits : storedPermits; - storedPermits = Math.max(storedPermits - wantedPermits, 0); - return returned; - } - - public int getQueueSize() { - lock.lock(); - try { - return queue.size(); - } finally { - lock.unlock(); - } - } - - @VisibleForTesting - int getStoredPermits() { - lock.lock(); - try { - return storedPermits; - } finally { - lock.unlock(); - } - } - - @VisibleForTesting - Deque getQueue() { - lock.lock(); - try { - return queue; - } finally { - lock.unlock(); - } - } - - private static void fail(Throttled request, String message) { - request.onThrottleFailure(new RequestThrottlingException(message)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java deleted file mode 100644 index 5e84f6b1002..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.specex; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.ThreadSafe; - -/** - * A policy that schedules a configurable number of speculative executions, separated by a fixed - * delay. - * - *

To activate this policy, modify the {@code advanced.speculative-execution-policy} section in - * the driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.speculative-execution-policy {
- *     class = ConstantSpeculativeExecutionPolicy
- *     max-executions = 3
- *     delay = 100 milliseconds
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ConstantSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { - - private final int maxExecutions; - private final long constantDelayMillis; - - public ConstantSpeculativeExecutionPolicy(DriverContext context, String profileName) { - DriverExecutionProfile config = context.getConfig().getProfile(profileName); - this.maxExecutions = config.getInt(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX); - if (this.maxExecutions < 1) { - throw new IllegalArgumentException("Max must be at least 1"); - } - this.constantDelayMillis = - config.getDuration(DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY).toMillis(); - if (this.constantDelayMillis < 0) { - throw new IllegalArgumentException("Delay must be positive or 0"); - } - } - - @Override - public long nextExecution( - @NonNull @SuppressWarnings("unused") Node node, - @Nullable @SuppressWarnings("unused") CqlIdentifier keyspace, - @NonNull @SuppressWarnings("unused") Request request, - int runningExecutions) { - assert runningExecutions >= 1; - return (runningExecutions < maxExecutions) ? constantDelayMillis : -1; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java deleted file mode 100644 index 2f6b17286e5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.specex; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.ThreadSafe; - -/** - * A policy that never triggers speculative executions. - * - *

To activate this policy, modify the {@code advanced.speculative-execution-policy} section in - * the driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.speculative-execution-policy {
- *     class = NoSpeculativeExecutionPolicy
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class NoSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { - - public NoSpeculativeExecutionPolicy( - @SuppressWarnings("unused") DriverContext context, - @SuppressWarnings("unused") String profileName) { - // nothing to do - } - - @Override - @SuppressWarnings("unused") - public long nextExecution( - @NonNull Node node, - @Nullable CqlIdentifier keyspace, - @NonNull Request request, - int runningExecutions) { - // never start speculative executions - return -1; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java deleted file mode 100644 index 343d3f9e4e7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.InputStream; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyStore; -import java.security.SecureRandom; -import java.time.Duration; -import java.util.List; -import java.util.Optional; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLParameters; -import javax.net.ssl.TrustManagerFactory; -import net.jcip.annotations.ThreadSafe; - -/** - * Default SSL implementation. - * - *

To activate this class, add an {@code advanced.ssl-engine-factory} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.ssl-engine-factory {
- *     class = DefaultSslEngineFactory
- *     cipher-suites = [ "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" ]
- *     hostname-validation = false
- *     truststore-path = /path/to/client.truststore
- *     truststore-password = password123
- *     keystore-path = /path/to/client.keystore
- *     keystore-password = password123
- *     keystore-reload-interval = 30 minutes
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class DefaultSslEngineFactory implements SslEngineFactory { - - private final SSLContext sslContext; - private final String[] cipherSuites; - private final boolean requireHostnameValidation; - private final boolean allowDnsReverseLookupSan; - private ReloadingKeyManagerFactory kmf; - - /** Builds a new instance from the driver configuration. */ - public DefaultSslEngineFactory(DriverContext driverContext) { - DriverExecutionProfile config = driverContext.getConfig().getDefaultProfile(); - try { - this.sslContext = buildContext(config); - } catch (Exception e) { - throw new IllegalStateException("Cannot initialize SSL Context", e); - } - if (config.isDefined(DefaultDriverOption.SSL_CIPHER_SUITES)) { - List list = config.getStringList(DefaultDriverOption.SSL_CIPHER_SUITES); - String tmp[] = new String[list.size()]; - this.cipherSuites = list.toArray(tmp); - } else { - this.cipherSuites = null; - } - this.requireHostnameValidation = - config.getBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, true); - this.allowDnsReverseLookupSan = - config.getBoolean(DefaultDriverOption.SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN, true); - } - - @VisibleForTesting - protected String hostname(InetSocketAddress addr) { - return allowDnsReverseLookupSan ? hostMaybeFromDnsReverseLookup(addr) : hostNoLookup(addr); - } - - @VisibleForTesting - protected String hostMaybeFromDnsReverseLookup(InetSocketAddress addr) { - // See java.net.InetSocketAddress.getHostName: - // "This method may trigger a name service reverse lookup if the address was created with a - // literal IP address." 
- return addr.getHostName(); - } - - @VisibleForTesting - protected String hostNoLookup(InetSocketAddress addr) { - // See java.net.InetSocketAddress.getHostString: - // "This has the benefit of not attempting a reverse lookup" - return addr.getHostString(); - } - - @NonNull - @Override - public SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint) { - SSLEngine engine; - SocketAddress remoteAddress = remoteEndpoint.resolve(); - if (remoteAddress instanceof InetSocketAddress) { - InetSocketAddress socketAddress = (InetSocketAddress) remoteAddress; - engine = sslContext.createSSLEngine(hostname(socketAddress), socketAddress.getPort()); - } else { - engine = sslContext.createSSLEngine(); - } - engine.setUseClientMode(true); - if (cipherSuites != null) { - engine.setEnabledCipherSuites(cipherSuites); - } - if (requireHostnameValidation) { - SSLParameters parameters = engine.getSSLParameters(); - parameters.setEndpointIdentificationAlgorithm("HTTPS"); - engine.setSSLParameters(parameters); - } - return engine; - } - - protected SSLContext buildContext(DriverExecutionProfile config) throws Exception { - if (config.isDefined(DefaultDriverOption.SSL_KEYSTORE_PATH) - || config.isDefined(DefaultDriverOption.SSL_TRUSTSTORE_PATH)) { - SSLContext context = SSLContext.getInstance("SSL"); - - // initialize truststore if configured. - TrustManagerFactory tmf = null; - if (config.isDefined(DefaultDriverOption.SSL_TRUSTSTORE_PATH)) { - try (InputStream tsf = - Files.newInputStream( - Paths.get(config.getString(DefaultDriverOption.SSL_TRUSTSTORE_PATH)))) { - KeyStore ts = KeyStore.getInstance("JKS"); - char[] password = - config.isDefined(DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD) - ? config.getString(DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD).toCharArray() - : null; - ts.load(tsf, password); - tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(ts); - } - } - - // initialize keystore if configured. 
- if (config.isDefined(DefaultDriverOption.SSL_KEYSTORE_PATH)) { - kmf = buildReloadingKeyManagerFactory(config); - } - - context.init( - kmf != null ? kmf.getKeyManagers() : null, - tmf != null ? tmf.getTrustManagers() : null, - new SecureRandom()); - return context; - } else { - // if both keystore and truststore aren't configured, use default SSLContext. - return SSLContext.getDefault(); - } - } - - private ReloadingKeyManagerFactory buildReloadingKeyManagerFactory(DriverExecutionProfile config) - throws Exception { - Path keystorePath = Paths.get(config.getString(DefaultDriverOption.SSL_KEYSTORE_PATH)); - String password = config.getString(DefaultDriverOption.SSL_KEYSTORE_PASSWORD, null); - Optional reloadInterval = - Optional.ofNullable( - config.getDuration(DefaultDriverOption.SSL_KEYSTORE_RELOAD_INTERVAL, null)); - - return ReloadingKeyManagerFactory.create(keystorePath, password, reloadInterval); - } - - @Override - public void close() throws Exception { - if (kmf != null) kmf.close(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java deleted file mode 100644 index 7661325005e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import io.netty.channel.Channel; -import io.netty.handler.ssl.SslHandler; -import javax.net.ssl.SSLEngine; -import net.jcip.annotations.ThreadSafe; - -/** SSL handler factory used when JDK-based SSL was configured through the driver's public API. */ -@ThreadSafe -public class JdkSslHandlerFactory implements SslHandlerFactory { - private final SslEngineFactory sslEngineFactory; - - public JdkSslHandlerFactory(SslEngineFactory sslEngineFactory) { - this.sslEngineFactory = sslEngineFactory; - } - - @Override - public SslHandler newSslHandler(Channel channel, EndPoint remoteEndpoint) { - SSLEngine engine = sslEngineFactory.newSslEngine(remoteEndpoint); - return new SslHandler(engine); - } - - @Override - public void close() throws Exception { - sslEngineFactory.close(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactory.java deleted file mode 100644 index 8a9e11bb2e9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactory.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.Socket; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.security.Principal; -import java.security.PrivateKey; -import java.security.Provider; -import java.security.UnrecoverableKeyException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.time.Duration; -import java.util.Arrays; -import java.util.Optional; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.KeyManagerFactorySpi; -import javax.net.ssl.ManagerFactoryParameters; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.X509ExtendedKeyManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ReloadingKeyManagerFactory extends KeyManagerFactory implements AutoCloseable { - private static final 
Logger logger = LoggerFactory.getLogger(ReloadingKeyManagerFactory.class); - private static final String KEYSTORE_TYPE = "JKS"; - private Path keystorePath; - private String keystorePassword; - private ScheduledExecutorService executor; - private final Spi spi; - - // We're using a single thread executor so this shouldn't need to be volatile, since all updates - // to lastDigest should come from the same thread - private volatile byte[] lastDigest; - - /** - * Create a new {@link ReloadingKeyManagerFactory} with the given keystore file and password, - * reloading from the file's content at the given interval. This function will do an initial - * reload before returning, to confirm that the file exists and is readable. - * - * @param keystorePath the keystore file to reload - * @param keystorePassword the keystore password - * @param reloadInterval the duration between reload attempts. Set to {@link Optional#empty()} to - * disable scheduled reloading. - * @return - */ - static ReloadingKeyManagerFactory create( - Path keystorePath, String keystorePassword, Optional reloadInterval) - throws UnrecoverableKeyException, KeyStoreException, NoSuchAlgorithmException, - CertificateException, IOException { - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - - KeyStore ks; - try (InputStream ksf = Files.newInputStream(keystorePath)) { - ks = KeyStore.getInstance(KEYSTORE_TYPE); - ks.load(ksf, keystorePassword.toCharArray()); - } - kmf.init(ks, keystorePassword.toCharArray()); - - ReloadingKeyManagerFactory reloadingKeyManagerFactory = new ReloadingKeyManagerFactory(kmf); - reloadingKeyManagerFactory.start(keystorePath, keystorePassword, reloadInterval); - return reloadingKeyManagerFactory; - } - - @VisibleForTesting - protected ReloadingKeyManagerFactory(KeyManagerFactory initial) { - this( - new Spi((X509ExtendedKeyManager) initial.getKeyManagers()[0]), - initial.getProvider(), - initial.getAlgorithm()); - } - - private 
ReloadingKeyManagerFactory(Spi spi, Provider provider, String algorithm) { - super(spi, provider, algorithm); - this.spi = spi; - } - - private void start( - Path keystorePath, String keystorePassword, Optional reloadInterval) { - this.keystorePath = keystorePath; - this.keystorePassword = keystorePassword; - - // Ensure that reload is called once synchronously, to make sure the file exists etc. - reload(); - - if (!reloadInterval.isPresent() || reloadInterval.get().isZero()) { - final String msg = - "KeyStore reloading is disabled. If your Cassandra cluster requires client certificates, " - + "client application restarts are infrequent, and client certificates have short lifetimes, then your client " - + "may fail to re-establish connections to Cassandra hosts. To enable KeyStore reloading, see " - + "`advanced.ssl-engine-factory.keystore-reload-interval` in reference.conf."; - logger.info(msg); - } else { - logger.info("KeyStore reloading is enabled with interval {}", reloadInterval.get()); - - this.executor = - Executors.newScheduledThreadPool( - 1, - runnable -> { - Thread t = Executors.defaultThreadFactory().newThread(runnable); - t.setName(String.format("%s-%%d", this.getClass().getSimpleName())); - t.setDaemon(true); - return t; - }); - this.executor.scheduleWithFixedDelay( - this::reload, - reloadInterval.get().toMillis(), - reloadInterval.get().toMillis(), - TimeUnit.MILLISECONDS); - } - } - - @VisibleForTesting - void reload() { - try { - reload0(); - } catch (Exception e) { - String msg = - "Failed to reload KeyStore. 
If this continues to happen, your client may use stale identity" - + " certificates and fail to re-establish connections to Cassandra hosts."; - logger.warn(msg, e); - } - } - - private synchronized void reload0() - throws NoSuchAlgorithmException, IOException, KeyStoreException, CertificateException, - UnrecoverableKeyException { - logger.debug("Checking KeyStore file {} for updates", keystorePath); - - final byte[] keyStoreBytes = Files.readAllBytes(keystorePath); - final byte[] newDigest = digest(keyStoreBytes); - if (lastDigest != null && Arrays.equals(lastDigest, digest(keyStoreBytes))) { - logger.debug("KeyStore file content has not changed; skipping update"); - return; - } - - final KeyStore keyStore = KeyStore.getInstance(KEYSTORE_TYPE); - try (InputStream inputStream = new ByteArrayInputStream(keyStoreBytes)) { - keyStore.load(inputStream, keystorePassword.toCharArray()); - } - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(keyStore, keystorePassword.toCharArray()); - logger.info("Detected updates to KeyStore file {}", keystorePath); - - this.spi.keyManager.set((X509ExtendedKeyManager) kmf.getKeyManagers()[0]); - this.lastDigest = newDigest; - } - - @Override - public void close() throws Exception { - if (executor != null) { - executor.shutdown(); - } - } - - private static byte[] digest(byte[] payload) throws NoSuchAlgorithmException { - final MessageDigest digest = MessageDigest.getInstance("SHA-256"); - return digest.digest(payload); - } - - private static class Spi extends KeyManagerFactorySpi { - DelegatingKeyManager keyManager; - - Spi(X509ExtendedKeyManager initial) { - this.keyManager = new DelegatingKeyManager(initial); - } - - @Override - protected void engineInit(KeyStore ks, char[] password) { - throw new UnsupportedOperationException(); - } - - @Override - protected void engineInit(ManagerFactoryParameters spec) { - throw new UnsupportedOperationException(); - } - - @Override - 
protected KeyManager[] engineGetKeyManagers() { - return new KeyManager[] {keyManager}; - } - } - - private static class DelegatingKeyManager extends X509ExtendedKeyManager { - AtomicReference delegate; - - DelegatingKeyManager(X509ExtendedKeyManager initial) { - delegate = new AtomicReference<>(initial); - } - - void set(X509ExtendedKeyManager keyManager) { - delegate.set(keyManager); - } - - @Override - public String chooseEngineClientAlias(String[] keyType, Principal[] issuers, SSLEngine engine) { - return delegate.get().chooseEngineClientAlias(keyType, issuers, engine); - } - - @Override - public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine) { - return delegate.get().chooseEngineServerAlias(keyType, issuers, engine); - } - - @Override - public String[] getClientAliases(String keyType, Principal[] issuers) { - return delegate.get().getClientAliases(keyType, issuers); - } - - @Override - public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) { - return delegate.get().chooseClientAlias(keyType, issuers, socket); - } - - @Override - public String[] getServerAliases(String keyType, Principal[] issuers) { - return delegate.get().getServerAliases(keyType, issuers); - } - - @Override - public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) { - return delegate.get().chooseServerAlias(keyType, issuers, socket); - } - - @Override - public X509Certificate[] getCertificateChain(String alias) { - return delegate.get().getCertificateChain(alias); - } - - @Override - public PrivateKey getPrivateKey(String alias) { - return delegate.get().getPrivateKey(alias); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SniSslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SniSslEngineFactory.java deleted file mode 100644 index 4d2cb69fbfc..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SniSslEngineFactory.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.internal.core.metadata.SniEndPoint; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import java.util.concurrent.CopyOnWriteArrayList; -import javax.net.ssl.SNIHostName; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLParameters; - -public class SniSslEngineFactory implements SslEngineFactory { - - // An offset that gets added to our "fake" ports (see below). We pick this value because it is the - // start of the ephemeral port range. 
- private static final int FAKE_PORT_OFFSET = 49152; - - private final SSLContext sslContext; - private final CopyOnWriteArrayList fakePorts = new CopyOnWriteArrayList<>(); - private final boolean allowDnsReverseLookupSan; - - public SniSslEngineFactory(SSLContext sslContext) { - this(sslContext, true); - } - - public SniSslEngineFactory(SSLContext sslContext, boolean allowDnsReverseLookupSan) { - this.sslContext = sslContext; - this.allowDnsReverseLookupSan = allowDnsReverseLookupSan; - } - - @NonNull - @Override - public SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint) { - if (!(remoteEndpoint instanceof SniEndPoint)) { - throw new IllegalArgumentException( - String.format( - "Configuration error: can only use %s with SNI end points", - this.getClass().getSimpleName())); - } - SniEndPoint sniEndPoint = (SniEndPoint) remoteEndpoint; - InetSocketAddress address = sniEndPoint.resolve(); - String sniServerName = sniEndPoint.getServerName(); - - // When hostname verification is enabled (with setEndpointIdentificationAlgorithm), the SSL - // engine will try to match the server's certificate against the SNI host name; if that doesn't - // work, it will fall back to the "advisory peer host" passed to createSSLEngine. - // - // In our case, the first check will never succeed because our SNI host name is not the DNS name - // (we use the Cassandra host_id instead). So we *must* set the advisory peer information. - // - // However if we use the address as-is, this leads to another issue: the advisory peer - // information is also used to cache SSL sessions internally. All of our nodes share the same - // proxy address, so the JDK tries to reuse SSL sessions across nodes. But it doesn't update the - // SNI host name every time, so it ends up opening connections to the wrong node. - // - // To avoid that, we create a unique "fake" port for every node. We still get session reuse for - // a given node, but not across nodes. 
This is safe because the advisory port is only used for - // session caching. - String peerHost = allowDnsReverseLookupSan ? address.getHostName() : address.getHostString(); - SSLEngine engine = sslContext.createSSLEngine(peerHost, getFakePort(sniServerName)); - engine.setUseClientMode(true); - SSLParameters parameters = engine.getSSLParameters(); - parameters.setServerNames(ImmutableList.of(new SNIHostName(sniServerName))); - parameters.setEndpointIdentificationAlgorithm("HTTPS"); - engine.setSSLParameters(parameters); - return engine; - } - - private int getFakePort(String sniServerName) { - fakePorts.addIfAbsent(sniServerName); - return FAKE_PORT_OFFSET + fakePorts.indexOf(sniServerName); - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java deleted file mode 100644 index 87bea563796..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import io.netty.channel.Channel; -import io.netty.handler.ssl.SslHandler; - -/** - * Low-level SSL extension point. - * - *

SSL is separated into two interfaces to avoid exposing Netty classes in our public API: - * - *

    - *
  • "normal" (JDK-based) SSL is part of the public API, and can be configured via an instance - * of {@link com.datastax.oss.driver.api.core.ssl.SslEngineFactory} defined in the driver - * configuration. - *
  • this interface deals with Netty handlers directly. It can be used for more advanced cases, - * like using Netty's native OpenSSL integration instead of the JDK. This is considered expert - * level, and therefore part of our internal API. - *
- * - * @see DefaultDriverContext#buildSslHandlerFactory() - */ -public interface SslHandlerFactory extends AutoCloseable { - SslHandler newSslHandler(Channel channel, EndPoint remoteEndpoint); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java deleted file mode 100644 index 351ed96d66f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import java.util.concurrent.atomic.AtomicLong; -import net.jcip.annotations.ThreadSafe; - -/** - * A timestamp generator that guarantees monotonically increasing timestamps across all client - * threads, and logs warnings when timestamps drift in the future. - * - *

To activate this generator, modify the {@code advanced.timestamp-generator} section in the - * driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.timestamp-generator {
- *     class = AtomicTimestampGenerator
- *     drift-warning {
- *       threshold = 1 second
- *       interval = 10 seconds
- *     }
- *     force-java-clock = false
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class AtomicTimestampGenerator extends MonotonicTimestampGenerator { - - private final AtomicLong lastRef = new AtomicLong(0); - - public AtomicTimestampGenerator(DriverContext context) { - super(context); - } - - @VisibleForTesting - AtomicTimestampGenerator(Clock clock, InternalDriverContext context) { - super(clock, context); - } - - @Override - public long next() { - while (true) { - long last = lastRef.get(); - long next = computeNext(last); - if (lastRef.compareAndSet(last, next)) { - return next; - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java deleted file mode 100644 index e576b13a74b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.internal.core.os.Native; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A small abstraction around system clock that aims to provide microsecond precision with the best - * accuracy possible. - */ -public interface Clock { - Logger LOG = LoggerFactory.getLogger(Clock.class); - - /** - * Returns the best implementation for the current platform. - * - *

Usage with non-blocking threads: beware that this method may block the calling thread on its - * very first invocation, because native libraries used by the driver will be loaded at that - * moment. If that is a problem, consider invoking this method once from a thread that is allowed - * to block. Subsequent invocations are guaranteed not to block. - */ - static Clock getInstance(boolean forceJavaClock) { - if (forceJavaClock) { - LOG.info("Using Java system clock because this was explicitly required in the configuration"); - return new JavaClock(); - } else if (!Native.isCurrentTimeMicrosAvailable()) { - LOG.info( - "Could not access native clock (see debug logs for details), " - + "falling back to Java system clock"); - return new JavaClock(); - } else { - LOG.info("Using native clock for microsecond precision"); - return new NativeClock(); - } - } - - /** - * Returns the difference, measured in microseconds, between the current time and and the - * Epoch (that is, midnight, January 1, 1970 UTC). - */ - long currentTimeMicros(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java deleted file mode 100644 index b6dfbebcdb0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class JavaClock implements Clock { - @Override - public long currentTimeMicros() { - return System.currentTimeMillis() * 1000; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java deleted file mode 100644 index 99a520d02b1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import java.time.Duration; -import java.util.concurrent.atomic.AtomicLong; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A timestamp generator that guarantees monotonicity, and logs warnings when timestamps drift in - * the future. - */ -@ThreadSafe -abstract class MonotonicTimestampGenerator implements TimestampGenerator { - - private static final Logger LOG = LoggerFactory.getLogger(MonotonicTimestampGenerator.class); - - private final Clock clock; - private final long warningThresholdMicros; - private final long warningIntervalMillis; - private final AtomicLong lastDriftWarning = new AtomicLong(Long.MIN_VALUE); - - protected MonotonicTimestampGenerator(DriverContext context) { - this(buildClock(context), context); - } - - protected MonotonicTimestampGenerator(Clock clock, DriverContext context) { - this.clock = clock; - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.warningThresholdMicros = - config - .getDuration( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, Duration.ZERO) - .toNanos() - / 1000; - - if (this.warningThresholdMicros == 0) { - this.warningIntervalMillis = 0; - } else { - this.warningIntervalMillis = - config - .getDuration(DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL) - .toMillis(); - } - } - - /** - * Compute the next timestamp, given the current clock tick and the last timestamp returned. - * - *

If timestamps have to drift ahead of the current clock tick to guarantee monotonicity, a - * warning will be logged according to the rules defined in the configuration. - */ - protected long computeNext(long last) { - long currentTick = clock.currentTimeMicros(); - if (last >= currentTick) { - maybeLog(currentTick, last); - return last + 1; - } - return currentTick; - } - - @Override - public void close() throws Exception { - // nothing to do - } - - private void maybeLog(long currentTick, long last) { - if (warningThresholdMicros != 0 - && LOG.isWarnEnabled() - && last > currentTick + warningThresholdMicros) { - long now = System.currentTimeMillis(); - long lastWarning = lastDriftWarning.get(); - if (now > lastWarning + warningIntervalMillis - && lastDriftWarning.compareAndSet(lastWarning, now)) { - LOG.warn( - "Clock skew detected: current tick ({}) was {} microseconds behind the last generated timestamp ({}), " - + "returned timestamps will be artificially incremented to guarantee monotonicity.", - currentTick, - last - currentTick, - last); - } - } - } - - private static Clock buildClock(DriverContext context) { - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - boolean forceJavaClock = - config.getBoolean(DefaultDriverOption.TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK, false); - return Clock.getInstance(forceJavaClock); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java deleted file mode 100644 index 51265ead820..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.NANOSECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; - -import com.datastax.oss.driver.internal.core.os.Native; -import java.util.concurrent.atomic.AtomicReference; -import net.jcip.annotations.ThreadSafe; - -/** - * Provides the current time with microseconds precision with some reasonable accuracy through the - * use of {@link Native#currentTimeMicros()}. - * - *

Because calling JNR methods is slightly expensive, we only call it once per second and add the - * number of nanoseconds since the last call to get the current time, which is good enough an - * accuracy for our purpose (see CASSANDRA-6106). - * - *

This reduces the cost of the call to {@link NativeClock#currentTimeMicros()} to levels - * comparable to those of a call to {@link System#nanoTime()}. - */ -@ThreadSafe -public class NativeClock implements Clock { - - private static final long ONE_SECOND_NS = NANOSECONDS.convert(1, SECONDS); - private static final long ONE_MILLISECOND_NS = NANOSECONDS.convert(1, MILLISECONDS); - - // Records a time in micros along with the System.nanoTime() value at the time the time is - // fetched. - private static class FetchedTime { - - private final long timeInMicros; - private final long nanoTimeAtCheck; - - private FetchedTime(long timeInMicros, long nanoTimeAtCheck) { - this.timeInMicros = timeInMicros; - this.nanoTimeAtCheck = nanoTimeAtCheck; - } - } - - private final AtomicReference lastFetchedTime = - new AtomicReference<>(fetchTimeMicros()); - - @Override - public long currentTimeMicros() { - FetchedTime spec = lastFetchedTime.get(); - long curNano = System.nanoTime(); - if (curNano > spec.nanoTimeAtCheck + ONE_SECOND_NS) { - lastFetchedTime.compareAndSet(spec, spec = fetchTimeMicros()); - } - return spec.timeInMicros + ((curNano - spec.nanoTimeAtCheck) / 1000); - } - - private static FetchedTime fetchTimeMicros() { - // To compensate for the fact that the Native.currentTimeMicros call could take some time, - // instead of picking the nano time before the call or after the call, we take the average of - // both. - long start = System.nanoTime(); - long micros = Native.currentTimeMicros(); - long end = System.nanoTime(); - // If it turns out the call took us more than 1 millisecond (can happen while the JVM warms up, - // unlikely otherwise, but no reasons to take risks), fall back to System.currentTimeMillis() - // temporarily. 
- if ((end - start) > ONE_MILLISECOND_NS) { - return new FetchedTime(System.currentTimeMillis() * 1000, System.nanoTime()); - } else { - return new FetchedTime(micros, (end + start) / 2); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java deleted file mode 100644 index 0df056deb04..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import net.jcip.annotations.ThreadSafe; - -/** - * A timestamp generator that never sends a timestamp with any query, therefore letting Cassandra - * assign a server-side timestamp. - * - *

To activate this generator, modify the {@code advanced.timestamp-generator} section in the - * driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.timestamp-generator {
- *     class = ServerSideTimestampGenerator
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ServerSideTimestampGenerator implements TimestampGenerator { - - public ServerSideTimestampGenerator(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } - - @Override - public long next() { - return Statement.NO_DEFAULT_TIMESTAMP; - } - - @Override - public void close() throws Exception { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java deleted file mode 100644 index 598ae5cbbc2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import net.jcip.annotations.ThreadSafe; - -/** - * A timestamp generator that guarantees monotonically increasing timestamps within each thread, and - * logs warnings when timestamps drift in the future. - * - *

Beware that there is a risk of timestamp collision with this generator when accessed by more - * than one thread at a time; only use it when threads are not in direct competition for timestamp - * ties (i.e., they are executing independent statements). - * - *

To activate this generator, modify the {@code advanced.timestamp-generator} section in the - * driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.timestamp-generator {
- *     class = ThreadLocalTimestampGenerator
- *     drift-warning {
- *       threshold = 1 second
- *       interval = 10 seconds
- *     }
- *     force-java-clock = false
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ThreadLocalTimestampGenerator extends MonotonicTimestampGenerator { - - private final ThreadLocal lastRef = ThreadLocal.withInitial(() -> 0L); - - public ThreadLocalTimestampGenerator(DriverContext context) { - super(context); - } - - @VisibleForTesting - ThreadLocalTimestampGenerator(Clock clock, DriverContext context) { - super(clock, context); - } - - @Override - public long next() { - Long last = this.lastRef.get(); - long next = computeNext(last); - this.lastRef.set(next); - return next; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTracker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTracker.java deleted file mode 100644 index 6fe2ba059bd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTracker.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.function.Consumer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Combines multiple request trackers into a single one. - * - *

Any exception thrown by a child tracker is caught and logged. - */ -@ThreadSafe -public class MultiplexingRequestTracker implements RequestTracker { - - private static final Logger LOG = LoggerFactory.getLogger(MultiplexingRequestTracker.class); - - private final List trackers = new CopyOnWriteArrayList<>(); - - public MultiplexingRequestTracker() {} - - public MultiplexingRequestTracker(RequestTracker... trackers) { - this(Arrays.asList(trackers)); - } - - public MultiplexingRequestTracker(Collection trackers) { - addTrackers(trackers); - } - - private void addTrackers(Collection source) { - for (RequestTracker tracker : source) { - addTracker(tracker); - } - } - - private void addTracker(RequestTracker toAdd) { - Objects.requireNonNull(toAdd, "tracker cannot be null"); - if (toAdd instanceof MultiplexingRequestTracker) { - addTrackers(((MultiplexingRequestTracker) toAdd).trackers); - } else { - trackers.add(toAdd); - } - } - - public void register(@NonNull RequestTracker tracker) { - addTracker(tracker); - } - - @Override - public void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String sessionRequestLogPrefix) { - invokeTrackers( - tracker -> - tracker.onSuccess( - request, latencyNanos, executionProfile, node, sessionRequestLogPrefix), - sessionRequestLogPrefix, - "onSuccess"); - } - - @Override - public void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @Nullable Node node, - @NonNull String sessionRequestLogPrefix) { - invokeTrackers( - tracker -> - tracker.onError( - request, error, latencyNanos, executionProfile, node, sessionRequestLogPrefix), - sessionRequestLogPrefix, - "onError"); - } - - @Override - public void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull 
String nodeRequestLogPrefix) { - invokeTrackers( - tracker -> - tracker.onNodeSuccess( - request, latencyNanos, executionProfile, node, nodeRequestLogPrefix), - nodeRequestLogPrefix, - "onNodeSuccess"); - } - - @Override - public void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - invokeTrackers( - tracker -> - tracker.onNodeError( - request, error, latencyNanos, executionProfile, node, nodeRequestLogPrefix), - nodeRequestLogPrefix, - "onNodeError"); - } - - @Override - public void onSessionReady(@NonNull Session session) { - invokeTrackers(tracker -> tracker.onSessionReady(session), session.getName(), "onSessionReady"); - } - - @Override - public void close() throws Exception { - for (RequestTracker tracker : trackers) { - try { - tracker.close(); - } catch (Exception e) { - Loggers.warnWithException( - LOG, "Unexpected error while closing request tracker {}.", tracker, e); - } - } - } - - private void invokeTrackers( - @NonNull Consumer action, String logPrefix, String event) { - for (RequestTracker tracker : trackers) { - try { - action.accept(tracker); - } catch (Exception e) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while notifying request tracker {} of an {} event.", - logPrefix, - tracker, - event, - e); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java deleted file mode 100644 index 3821c6ace2d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -/** - * Default request tracker implementation with empty methods. This implementation is used when no - * trackers were registered, neither programmatically nor through the configuration. 
- */ -@ThreadSafe -public class NoopRequestTracker implements RequestTracker { - - public NoopRequestTracker(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } - - @Override - public void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String sessionRequestLogPrefix) { - // nothing to do - } - - @Override - public void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - Node node, - @NonNull String sessionRequestLogPrefix) { - // nothing to do - } - - @Override - public void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // nothing to do - } - - @Override - public void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // nothing to do - } - - @Override - public void close() throws Exception { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java deleted file mode 100644 index 808d08e228d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchableStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class RequestLogFormatter { - - private static final String FURTHER_VALUES_TRUNCATED = "...]"; - private static final String TRUNCATED = "..."; - - private final DriverContext context; - - public RequestLogFormatter(DriverContext context) { - this.context = context; - } - - public StringBuilder logBuilder(String logPrefix, Node node) { - return new StringBuilder("[").append(logPrefix).append("][").append(node).append("] "); - } - - public void appendSuccessDescription(StringBuilder builder) { - builder.append("Success "); 
- } - - public void appendSlowDescription(StringBuilder builder) { - builder.append("Slow "); - } - - public void appendErrorDescription(StringBuilder builder) { - builder.append("Error "); - } - - public void appendLatency(long latencyNanos, StringBuilder builder) { - builder.append('(').append(NanoTime.format(latencyNanos)).append(") "); - } - - public void appendRequest( - Request request, - int maxQueryLength, - boolean showValues, - int maxValues, - int maxValueLength, - StringBuilder builder) { - appendStats(request, builder); - appendQueryString(request, maxQueryLength, builder); - if (showValues) { - appendValues(request, maxValues, maxValueLength, true, builder); - } - } - - protected void appendStats(Request request, StringBuilder builder) { - int valueCount = countBoundValues(request); - if (request instanceof BatchStatement) { - BatchStatement statement = (BatchStatement) request; - builder - .append('[') - .append(statement.size()) - .append(" statements, ") - .append(valueCount) - .append(" values] "); - } else { - builder.append('[').append(valueCount).append(" values] "); - } - } - - protected int countBoundValues(Request request) { - if (request instanceof BatchStatement) { - int count = 0; - for (BatchableStatement child : (BatchStatement) request) { - count += countBoundValues(child); - } - return count; - } else if (request instanceof BoundStatement) { - return ((BoundStatement) request).getPreparedStatement().getVariableDefinitions().size(); - } else if (request instanceof SimpleStatement) { - SimpleStatement statement = (SimpleStatement) request; - return Math.max(statement.getPositionalValues().size(), statement.getNamedValues().size()); - } else { - return 0; - } - } - - protected int appendQueryString(Request request, int limit, StringBuilder builder) { - if (request instanceof BatchStatement) { - BatchStatement batch = (BatchStatement) request; - limit = append("BEGIN", limit, builder); - if (batch.getBatchType() == 
DefaultBatchType.UNLOGGED) { - limit = append(" UNLOGGED", limit, builder); - } else if (batch.getBatchType() == DefaultBatchType.COUNTER) { - limit = append(" COUNTER", limit, builder); - } - limit = append(" BATCH ", limit, builder); - for (BatchableStatement child : batch) { - limit = appendQueryString(child, limit, builder); - if (limit < 0) { - break; - } - limit = append("; ", limit, builder); - } - limit = append("APPLY BATCH", limit, builder); - return limit; - } else if (request instanceof BoundStatement) { - BoundStatement statement = (BoundStatement) request; - return append(statement.getPreparedStatement().getQuery(), limit, builder); - } else if (request instanceof SimpleStatement) { - SimpleStatement statement = (SimpleStatement) request; - return append(statement.getQuery(), limit, builder); - } else { - return append(request.toString(), limit, builder); - } - } - - /** - * @return the number of values that can still be appended after this, or -1 if the max was - * reached by this call. 
- */ - protected int appendValues( - Request request, - int maxValues, - int maxValueLength, - boolean addSeparator, - StringBuilder builder) { - if (request instanceof BatchStatement) { - BatchStatement batch = (BatchStatement) request; - for (BatchableStatement child : batch) { - maxValues = appendValues(child, maxValues, maxValueLength, addSeparator, builder); - if (addSeparator) { - addSeparator = false; - } - if (maxValues < 0) { - return -1; - } - } - } else if (request instanceof BoundStatement) { - BoundStatement statement = (BoundStatement) request; - ColumnDefinitions definitions = statement.getPreparedStatement().getVariableDefinitions(); - List values = statement.getValues(); - assert definitions.size() == values.size(); - if (definitions.size() > 0) { - if (addSeparator) { - builder.append(' '); - } - builder.append('['); - for (int i = 0; i < definitions.size(); i++) { - if (i > 0) { - builder.append(", "); - } - maxValues -= 1; - if (maxValues < 0) { - builder.append(FURTHER_VALUES_TRUNCATED); - return -1; - } - builder.append(definitions.get(i).getName().asCql(true)).append('='); - if (!statement.isSet(i)) { - builder.append(""); - } else { - ByteBuffer value = values.get(i); - DataType type = definitions.get(i).getType(); - appendValue(value, type, maxValueLength, builder); - } - } - builder.append(']'); - } - } else if (request instanceof SimpleStatement) { - SimpleStatement statement = (SimpleStatement) request; - if (!statement.getPositionalValues().isEmpty()) { - if (addSeparator) { - builder.append(' '); - } - builder.append('['); - int i = 0; - for (Object value : statement.getPositionalValues()) { - if (i > 0) { - builder.append(", "); - } - maxValues -= 1; - if (maxValues < 0) { - builder.append(FURTHER_VALUES_TRUNCATED); - return -1; - } - builder.append('v').append(i).append('='); - appendValue(value, maxValueLength, builder); - i += 1; - } - builder.append(']'); - } else if (!statement.getNamedValues().isEmpty()) { - if (addSeparator) { 
- builder.append(' '); - } - builder.append('['); - int i = 0; - for (Map.Entry entry : statement.getNamedValues().entrySet()) { - if (i > 0) { - builder.append(", "); - } - maxValues -= 1; - if (maxValues < 0) { - builder.append(FURTHER_VALUES_TRUNCATED); - return -1; - } - builder.append(entry.getKey().asCql(true)).append('='); - appendValue(entry.getValue(), maxValueLength, builder); - i += 1; - } - builder.append(']'); - } - } - return maxValues; - } - - protected void appendValue(ByteBuffer raw, DataType type, int maxLength, StringBuilder builder) { - TypeCodec codec = context.getCodecRegistry().codecFor(type); - if (type.equals(DataTypes.BLOB)) { - // For very large buffers, apply the limit before converting into a string - int maxBufferLength = Math.max((maxLength - 2) / 2, 0); - boolean bufferTooLarge = raw.remaining() > maxBufferLength; - if (bufferTooLarge) { - raw = (ByteBuffer) raw.duplicate().limit(maxBufferLength); - } - Object value = codec.decode(raw, context.getProtocolVersion()); - append(codec.format(value), maxLength, builder); - if (bufferTooLarge) { - builder.append(TRUNCATED); - } - } else { - Object value = codec.decode(raw, context.getProtocolVersion()); - append(codec.format(value), maxLength, builder); - } - } - - protected void appendValue(Object value, int maxLength, StringBuilder builder) { - TypeCodec codec = context.getCodecRegistry().codecFor(value); - if (value instanceof ByteBuffer) { - // For very large buffers, apply the limit before converting into a string - ByteBuffer buffer = (ByteBuffer) value; - int maxBufferLength = Math.max((maxLength - 2) / 2, 0); - boolean bufferTooLarge = buffer.remaining() > maxBufferLength; - if (bufferTooLarge) { - buffer = (ByteBuffer) buffer.duplicate().limit(maxBufferLength); - } - append(codec.format(buffer), maxLength, builder); - if (bufferTooLarge) { - builder.append(TRUNCATED); - } - } else { - append(codec.format(value), maxLength, builder); - } - } - - /** - * @return the number of 
characters that can still be appended after this, or -1 if this call hit - * the limit. - */ - protected int append(String value, int limit, StringBuilder builder) { - if (limit < 0) { - // Small simplification to avoid having to check the limit every time when we do a sequence of - // simple calls, like BEGIN... UNLOGGED... BATCH. If the first call hits the limit, the next - // ones will be ignored. - return limit; - } else if (value.length() <= limit) { - builder.append(value); - return limit - value.length(); - } else { - builder.append(value.substring(0, limit)).append(TRUNCATED); - return -1; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java deleted file mode 100644 index f242ff89c54..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A request tracker that logs the requests executed through the session, according to a set of - * configurable options. - * - *

To activate this tracker, modify the {@code advanced.request-tracker} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.request-tracker {
- *     classes = [RequestLogger]
- *     logs {
- *       success { enabled = true }
- *       slow { enabled = true, threshold = 1 second }
- *       error { enabled = true }
- *       max-query-length = 500
- *       show-values = true
- *       max-value-length = 50
- *       max-values = 50
- *       show-stack-traces = true
- *     }
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *

Note that if a tracker is specified programmatically with {@link - * SessionBuilder#addRequestTracker(RequestTracker)}, the configuration is ignored. - */ -@ThreadSafe -public class RequestLogger implements RequestTracker { - - private static final Logger LOG = LoggerFactory.getLogger(RequestLogger.class); - - public static final int DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH = 500; - public static final boolean DEFAULT_REQUEST_LOGGER_SHOW_VALUES = true; - public static final int DEFAULT_REQUEST_LOGGER_MAX_VALUES = 50; - public static final int DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH = 50; - - private final RequestLogFormatter formatter; - - public RequestLogger(DriverContext context) { - this(new RequestLogFormatter(context)); - } - - protected RequestLogger(RequestLogFormatter formatter) { - this.formatter = formatter; - } - - @Override - public void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String sessionRequestLogPrefix) { - - boolean successEnabled = - executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, false); - boolean slowEnabled = - executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, false); - if (!successEnabled && !slowEnabled) { - return; - } - - long slowThresholdNanos = - executionProfile - .getDuration(DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD, Duration.ofSeconds(1)) - .toNanos(); - boolean isSlow = latencyNanos > slowThresholdNanos; - if ((isSlow && !slowEnabled) || (!isSlow && !successEnabled)) { - return; - } - - int maxQueryLength = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH); - boolean showValues = - executionProfile.getBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, DEFAULT_REQUEST_LOGGER_SHOW_VALUES); - int maxValues = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, 
DEFAULT_REQUEST_LOGGER_MAX_VALUES); - int maxValueLength = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH); - - logSuccess( - request, - latencyNanos, - isSlow, - node, - maxQueryLength, - showValues, - maxValues, - maxValueLength, - sessionRequestLogPrefix); - } - - @Override - public void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - Node node, - @NonNull String sessionRequestLogPrefix) { - - if (!executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, false)) { - return; - } - - int maxQueryLength = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH); - boolean showValues = - executionProfile.getBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, DEFAULT_REQUEST_LOGGER_SHOW_VALUES); - int maxValues = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, DEFAULT_REQUEST_LOGGER_MAX_VALUES); - - int maxValueLength = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH); - boolean showStackTraces = - executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, false); - - logError( - request, - error, - latencyNanos, - node, - maxQueryLength, - showValues, - maxValues, - maxValueLength, - showStackTraces, - sessionRequestLogPrefix); - } - - @Override - public void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // Nothing to do - } - - @Override - public void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - 
// Nothing to do - } - - @Override - public void close() throws Exception { - // nothing to do - } - - protected void logSuccess( - Request request, - long latencyNanos, - boolean isSlow, - Node node, - int maxQueryLength, - boolean showValues, - int maxValues, - int maxValueLength, - String logPrefix) { - - StringBuilder builder = formatter.logBuilder(logPrefix, node); - if (isSlow) { - formatter.appendSlowDescription(builder); - } else { - formatter.appendSuccessDescription(builder); - } - formatter.appendLatency(latencyNanos, builder); - formatter.appendRequest( - request, maxQueryLength, showValues, maxValues, maxValueLength, builder); - LOG.info(builder.toString()); - } - - protected void logError( - Request request, - Throwable error, - long latencyNanos, - Node node, - int maxQueryLength, - boolean showValues, - int maxValues, - int maxValueLength, - boolean showStackTraces, - String logPrefix) { - - StringBuilder builder = formatter.logBuilder(logPrefix, node); - formatter.appendErrorDescription(builder); - formatter.appendLatency(latencyNanos, builder); - formatter.appendRequest( - request, maxQueryLength, showValues, maxValues, maxValueLength, builder); - if (showStackTraces) { - LOG.error(builder.toString(), error); - } else { - LOG.error("{} [{}]", builder.toString(), error.toString()); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/UuidRequestIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/UuidRequestIdGenerator.java deleted file mode 100644 index cc07d6717f4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/UuidRequestIdGenerator.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import edu.umd.cs.findbugs.annotations.NonNull; - -public class UuidRequestIdGenerator implements RequestIdGenerator { - public UuidRequestIdGenerator(DriverContext context) {} - - /** Generates a random v4 UUID. 
*/ - @Override - public String getSessionRequestId() { - return Uuids.random().toString(); - } - - /** - * {session-request-id}-{random-uuid} All node requests for a session request will have the same - * session request id - */ - @Override - public String getNodeRequestId(@NonNull Request statement, @NonNull String parentId) { - return parentId + "-" + Uuids.random(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/W3CContextRequestIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/W3CContextRequestIdGenerator.java deleted file mode 100644 index fe15b93bc8e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/W3CContextRequestIdGenerator.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.shaded.guava.common.io.BaseEncoding; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.security.SecureRandom; -import java.util.Random; - -public class W3CContextRequestIdGenerator implements RequestIdGenerator { - - private final Random random = new SecureRandom(); - private final BaseEncoding baseEncoding = BaseEncoding.base16().lowerCase(); - private final String payloadKey; - - public W3CContextRequestIdGenerator(DriverContext context) { - payloadKey = RequestIdGenerator.super.getCustomPayloadKey(); - } - - public W3CContextRequestIdGenerator(String payloadKey) { - this.payloadKey = payloadKey; - } - - /** Random 16 bytes, e.g. "4bf92f3577b34da6a3ce929d0e0e4736" */ - @Override - public String getSessionRequestId() { - byte[] bytes = new byte[16]; - random.nextBytes(bytes); - return baseEncoding.encode(bytes); - } - - /** - * Following the format of W3C "traceparent" spec, - * https://www.w3.org/TR/trace-context/#traceparent-header-field-values e.g. 
- * "00-4bf92f3577b34da6a3ce929d0e0e4736-a3ce929d0e0e4736-01" All node requests in the same session - * request share the same "trace-id" field value - */ - @Override - public String getNodeRequestId(@NonNull Request statement, @NonNull String parentId) { - byte[] bytes = new byte[8]; - random.nextBytes(bytes); - return String.format("00-%s-%s-00", parentId, baseEncoding.encode(bytes)); - } - - @Override - public String getCustomPayloadKey() { - return this.payloadKey; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java deleted file mode 100644 index 1e02a6b8e82..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.util.IntMap; -import java.util.List; -import java.util.Map; - -public class DataTypeHelper { - - public static DataType fromProtocolSpec(RawType rawType, AttachmentPoint attachmentPoint) { - DataType type = PRIMITIVE_TYPES_BY_CODE.get(rawType.id); - if (type != null) { - return type; - } else { - switch (rawType.id) { - case ProtocolConstants.DataType.CUSTOM: - RawType.RawCustom rawCustom = (RawType.RawCustom) rawType; - return DataTypes.custom(rawCustom.className); - case ProtocolConstants.DataType.LIST: - RawType.RawList rawList = (RawType.RawList) rawType; - return DataTypes.listOf(fromProtocolSpec(rawList.elementType, attachmentPoint)); - case ProtocolConstants.DataType.SET: - RawType.RawSet rawSet = (RawType.RawSet) rawType; - return DataTypes.setOf(fromProtocolSpec(rawSet.elementType, attachmentPoint)); - case ProtocolConstants.DataType.MAP: - RawType.RawMap rawMap = (RawType.RawMap) rawType; - return DataTypes.mapOf( - fromProtocolSpec(rawMap.keyType, attachmentPoint), - fromProtocolSpec(rawMap.valueType, attachmentPoint)); - case ProtocolConstants.DataType.TUPLE: - RawType.RawTuple rawTuple = (RawType.RawTuple) rawType; - List rawFieldsList = rawTuple.fieldTypes; - ImmutableList.Builder fields = ImmutableList.builder(); - for (RawType rawField : rawFieldsList) { - fields.add(fromProtocolSpec(rawField, attachmentPoint)); - } - return new DefaultTupleType(fields.build(), attachmentPoint); - case ProtocolConstants.DataType.UDT: 
- RawType.RawUdt rawUdt = (RawType.RawUdt) rawType; - ImmutableList.Builder fieldNames = ImmutableList.builder(); - ImmutableList.Builder fieldTypes = ImmutableList.builder(); - for (Map.Entry entry : rawUdt.fields.entrySet()) { - fieldNames.add(CqlIdentifier.fromInternal(entry.getKey())); - fieldTypes.add(fromProtocolSpec(entry.getValue(), attachmentPoint)); - } - return new DefaultUserDefinedType( - CqlIdentifier.fromInternal(rawUdt.keyspace), - CqlIdentifier.fromInternal(rawUdt.typeName), - false, - fieldNames.build(), - fieldTypes.build(), - attachmentPoint); - default: - throw new IllegalArgumentException("Unsupported type: " + rawType.id); - } - } - } - - private static IntMap PRIMITIVE_TYPES_BY_CODE = - sortByProtocolCode( - DataTypes.ASCII, - DataTypes.BIGINT, - DataTypes.BLOB, - DataTypes.BOOLEAN, - DataTypes.COUNTER, - DataTypes.DECIMAL, - DataTypes.DOUBLE, - DataTypes.FLOAT, - DataTypes.INT, - DataTypes.TIMESTAMP, - DataTypes.UUID, - DataTypes.VARINT, - DataTypes.TIMEUUID, - DataTypes.INET, - DataTypes.DATE, - DataTypes.TEXT, - DataTypes.TIME, - DataTypes.SMALLINT, - DataTypes.TINYINT, - DataTypes.DURATION); - - private static IntMap sortByProtocolCode(DataType... types) { - IntMap.Builder builder = IntMap.builder(); - for (DataType type : types) { - builder.put(type.getProtocolCode(), type); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java deleted file mode 100644 index 7b9e03818ac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCustomType implements CustomType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final String className; - - public DefaultCustomType(@NonNull String className) { - Preconditions.checkNotNull(className); - this.className = className; - } - - @NonNull - @Override - public String getClassName() { - return className; - } - - @Override - public boolean isDetached() { - return false; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - // nothing to do - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CustomType) { - CustomType that = (CustomType) other; - return this.className.equals(that.getClassName()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return className.hashCode(); - } - - @Override - public String toString() { - return "Custom(" + 
className + ")"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(className); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java deleted file mode 100644 index 6c21b44639e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultListType implements ListType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final DataType elementType; - /** @serial */ - private final boolean frozen; - - public DefaultListType(@NonNull DataType elementType, boolean frozen) { - Preconditions.checkNotNull(elementType); - this.elementType = elementType; - this.frozen = frozen; - } - - @NonNull - @Override - public DataType getElementType() { - return elementType; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @Override - public boolean isDetached() { - return elementType.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - elementType.attach(attachmentPoint); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ListType) { - ListType that = (ListType) other; - // frozen is not taken into account - return this.elementType.equals(that.getElementType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(DefaultListType.class, this.elementType); - } - - @Override - public String toString() { - return "List(" + elementType + ", " + (frozen ? 
"" : "not ") + "frozen)"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(elementType); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java deleted file mode 100644 index 8da9f196f26..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultMapType implements MapType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final DataType keyType; - /** @serial */ - private final DataType valueType; - /** @serial */ - private final boolean frozen; - - public DefaultMapType(@NonNull DataType keyType, @NonNull DataType valueType, boolean frozen) { - Preconditions.checkNotNull(keyType); - Preconditions.checkNotNull(valueType); - this.keyType = keyType; - this.valueType = valueType; - this.frozen = frozen; - } - - @NonNull - @Override - public DataType getKeyType() { - return keyType; - } - - @NonNull - @Override - public DataType getValueType() { - return valueType; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @Override - public boolean isDetached() { - return keyType.isDetached() || valueType.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - keyType.attach(attachmentPoint); - valueType.attach(attachmentPoint); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof MapType) { - MapType that = (MapType) other; - // frozen is not taken into account - return this.keyType.equals(that.getKeyType()) && this.valueType.equals(that.getValueType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(DefaultMapType.class, keyType, valueType); - 
} - - @Override - public String toString() { - return "Map(" + keyType + " => " + valueType + ", " + (frozen ? "" : "not ") + "frozen)"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(keyType); - Preconditions.checkNotNull(valueType); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java deleted file mode 100644 index 27641731c72..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultSetType implements SetType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final DataType elementType; - /** @serial */ - private final boolean frozen; - - public DefaultSetType(@NonNull DataType elementType, boolean frozen) { - Preconditions.checkNotNull(elementType); - this.elementType = elementType; - this.frozen = frozen; - } - - @NonNull - @Override - public DataType getElementType() { - return elementType; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @Override - public boolean isDetached() { - return elementType.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - elementType.attach(attachmentPoint); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof SetType) { - SetType that = (SetType) other; - // frozen is not taken into account - return this.elementType.equals(that.getElementType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(DefaultSetType.class, this.elementType); - } - - @Override - public String toString() { - return "Set(" + elementType + ", " + (frozen ? 
"" : "not ") + "frozen)"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(elementType); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java deleted file mode 100644 index 29b1b20436f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultTupleType implements TupleType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final ImmutableList componentTypes; - - private transient volatile AttachmentPoint attachmentPoint; - - public DefaultTupleType( - @NonNull List componentTypes, @NonNull AttachmentPoint attachmentPoint) { - Preconditions.checkNotNull(componentTypes); - this.componentTypes = ImmutableList.copyOf(componentTypes); - this.attachmentPoint = attachmentPoint; - } - - public DefaultTupleType(@NonNull List componentTypes) { - this(componentTypes, AttachmentPoint.NONE); - } - - @NonNull - @Override - public List getComponentTypes() { - return componentTypes; - } - - @NonNull - @Override - public TupleValue newValue() { - return new DefaultTupleValue(this); - } - - @NonNull - @Override - public TupleValue newValue(@NonNull Object... 
values) { - return new DefaultTupleValue(this, values); - } - - @Override - public boolean isDetached() { - return attachmentPoint == AttachmentPoint.NONE; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - this.attachmentPoint = attachmentPoint; - for (DataType componentType : componentTypes) { - componentType.attach(attachmentPoint); - } - } - - @NonNull - @Override - public AttachmentPoint getAttachmentPoint() { - return attachmentPoint; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TupleType) { - TupleType that = (TupleType) other; - return this.componentTypes.equals(that.getComponentTypes()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return componentTypes.hashCode(); - } - - @Override - public String toString() { - return "Tuple(" + WITH_COMMA.join(componentTypes) + ")"; - } - - private static final Joiner WITH_COMMA = Joiner.on(", "); - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(componentTypes); - this.attachmentPoint = AttachmentPoint.NONE; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java deleted file mode 100644 index 6b1431dc699..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; -import com.datastax.oss.driver.internal.core.data.IdentifierIndex; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultUserDefinedType implements UserDefinedType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final CqlIdentifier keyspace; - /** @serial */ - private final CqlIdentifier name; - - // Data types are only [de]serialized as part of a row, frozenness doesn't matter in that context - private final transient boolean frozen; - - /** @serial */ - private final List fieldNames; - /** @serial */ - private final List fieldTypes; - - private transient IdentifierIndex index; - private transient volatile AttachmentPoint attachmentPoint; - - public DefaultUserDefinedType( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier 
name, - boolean frozen, - List fieldNames, - @NonNull List fieldTypes, - @NonNull AttachmentPoint attachmentPoint) { - Preconditions.checkNotNull(keyspace); - Preconditions.checkNotNull(name); - Preconditions.checkNotNull(fieldNames); - Preconditions.checkNotNull(fieldTypes); - Preconditions.checkArgument(fieldNames.size() > 0, "Field names list can't be null or empty"); - Preconditions.checkArgument( - fieldTypes.size() == fieldNames.size(), - "There should be the same number of field names and types"); - this.keyspace = keyspace; - this.name = name; - this.frozen = frozen; - this.fieldNames = ImmutableList.copyOf(fieldNames); - this.fieldTypes = ImmutableList.copyOf(fieldTypes); - this.index = new IdentifierIndex(this.fieldNames); - this.attachmentPoint = attachmentPoint; - } - - public DefaultUserDefinedType( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - boolean frozen, - @NonNull List fieldNames, - @NonNull List fieldTypes) { - this(keyspace, name, frozen, fieldNames, fieldTypes, AttachmentPoint.NONE); - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @NonNull - @Override - public List getFieldNames() { - return fieldNames; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return index.allIndicesOf(id); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return index.firstIndexOf(id); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return index.allIndicesOf(name); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return index.firstIndexOf(name); - } - - @NonNull - @Override - public List getFieldTypes() { - return fieldTypes; - } - - @NonNull - @Override - public UserDefinedType copy(boolean newFrozen) { - return (newFrozen == frozen) - ? 
this - : new DefaultUserDefinedType( - keyspace, name, newFrozen, fieldNames, fieldTypes, attachmentPoint); - } - - @NonNull - @Override - public UdtValue newValue() { - return new DefaultUdtValue(this); - } - - @NonNull - @Override - public UdtValue newValue(@NonNull Object... fields) { - return new DefaultUdtValue(this, fields); - } - - @Override - public boolean isDetached() { - return attachmentPoint == AttachmentPoint.NONE; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - this.attachmentPoint = attachmentPoint; - for (DataType fieldType : fieldTypes) { - fieldType.attach(attachmentPoint); - } - } - - @NonNull - @Override - public AttachmentPoint getAttachmentPoint() { - return attachmentPoint; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof UserDefinedType) { - UserDefinedType that = (UserDefinedType) other; - // frozen is ignored in comparisons - return this.keyspace.equals(that.getKeyspace()) - && this.name.equals(that.getName()) - && this.fieldNames.equals(that.getFieldNames()) - && this.fieldTypes.equals(that.getFieldTypes()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(keyspace, name, fieldNames, fieldTypes); - } - - @Override - public String toString() { - return "UDT(" + keyspace.asCql(true) + "." 
+ name.asCql(true) + ")"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(keyspace); - Preconditions.checkNotNull(name); - Preconditions.checkArgument( - fieldNames != null && fieldNames.size() > 0, "Field names list can't be null or empty"); - Preconditions.checkArgument( - fieldTypes != null && fieldTypes.size() == fieldNames.size(), - "There should be the same number of field names and types"); - this.attachmentPoint = AttachmentPoint.NONE; - this.index = new IdentifierIndex(this.fieldNames); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultVectorType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultVectorType.java deleted file mode 100644 index 0b1ced94769..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultVectorType.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.VectorType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultVectorType implements VectorType { - - public static final String VECTOR_CLASS_NAME = "org.apache.cassandra.db.marshal.VectorType"; - - private final DataType subtype; - private final int dimensions; - - public DefaultVectorType(DataType subtype, int dimensions) { - - this.dimensions = dimensions; - this.subtype = subtype; - } - - /* ============== ContainerType interface ============== */ - @Override - public DataType getElementType() { - return this.subtype; - } - - /* ============== VectorType interface ============== */ - @Override - public int getDimensions() { - return this.dimensions; - } - - /* ============== CustomType interface ============== */ - @NonNull - @Override - public String getClassName() { - return VECTOR_CLASS_NAME; - } - - @NonNull - @Override - public String asCql(boolean includeFrozen, boolean pretty) { - return String.format("vector<%s, %d>", getElementType().asCql(true, false), getDimensions()); - } - - /* ============== General class implementation ============== */ - @Override - public boolean equals(Object o) { - if (o == this) { - return true; - } else if (o instanceof DefaultVectorType) { - DefaultVectorType that = (DefaultVectorType) o; - return that.subtype.equals(this.subtype) && that.dimensions == this.dimensions; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(DefaultVectorType.class, subtype, dimensions); - } - - @Override - public String toString() { - return String.format("Vector(%s, %d)", getElementType(), getDimensions()); - } - - @Override - public boolean isDetached() { - return false; - } - 
- @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java deleted file mode 100644 index c6f815a7487..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Locale; -import net.jcip.annotations.Immutable; - -@Immutable -public class PrimitiveType implements DataType, Serializable { - - /** @serial */ - private final int protocolCode; - - public PrimitiveType(int protocolCode) { - this.protocolCode = protocolCode; - } - - @Override - public int getProtocolCode() { - return protocolCode; - } - - @Override - public boolean isDetached() { - return false; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - // nothing to do - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof PrimitiveType) { - PrimitiveType that = (PrimitiveType) other; - return this.protocolCode == that.protocolCode; - } else { - return false; - } - } - - @Override - public int hashCode() { - return protocolCode; - } - - @NonNull - @Override - public String asCql(boolean includeFrozen, boolean pretty) { - return codeName(protocolCode).toLowerCase(Locale.ROOT); - } - - @Override - public String toString() { - return codeName(protocolCode); - } - - private static String codeName(int protocolCode) { - // Reminder: we don't use enums to leave the door open for custom extensions - switch (protocolCode) { - case ProtocolConstants.DataType.ASCII: - return "ASCII"; - case ProtocolConstants.DataType.BIGINT: - return "BIGINT"; - case ProtocolConstants.DataType.BLOB: - return "BLOB"; - case ProtocolConstants.DataType.BOOLEAN: - return "BOOLEAN"; - case ProtocolConstants.DataType.COUNTER: - return "COUNTER"; - case ProtocolConstants.DataType.DECIMAL: - return "DECIMAL"; - case ProtocolConstants.DataType.DOUBLE: - return "DOUBLE"; 
- case ProtocolConstants.DataType.FLOAT: - return "FLOAT"; - case ProtocolConstants.DataType.INT: - return "INT"; - case ProtocolConstants.DataType.TIMESTAMP: - return "TIMESTAMP"; - case ProtocolConstants.DataType.UUID: - return "UUID"; - case ProtocolConstants.DataType.VARINT: - return "VARINT"; - case ProtocolConstants.DataType.TIMEUUID: - return "TIMEUUID"; - case ProtocolConstants.DataType.INET: - return "INET"; - case ProtocolConstants.DataType.DATE: - return "DATE"; - case ProtocolConstants.DataType.VARCHAR: - return "TEXT"; - case ProtocolConstants.DataType.TIME: - return "TIME"; - case ProtocolConstants.DataType.SMALLINT: - return "SMALLINT"; - case ProtocolConstants.DataType.TINYINT: - return "TINYINT"; - case ProtocolConstants.DataType.DURATION: - return "DURATION"; - default: - return "0x" + Integer.toHexString(protocolCode); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java deleted file mode 100644 index 43e05f17690..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import net.jcip.annotations.NotThreadSafe; - -/** - * Helper class to build {@link UserDefinedType} instances. - * - *

This is not part of the public API, because building user defined types manually can be - * tricky: the fields must be defined in the exact same order as the database definition, otherwise - * you will insert corrupt data in your database. If you decide to use this class anyway, make sure - * that you define fields in the correct order, and that the database schema never changes. - */ -@NotThreadSafe -public class UserDefinedTypeBuilder { - - private final CqlIdentifier keyspaceName; - private final CqlIdentifier typeName; - private boolean frozen; - private final ImmutableList.Builder fieldNames; - private final ImmutableList.Builder fieldTypes; - private AttachmentPoint attachmentPoint = AttachmentPoint.NONE; - - public UserDefinedTypeBuilder(CqlIdentifier keyspaceName, CqlIdentifier typeName) { - this.keyspaceName = keyspaceName; - this.typeName = typeName; - this.fieldNames = ImmutableList.builder(); - this.fieldTypes = ImmutableList.builder(); - } - - public UserDefinedTypeBuilder(String keyspaceName, String typeName) { - this(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(typeName)); - } - - /** - * Adds a new field. The fields in the resulting type will be in the order of the calls to this - * method. - */ - public UserDefinedTypeBuilder withField(CqlIdentifier name, DataType type) { - fieldNames.add(name); - fieldTypes.add(type); - return this; - } - - public UserDefinedTypeBuilder withField(String name, DataType type) { - return withField(CqlIdentifier.fromCql(name), type); - } - - /** Makes the type frozen (by default, it is not). 
*/ - public UserDefinedTypeBuilder frozen() { - this.frozen = true; - return this; - } - - public UserDefinedTypeBuilder withAttachmentPoint(AttachmentPoint attachmentPoint) { - this.attachmentPoint = attachmentPoint; - return this; - } - - public UserDefinedType build() { - return new DefaultUserDefinedType( - keyspaceName, typeName, frozen, fieldNames.build(), fieldTypes.build(), attachmentPoint); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java deleted file mode 100644 index 8496da17fa6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class BigIntCodec implements PrimitiveLongCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.LONG; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.BIGINT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Long; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Long.class || javaClass == long.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(long value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(8); - bytes.putLong(0, value); - return bytes; - } - - @Override - public long decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 8) { - throw new IllegalArgumentException( - "Invalid 64-bits long value, expecting 8 bytes but got " + bytes.remaining()); - } else { - return bytes.getLong(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Long value) { - return (value == null) ? "NULL" : Long.toString(value); - } - - @Nullable - @Override - public Long parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Long.parseLong(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 64-bits long value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(8); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java deleted file mode 100644 index 1f5fcd5eeaa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * A codec that maps the CQL type {@code blob} to the Java type {@link ByteBuffer}. - * - *

If you are looking for a codec mapping the CQL type {@code blob} to the Java type {@code - * byte[]}, you should use {@link SimpleBlobCodec} instead. - */ -@ThreadSafe -public class BlobCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BYTE_BUFFER; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.BLOB; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof ByteBuffer; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return ByteBuffer.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : value.duplicate(); - } - - @Nullable - @Override - public ByteBuffer decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null) ? null : bytes.duplicate(); - } - - @NonNull - @Override - public String format(@Nullable ByteBuffer value) { - return (value == null) ? "NULL" : Bytes.toHexString(value); - } - - @Nullable - @Override - public ByteBuffer parse(@Nullable String value) { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : Bytes.fromHexString(value); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java deleted file mode 100644 index af388982be9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveBooleanCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class BooleanCodec implements PrimitiveBooleanCodec { - - private static final ByteBuffer TRUE = ByteBuffer.wrap(new byte[] {1}); - private static final ByteBuffer FALSE = ByteBuffer.wrap(new byte[] {0}); - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BOOLEAN; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.BOOLEAN; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Boolean; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Boolean.class || javaClass == boolean.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(boolean value, @NonNull ProtocolVersion protocolVersion) { - return value ? 
TRUE.duplicate() : FALSE.duplicate(); - } - - @Override - public boolean decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return false; - } else if (bytes.remaining() != 1) { - throw new IllegalArgumentException( - "Invalid boolean value, expecting 1 byte but got " + bytes.remaining()); - } else { - return bytes.get(bytes.position()) != 0; - } - } - - @NonNull - @Override - public String format(@Nullable Boolean value) { - if (value == null) { - return "NULL"; - } else { - return value ? "true" : "false"; - } - } - - @Nullable - @Override - public Boolean parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } else if (value.equalsIgnoreCase(Boolean.FALSE.toString())) { - return false; - } else if (value.equalsIgnoreCase(Boolean.TRUE.toString())) { - return true; - } else { - throw new IllegalArgumentException( - String.format("Cannot parse boolean value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(1); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java deleted file mode 100644 index ab90ba09c20..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CounterCodec extends BigIntCodec { - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.COUNTER; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java deleted file mode 100644 index 90f6f56cf06..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.util.VIntCoding; -import com.datastax.oss.driver.shaded.guava.common.io.ByteArrayDataOutput; -import com.datastax.oss.driver.shaded.guava.common.io.ByteStreams; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.DataInput; -import java.io.IOException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlDurationCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.CQL_DURATION; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.DURATION; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof CqlDuration; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == CqlDuration.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable CqlDuration value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - long months = value.getMonths(); - long days = value.getDays(); - long nanoseconds = value.getNanoseconds(); - int size = - VIntCoding.computeVIntSize(months) - + VIntCoding.computeVIntSize(days) - + VIntCoding.computeVIntSize(nanoseconds); - ByteArrayDataOutput out = ByteStreams.newDataOutput(size); - try { - 
VIntCoding.writeVInt(months, out); - VIntCoding.writeVInt(days, out); - VIntCoding.writeVInt(nanoseconds, out); - } catch (IOException e) { - // cannot happen - throw new AssertionError(); - } - return ByteBuffer.wrap(out.toByteArray()); - } - - @Nullable - @Override - public CqlDuration decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } else { - DataInput in = ByteStreams.newDataInput(Bytes.getArray(bytes)); - try { - int months = (int) VIntCoding.readVInt(in); - int days = (int) VIntCoding.readVInt(in); - long nanoseconds = VIntCoding.readVInt(in); - return CqlDuration.newInstance(months, days, nanoseconds); - } catch (IOException e) { - // cannot happen - throw new AssertionError(); - } - } - } - - @NonNull - @Override - public String format(@Nullable CqlDuration value) { - return (value == null) ? "NULL" : value.toString(); - } - - @Nullable - @Override - public CqlDuration parse(@Nullable String value) { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : CqlDuration.from(value); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java deleted file mode 100644 index 61a854e88d8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CustomCodec implements TypeCodec { - - private final CustomType cqlType; - - public CustomCodec(CustomType cqlType) { - this.cqlType = cqlType; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BYTE_BUFFER; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof ByteBuffer; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return ByteBuffer.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : value.duplicate(); - } - - @Nullable - @Override - public ByteBuffer decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null) ? 
null : bytes.duplicate(); - } - - @NonNull - @Override - public String format(@Nullable ByteBuffer value) { - return (value == null) ? "NULL" : Bytes.toHexString(value); - } - - @Nullable - @Override - public ByteBuffer parse(@Nullable String value) { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : Bytes.fromHexString(value); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java deleted file mode 100644 index 2fc463ef7d2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static java.lang.Long.parseLong; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.LocalDate; -import java.time.format.DateTimeFormatter; -import java.time.temporal.ChronoUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DateCodec implements TypeCodec { - - private static final LocalDate EPOCH = LocalDate.of(1970, 1, 1); - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.LOCAL_DATE; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.DATE; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof LocalDate; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == LocalDate.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable LocalDate value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - long days = ChronoUnit.DAYS.between(EPOCH, value); - int unsigned = signedToUnsigned((int) days); - return TypeCodecs.INT.encodePrimitive(unsigned, protocolVersion); - } - - @Nullable - @Override - public LocalDate decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } - int unsigned = TypeCodecs.INT.decodePrimitive(bytes, protocolVersion); - int signed = unsignedToSigned(unsigned); - 
return EPOCH.plusDays(signed); - } - - @NonNull - @Override - public String format(@Nullable LocalDate value) { - return (value == null) ? "NULL" : Strings.quote(DateTimeFormatter.ISO_LOCAL_DATE.format(value)); - } - - @Nullable - @Override - public LocalDate parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - // single quotes are optional for long literals, mandatory for date patterns - // strip enclosing single quotes, if any - if (Strings.isQuoted(value)) { - value = Strings.unquote(value); - } - - if (Strings.isLongLiteral(value)) { - long raw; - try { - raw = parseLong(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse date value from \"%s\"", value)); - } - int days; - try { - days = cqlDateToDaysSinceEpoch(raw); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format("Cannot parse date value from \"%s\"", value)); - } - return EPOCH.plusDays(days); - } - - try { - return LocalDate.parse(value, DateTimeFormatter.ISO_LOCAL_DATE); - } catch (RuntimeException e) { - throw new IllegalArgumentException( - String.format("Cannot parse date value from \"%s\"", value)); - } - } - - private static int signedToUnsigned(int signed) { - return signed - Integer.MIN_VALUE; - } - - private static int unsignedToSigned(int unsigned) { - return unsigned + Integer.MIN_VALUE; // this relies on overflow for "negative" values - } - - /** - * Converts a raw CQL long representing a numeric DATE literal to the number of days since the - * Epoch. In CQL, numeric DATE literals are longs (unsigned integers actually) between 0 and 2^32 - * - 1, with the epoch in the middle; this method re-centers the epoch at 0. 
- */ - private static int cqlDateToDaysSinceEpoch(long raw) { - if (raw < 0 || raw > MAX_CQL_LONG_VALUE) - throw new IllegalArgumentException( - String.format( - "Numeric literals for DATE must be between 0 and %d (got %d)", - MAX_CQL_LONG_VALUE, raw)); - return (int) (raw - EPOCH_AS_CQL_LONG); - } - - private static final long MAX_CQL_LONG_VALUE = ((1L << 32) - 1); - private static final long EPOCH_AS_CQL_LONG = (1L << 31); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java deleted file mode 100644 index 25650b733cd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DecimalCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BIG_DECIMAL; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.DECIMAL; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof BigDecimal; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return BigDecimal.class.isAssignableFrom(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable BigDecimal value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - BigInteger bi = value.unscaledValue(); - int scale = value.scale(); - byte[] bibytes = bi.toByteArray(); - - ByteBuffer bytes = ByteBuffer.allocate(4 + bibytes.length); - bytes.putInt(scale); - bytes.put(bibytes); - bytes.rewind(); - return bytes; - } - - @Nullable - @Override - public BigDecimal decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } else if (bytes.remaining() < 4) { - throw new IllegalArgumentException( - "Invalid decimal value, expecting at least 4 bytes but got " + bytes.remaining()); - } - - bytes = bytes.duplicate(); - int scale = bytes.getInt(); - byte[] bibytes = new byte[bytes.remaining()]; - bytes.get(bibytes); - - 
BigInteger bi = new BigInteger(bibytes); - return new BigDecimal(bi, scale); - } - - @NonNull - @Override - public String format(@Nullable BigDecimal value) { - return (value == null) ? "NULL" : value.toString(); - } - - @Nullable - @Override - public BigDecimal parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : new BigDecimal(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse decimal value from \"%s\"", value)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java deleted file mode 100644 index b01847517d9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveDoubleCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DoubleCodec implements PrimitiveDoubleCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.DOUBLE; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.DOUBLE; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Double; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Double.class || javaClass == double.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(double value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(8); - bytes.putDouble(0, value); - return bytes; - } - - @Override - public double decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 8) { - throw new IllegalArgumentException( - "Invalid 64-bits double value, expecting 8 bytes but got " + bytes.remaining()); - } else { - return bytes.getDouble(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Double value) { - return (value == null) ? "NULL" : Double.toString(value); - } - - @Nullable - @Override - public Double parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Double.parseDouble(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 64-bits double value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(8); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java deleted file mode 100644 index fd851edfad3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveFloatCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class FloatCodec implements PrimitiveFloatCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.FLOAT; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.FLOAT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Float; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Float.class || javaClass == float.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(float value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(4); - bytes.putFloat(0, value); - return bytes; - } - - @Override - public float decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 4) { - throw new IllegalArgumentException( - "Invalid 32-bits float value, expecting 4 bytes but got " + bytes.remaining()); - } else { - return bytes.getFloat(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Float value) { - return (value == null) ? "NULL" : Float.toString(value); - } - - @Nullable - @Override - public Float parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Float.parseFloat(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 32-bits float value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(4); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java deleted file mode 100644 index 167c7109bf9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class InetCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.INET_ADDRESS; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.INET; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof InetAddress; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return InetAddress.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable InetAddress value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : ByteBuffer.wrap(value.getAddress()); - } - - @Nullable - @Override - public InetAddress decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } - try { - return InetAddress.getByAddress(Bytes.getArray(bytes)); - } catch (UnknownHostException e) { - throw new IllegalArgumentException( - "Invalid bytes for inet value, got " + bytes.remaining() + " bytes"); - } - } - - @NonNull - @Override - public String format(@Nullable InetAddress value) { - return (value == null) ? 
"NULL" : ("'" + value.getHostAddress() + "'"); - } - - @Nullable - @Override - public InetAddress parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - value = value.trim(); - if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException( - String.format("inet values must be enclosed in single quotes (\"%s\")", value)); - } - try { - return InetAddress.getByName(value.substring(1, value.length() - 1)); - } catch (Exception e) { - throw new IllegalArgumentException( - String.format("Cannot parse inet value from \"%s\"", value)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java deleted file mode 100644 index b11b164a445..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class IntCodec implements PrimitiveIntCodec { - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.INTEGER; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.INT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Integer; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Integer.class || javaClass == int.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(int value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(4); - bytes.putInt(0, value); - return bytes; - } - - @Override - public int decodePrimitive(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 4) { - throw new IllegalArgumentException( - "Invalid 32-bits integer value, expecting 4 bytes but got " + bytes.remaining()); - } else { - return bytes.getInt(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Integer value) { - return (value == null) ? "NULL" : Integer.toString(value); - } - - @Nullable - @Override - public Integer parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Integer.parseInt(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 32-bits int value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(4); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java deleted file mode 100644 index d587bbd5887..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ListCodec implements TypeCodec> { - - private final DataType cqlType; - private final GenericType> javaType; - private final TypeCodec elementCodec; - - public ListCodec(DataType cqlType, TypeCodec elementCodec) { - this.cqlType = cqlType; - this.javaType = GenericType.listOf(elementCodec.getJavaType()); - this.elementCodec = elementCodec; - Preconditions.checkArgument(cqlType instanceof ListType); - } - - @NonNull - @Override - public GenericType> getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - if (List.class.isAssignableFrom(value.getClass())) { - // runtime type ok, now check element type - List list = (List) value; - return list.isEmpty() || elementCodec.accepts(list.get(0)); - } else { - return false; - } - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable List value, @NonNull ProtocolVersion protocolVersion) { - // An int indicating the number of elements in the list, followed by the elements. Each element - // is a byte array representing the serialized value, preceded by an int indicating its size. 
- if (value == null) { - return null; - } else { - int i = 0; - ByteBuffer[] encodedElements = new ByteBuffer[value.size()]; - int toAllocate = 4; // initialize with number of elements - for (ElementT element : value) { - if (element == null) { - throw new NullPointerException("Collection elements cannot be null"); - } - ByteBuffer encodedElement; - try { - encodedElement = elementCodec.encode(element, protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Invalid type for element: " + element.getClass()); - } - if (encodedElement == null) { - throw new NullPointerException("Collection elements cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedElement; - toAllocate += 4 + encodedElement.remaining(); // the element preceded by its size - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - result.putInt(value.size()); - for (ByteBuffer encodedElement : encodedElements) { - result.putInt(encodedElement.remaining()); - result.put(encodedElement); - } - result.flip(); - return result; - } - } - - @Nullable - @Override - public List decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return new ArrayList<>(0); - } else { - ByteBuffer input = bytes.duplicate(); - int size = input.getInt(); - List result = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - ElementT element; - int elementSize = input.getInt(); - // Allow null elements on the decode path, because Cassandra might return such collections - // for some computed values in the future -- e.g. 
SELECT ttl(some_collection) - if (elementSize < 0) { - element = null; - } else { - ByteBuffer encodedElement = input.slice(); - encodedElement.limit(elementSize); - element = elementCodec.decode(encodedElement, protocolVersion); - input.position(input.position() + elementSize); - } - result.add(element); - } - return result; - } - } - - @NonNull - @Override - public String format(@Nullable List value) { - if (value == null) { - return "NULL"; - } - StringBuilder sb = new StringBuilder("["); - boolean first = true; - for (ElementT t : value) { - if (first) { - first = false; - } else { - sb.append(","); - } - sb.append(elementCodec.format(t)); - } - sb.append("]"); - return sb.toString(); - } - - @Nullable - @Override - public List parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; - - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != '[') - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", at character %d expecting '[' but got '%c'", - value, idx, value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - - if (value.charAt(idx) == ']') { - return new ArrayList<>(0); - } - - List list = new ArrayList<>(); - while (idx < value.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", invalid CQL value at character %d", - value, idx), - e); - } - - list.add(elementCodec.parse(value.substring(idx, n))); - idx = n; - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == ']') return list; - if (value.charAt(idx++) != ',') - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", at character %d expecting ',' but got '%c'", - value, idx, value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - } - throw new 
IllegalArgumentException( - String.format("Malformed list value \"%s\", missing closing ']'", value)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java deleted file mode 100644 index 999f41bf207..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.LinkedHashMap; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class MapCodec implements TypeCodec> { - - private final DataType cqlType; - private final GenericType> javaType; - private final TypeCodec keyCodec; - private final TypeCodec valueCodec; - - public MapCodec(DataType cqlType, TypeCodec keyCodec, TypeCodec valueCodec) { - this.cqlType = cqlType; - this.keyCodec = keyCodec; - this.valueCodec = valueCodec; - this.javaType = GenericType.mapOf(keyCodec.getJavaType(), valueCodec.getJavaType()); - } - - @NonNull - @Override - public GenericType> getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - if (value instanceof Map) { - // runtime type ok, now check key and value types - Map map = (Map) value; - if (map.isEmpty()) { - return true; - } - Map.Entry entry = map.entrySet().iterator().next(); - return keyCodec.accepts(entry.getKey()) && valueCodec.accepts(entry.getValue()); - } - return false; - } - - @Override - @Nullable - public ByteBuffer encode( - @Nullable Map value, @NonNull ProtocolVersion protocolVersion) { - // An int indicating the number of key/value pairs in the map, followed by the pairs. Each pair - // is a byte array representing the serialized key, preceded by an int indicating its size, - // followed by the value in the same format. 
- if (value == null) { - return null; - } else { - int i = 0; - ByteBuffer[] encodedElements = new ByteBuffer[value.size() * 2]; - int toAllocate = 4; // initialize with number of elements - for (Map.Entry entry : value.entrySet()) { - if (entry.getKey() == null) { - throw new NullPointerException("Map keys cannot be null"); - } - if (entry.getValue() == null) { - throw new NullPointerException("Map values cannot be null"); - } - ByteBuffer encodedKey; - try { - encodedKey = keyCodec.encode(entry.getKey(), protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Invalid type for key: " + entry.getKey().getClass()); - } - if (encodedKey == null) { - throw new NullPointerException("Map keys cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedKey; - toAllocate += 4 + encodedKey.remaining(); // the key preceded by its size - ByteBuffer encodedValue; - try { - encodedValue = valueCodec.encode(entry.getValue(), protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException( - "Invalid type for value: " + entry.getValue().getClass()); - } - if (encodedValue == null) { - throw new NullPointerException("Map values cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedValue; - toAllocate += 4 + encodedValue.remaining(); // the value preceded by its size - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - result.putInt(value.size()); - for (ByteBuffer encodedElement : encodedElements) { - result.putInt(encodedElement.remaining()); - result.put(encodedElement); - } - result.flip(); - return result; - } - } - - @Nullable - @Override - public Map decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return new LinkedHashMap<>(0); - } else { - ByteBuffer input = bytes.duplicate(); - int size = input.getInt(); - Map result = Maps.newLinkedHashMapWithExpectedSize(size); - for (int i = 0; i < size; i++) { - KeyT 
key; - int keySize = input.getInt(); - // Allow null elements on the decode path, because Cassandra might return such collections - // for some computed values in the future -- e.g. SELECT ttl(some_collection) - if (keySize < 0) { - key = null; - } else { - ByteBuffer encodedKey = input.slice(); - encodedKey.limit(keySize); - key = keyCodec.decode(encodedKey, protocolVersion); - input.position(input.position() + keySize); - } - ValueT value; - int valueSize = input.getInt(); - if (valueSize < 0) { - value = null; - } else { - ByteBuffer encodedValue = input.slice(); - encodedValue.limit(valueSize); - value = valueCodec.decode(encodedValue, protocolVersion); - input.position(input.position() + valueSize); - } - result.put(key, value); - } - return result; - } - } - - @NonNull - @Override - public String format(@Nullable Map value) { - if (value == null) { - return "NULL"; - } - StringBuilder sb = new StringBuilder(); - sb.append("{"); - boolean first = true; - for (Map.Entry e : value.entrySet()) { - if (first) { - first = false; - } else { - sb.append(","); - } - sb.append(keyCodec.format(e.getKey())); - sb.append(":"); - sb.append(valueCodec.format(e.getValue())); - } - sb.append("}"); - return sb.toString(); - } - - @Nullable - @Override - public Map parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != '{') { - throw new IllegalArgumentException( - String.format( - "cannot parse map value from \"%s\", at character %d expecting '{' but got '%c'", - value, idx, value.charAt(idx))); - } - - idx = ParseUtils.skipSpaces(value, idx); - - if (value.charAt(idx) == '}') { - return new LinkedHashMap<>(0); - } - - Map map = new LinkedHashMap<>(); - while (idx < value.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - 
String.format( - "Cannot parse map value from \"%s\", invalid CQL value at character %d", - value, idx), - e); - } - - KeyT k = keyCodec.parse(value.substring(idx, n)); - idx = n; - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx++) != ':') { - throw new IllegalArgumentException( - String.format( - "Cannot parse map value from \"%s\", at character %d expecting ':' but got '%c'", - value, idx, value.charAt(idx))); - } - idx = ParseUtils.skipSpaces(value, idx); - - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse map value from \"%s\", invalid CQL value at character %d", - value, idx), - e); - } - - ValueT v = valueCodec.parse(value.substring(idx, n)); - idx = n; - - map.put(k, v); - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == '}') { - return map; - } - if (value.charAt(idx++) != ',') { - throw new IllegalArgumentException( - String.format( - "Cannot parse map value from \"%s\", at character %d expecting ',' but got '%c'", - value, idx, value.charAt(idx))); - } - - idx = ParseUtils.skipSpaces(value, idx); - } - throw new IllegalArgumentException( - String.format("Malformed map value \"%s\", missing closing '}'", value)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java deleted file mode 100644 index a52130a093d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -public class ParseUtils { - - /** - * Returns the index of the first character in toParse from idx that is not a "space". - * - * @param toParse the string to skip space on. - * @param idx the index to start skipping space from. - * @return the index of the first character in toParse from idx that is not a "space. - */ - public static int skipSpaces(String toParse, int idx) { - while (idx < toParse.length() && isBlank(toParse.charAt(idx))) ++idx; - return idx; - } - - /** - * Assuming that idx points to the beginning of a CQL value in toParse, returns the index of the - * first character after this value. - * - * @param toParse the string to skip a value form. - * @param idx the index to start parsing a value from. - * @return the index ending the CQL value starting at {@code idx}. - * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL value. 
- */ - public static int skipCQLValue(String toParse, int idx) { - if (idx >= toParse.length()) throw new IllegalArgumentException(); - - if (isBlank(toParse.charAt(idx))) throw new IllegalArgumentException(); - - int cbrackets = 0; - int sbrackets = 0; - int parens = 0; - boolean inString = false; - - do { - char c = toParse.charAt(idx); - if (inString) { - if (c == '\'') { - if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\'') { - ++idx; // this is an escaped quote, skip it - } else { - inString = false; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; - } - } - // Skip any other character - } else if (c == '\'') { - inString = true; - } else if (c == '{') { - ++cbrackets; - } else if (c == '[') { - ++sbrackets; - } else if (c == '(') { - ++parens; - } else if (c == '}') { - if (cbrackets == 0) return idx; - - --cbrackets; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; - } else if (c == ']') { - if (sbrackets == 0) return idx; - - --sbrackets; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; - } else if (c == ')') { - if (parens == 0) return idx; - - --parens; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; - } else if (isBlank(c) || !isCqlIdentifierChar(c)) { - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx; - } - } while (++idx < toParse.length()); - - if (inString || cbrackets != 0 || sbrackets != 0 || parens != 0) - throw new IllegalArgumentException(); - return idx; - } - - /** - * Assuming that idx points to the beginning of a CQL identifier in toParse, returns the index of - * the first character after this identifier. - * - * @param toParse the string to skip an identifier from. - * @param idx the index to start parsing an identifier from. - * @return the index ending the CQL identifier starting at {@code idx}. - * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL identifier. 
- */ - public static int skipCQLId(String toParse, int idx) { - if (idx >= toParse.length()) throw new IllegalArgumentException(); - - char c = toParse.charAt(idx); - if (isCqlIdentifierChar(c)) { - while (idx < toParse.length() && isCqlIdentifierChar(toParse.charAt(idx))) idx++; - return idx; - } - - if (c != '"') throw new IllegalArgumentException(); - - while (++idx < toParse.length()) { - c = toParse.charAt(idx); - if (c != '"') continue; - - if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\"') - ++idx; // this is an escaped double quote, skip it - else return idx + 1; - } - throw new IllegalArgumentException(); - } - - public static boolean isBlank(int c) { - return c == ' ' || c == '\t' || c == '\n'; - } - - public static boolean isCqlIdentifierChar(int c) { - return (c >= '0' && c <= '9') - || (c >= 'a' && c <= 'z') - || (c >= 'A' && c <= 'Z') - || c == '-' - || c == '+' - || c == '.' - || c == '_' - || c == '&'; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java deleted file mode 100644 index fc4c0887516..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.LinkedHashSet; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class SetCodec implements TypeCodec> { - - private final DataType cqlType; - private final GenericType> javaType; - private final TypeCodec elementCodec; - - public SetCodec(DataType cqlType, TypeCodec elementCodec) { - this.cqlType = cqlType; - this.javaType = GenericType.setOf(elementCodec.getJavaType()); - this.elementCodec = elementCodec; - Preconditions.checkArgument(cqlType instanceof SetType); - } - - @NonNull - @Override - public GenericType> getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - if (Set.class.isAssignableFrom(value.getClass())) { - // runtime type ok, now check element type - Set set = (Set) value; - return set.isEmpty() || 
elementCodec.accepts(set.iterator().next()); - } else { - return false; - } - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable Set value, @NonNull ProtocolVersion protocolVersion) { - // An int indicating the number of elements in the set, followed by the elements. Each element - // is a byte array representing the serialized value, preceded by an int indicating its size. - if (value == null) { - return null; - } else { - int i = 0; - ByteBuffer[] encodedElements = new ByteBuffer[value.size()]; - int toAllocate = 4; // initialize with number of elements - for (ElementT element : value) { - if (element == null) { - throw new NullPointerException("Collection elements cannot be null"); - } - ByteBuffer encodedElement; - try { - encodedElement = elementCodec.encode(element, protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Invalid type for element: " + element.getClass()); - } - if (encodedElement == null) { - throw new NullPointerException("Collection elements cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedElement; - toAllocate += 4 + encodedElement.remaining(); // the element preceded by its size - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - result.putInt(value.size()); - for (ByteBuffer encodedElement : encodedElements) { - result.putInt(encodedElement.remaining()); - result.put(encodedElement); - } - result.flip(); - return result; - } - } - - @Nullable - @Override - public Set decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return new LinkedHashSet<>(0); - } else { - ByteBuffer input = bytes.duplicate(); - int size = input.getInt(); - Set result = Sets.newLinkedHashSetWithExpectedSize(size); - for (int i = 0; i < size; i++) { - ElementT element; - int elementSize = input.getInt(); - // Allow null elements on the decode path, because Cassandra might return such collections - // for some 
computed values in the future -- e.g. SELECT ttl(some_collection) - if (elementSize < 0) { - element = null; - } else { - ByteBuffer encodedElement = input.slice(); - encodedElement.limit(elementSize); - element = elementCodec.decode(encodedElement, protocolVersion); - input.position(input.position() + elementSize); - } - result.add(element); - } - return result; - } - } - - @NonNull - @Override - public String format(@Nullable Set value) { - if (value == null) { - return "NULL"; - } - StringBuilder sb = new StringBuilder("{"); - boolean first = true; - for (ElementT t : value) { - if (first) { - first = false; - } else { - sb.append(","); - } - sb.append(elementCodec.format(t)); - } - sb.append("}"); - return sb.toString(); - } - - @Nullable - @Override - public Set parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; - - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != '{') - throw new IllegalArgumentException( - String.format( - "Cannot parse set value from \"%s\", at character %d expecting '{' but got '%c'", - value, idx, value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - - if (value.charAt(idx) == '}') { - return new LinkedHashSet<>(0); - } - - Set set = new LinkedHashSet<>(); - while (idx < value.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse set value from \"%s\", invalid CQL value at character %d", - value, idx), - e); - } - - set.add(elementCodec.parse(value.substring(idx, n))); - idx = n; - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == '}') return set; - if (value.charAt(idx++) != ',') - throw new IllegalArgumentException( - String.format( - "Cannot parse set value from \"%s\", at character %d expecting ',' but got '%c'", - value, idx, value.charAt(idx))); - - idx = 
ParseUtils.skipSpaces(value, idx); - } - throw new IllegalArgumentException( - String.format("Malformed set value \"%s\", missing closing '}'", value)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodec.java deleted file mode 100644 index 9f90feb8e7c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodec.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.data.ByteUtils; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.ByteListToArrayCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code blob} to the Java type {@code byte[]}. - * - *

If you are looking for a codec mapping the CQL type {@code blob} to the Java type {@link - * ByteBuffer}, you should use {@link BlobCodec} instead. - * - *

If you are looking for a codec mapping the CQL type {@code list { - - public SimpleBlobCodec() { - super(TypeCodecs.BLOB, GenericType.of(byte[].class)); - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof byte[]; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return byte[].class.equals(javaClass); - } - - @Nullable - @Override - protected byte[] innerToOuter(@Nullable ByteBuffer value) { - return value == null ? null : ByteUtils.getArray(value); - } - - @Nullable - @Override - protected ByteBuffer outerToInner(@Nullable byte[] value) { - return value == null ? null : ByteBuffer.wrap(value); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java deleted file mode 100644 index 08beb0b34c5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveShortCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class SmallIntCodec implements PrimitiveShortCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.SHORT; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.SMALLINT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Short; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Short.class || javaClass == short.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(short value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(2); - bytes.putShort(0, value); - return bytes; - } - - @Override - public short decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 2) { - throw new IllegalArgumentException( - "Invalid 16-bits integer value, expecting 2 bytes but got " + bytes.remaining()); - } else { - return bytes.getShort(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Short value) { - return (value == null) ? "NULL" : Short.toString(value); - } - - @Nullable - @Override - public Short parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Short.parseShort(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 16-bits int value from \"%s\"", value)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java deleted file mode 100644 index 2a9acdd8c47..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.util.concurrent.FastThreadLocal; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.CharacterCodingException; -import java.nio.charset.Charset; -import java.nio.charset.CharsetDecoder; -import java.nio.charset.CharsetEncoder; -import java.nio.charset.CodingErrorAction; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class StringCodec implements TypeCodec { - - private final DataType cqlType; - private final FastThreadLocal charsetEncoder; - private final FastThreadLocal charsetDecoder; - - public StringCodec(@NonNull DataType cqlType, @NonNull Charset charset) { - this.cqlType = cqlType; - charsetEncoder = - new FastThreadLocal() { - @Override - protected CharsetEncoder initialValue() throws Exception { - return charset - .newEncoder() - .onMalformedInput(CodingErrorAction.REPORT) - .onUnmappableCharacter(CodingErrorAction.REPORT); - } - }; - charsetDecoder = - new FastThreadLocal() { - @Override - protected CharsetDecoder initialValue() throws Exception { - return charset - .newDecoder() - .onMalformedInput(CodingErrorAction.REPORT) - .onUnmappableCharacter(CodingErrorAction.REPORT); - } - }; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.STRING; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof String; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - 
return javaClass == String.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable String value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - try { - return charsetEncoder.get().encode(CharBuffer.wrap(value)); - } catch (CharacterCodingException e) { - throw new IllegalArgumentException(e); - } - } - - @Nullable - @Override - public String decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } else if (bytes.remaining() == 0) { - return ""; - } else { - try { - return charsetDecoder.get().decode(bytes.duplicate()).toString(); - } catch (CharacterCodingException e) { - throw new IllegalArgumentException(e); - } - } - } - - @NonNull - @Override - public String format(@Nullable String value) { - return (value == null) ? "NULL" : Strings.quote(value); - } - - @Nullable - @Override - public String parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } else if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException( - "text or varchar values must be enclosed by single quotes"); - } else { - return Strings.unquote(value); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java deleted file mode 100644 index 4977687342d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.LocalTime; -import java.time.format.DateTimeFormatter; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class TimeCodec implements TypeCodec { - - private static final DateTimeFormatter FORMATTER = - DateTimeFormatter.ofPattern("HH:mm:ss.SSSSSSSSS"); - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.LOCAL_TIME; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TIME; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof LocalTime; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == LocalTime.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable LocalTime value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) - ? 
null - : TypeCodecs.BIGINT.encodePrimitive(value.toNanoOfDay(), protocolVersion); - } - - @Nullable - @Override - public LocalTime decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } else { - long nanosOfDay = TypeCodecs.BIGINT.decodePrimitive(bytes, protocolVersion); - return LocalTime.ofNanoOfDay(nanosOfDay); - } - } - - @NonNull - @Override - public String format(@Nullable LocalTime value) { - return (value == null) ? "NULL" : Strings.quote(FORMATTER.format(value)); - } - - @Nullable - @Override - public LocalTime parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - // enclosing single quotes required, even for long literals - if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException("time values must be enclosed by single quotes"); - } - value = value.substring(1, value.length() - 1); - - if (Strings.isLongLiteral(value)) { - try { - return LocalTime.ofNanoOfDay(Long.parseLong(value)); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse time value from \"%s\"", value), e); - } - } - - try { - return LocalTime.parse(value); - } catch (RuntimeException e) { - throw new IllegalArgumentException( - String.format("Cannot parse time value from \"%s\"", value), e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodec.java deleted file mode 100644 index 95744f63ee3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodec.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class TimeUuidCodec extends UuidCodec { - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TIMEUUID; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof UUID && ((UUID) value).version() == 1; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == UUID.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable UUID value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } else if (value.version() != 1) { - throw new IllegalArgumentException( - String.format("%s is not a Type 1 (time-based) UUID", value)); - } else { - return super.encode(value, protocolVersion); - } - } - - @NonNull - @Override - public String format(@Nullable UUID value) { - if (value == null) { - return "NULL"; - } 
else if (value.version() != 1) { - throw new IllegalArgumentException( - String.format("%s is not a Type 1 (time-based) UUID", value)); - } else { - return super.format(value); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodec.java deleted file mode 100644 index 964f774c8d9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodec.java +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.util.concurrent.FastThreadLocal; -import java.nio.ByteBuffer; -import java.text.ParsePosition; -import java.text.SimpleDateFormat; -import java.time.Instant; -import java.time.ZoneId; -import java.util.Date; -import java.util.Optional; -import java.util.TimeZone; -import net.jcip.annotations.ThreadSafe; - -/** - * A codec that handles Apache Cassandra(R)'s timestamp type and maps it to Java's {@link Instant}. - * - *

Implementation notes: - * - *

    - *
  1. Because {@code Instant} uses a precision of nanoseconds, whereas the timestamp type uses a - * precision of milliseconds, truncation will happen for any excess precision information as - * though the amount in nanoseconds was subject to integer division by one million. - *
  2. For compatibility reasons, this codec uses the legacy {@link SimpleDateFormat} API - * internally when parsing and formatting, and converts from {@link Instant} to {@link Date} - * and vice versa. Specially when parsing, this may yield different results as compared to - * what the newer Java Time API parsers would have produced for the same input. - *
  3. Also, {@code Instant} can store points on the time-line further in the future and further - * in the past than {@code Date}. This codec will throw an exception when attempting to parse - * or format an {@code Instant} falling in this category. - *
- * - *

Accepted date-time formats

- * - * The following patterns are valid CQL timestamp literal formats for Apache Cassandra(R) 3.0 and - * higher, and are thus all recognized when parsing: - * - *
    - *
  1. {@code yyyy-MM-dd'T'HH:mm} - *
  2. {@code yyyy-MM-dd'T'HH:mm:ss} - *
  3. {@code yyyy-MM-dd'T'HH:mm:ss.SSS} - *
  4. {@code yyyy-MM-dd'T'HH:mmX} - *
  5. {@code yyyy-MM-dd'T'HH:mmXX} - *
  6. {@code yyyy-MM-dd'T'HH:mmXXX} - *
  7. {@code yyyy-MM-dd'T'HH:mm:ssX} - *
  8. {@code yyyy-MM-dd'T'HH:mm:ssXX} - *
  9. {@code yyyy-MM-dd'T'HH:mm:ssXXX} - *
  10. {@code yyyy-MM-dd'T'HH:mm:ss.SSSX} - *
  11. {@code yyyy-MM-dd'T'HH:mm:ss.SSSXX} - *
  12. {@code yyyy-MM-dd'T'HH:mm:ss.SSSXXX} - *
  13. {@code yyyy-MM-dd'T'HH:mm z} - *
  14. {@code yyyy-MM-dd'T'HH:mm:ss z} - *
  15. {@code yyyy-MM-dd'T'HH:mm:ss.SSS z} - *
  16. {@code yyyy-MM-dd HH:mm} - *
  17. {@code yyyy-MM-dd HH:mm:ss} - *
  18. {@code yyyy-MM-dd HH:mm:ss.SSS} - *
  19. {@code yyyy-MM-dd HH:mmX} - *
  20. {@code yyyy-MM-dd HH:mmXX} - *
  21. {@code yyyy-MM-dd HH:mmXXX} - *
  22. {@code yyyy-MM-dd HH:mm:ssX} - *
  23. {@code yyyy-MM-dd HH:mm:ssXX} - *
  24. {@code yyyy-MM-dd HH:mm:ssXXX} - *
  25. {@code yyyy-MM-dd HH:mm:ss.SSSX} - *
  26. {@code yyyy-MM-dd HH:mm:ss.SSSXX} - *
  27. {@code yyyy-MM-dd HH:mm:ss.SSSXXX} - *
  28. {@code yyyy-MM-dd HH:mm z} - *
  29. {@code yyyy-MM-dd HH:mm:ss z} - *
  30. {@code yyyy-MM-dd HH:mm:ss.SSS z} - *
  31. {@code yyyy-MM-dd} - *
  32. {@code yyyy-MM-ddX} - *
  33. {@code yyyy-MM-ddXX} - *
  34. {@code yyyy-MM-ddXXX} - *
  35. {@code yyyy-MM-dd z} - *
- * - * By default, when parsing, timestamp literals that do not include any time zone information will - * be interpreted using the system's {@linkplain ZoneId#systemDefault() default time zone}. This is - * intended to mimic Apache Cassandra(R)'s own parsing behavior (see {@code - * org.apache.cassandra.serializers.TimestampSerializer}). The default time zone can be modified - * using the {@linkplain TimestampCodec#TimestampCodec(ZoneId) one-arg constructor} that takes a - * custom {@link ZoneId} as an argument. - * - *

When formatting, the pattern used is always {@code yyyy-MM-dd'T'HH:mm:ss.SSSXXX} and the time - * zone is either the the system's default one, or the one that was provided when instantiating the - * codec. - */ -@ThreadSafe -public class TimestampCodec implements TypeCodec { - - /** - * Patterns accepted by Apache Cassandra(R) 3.0 and higher when parsing CQL literals. - * - *

Note that Cassandra's TimestampSerializer declares many more patterns but some of them are - * equivalent when parsing. - */ - private static final String[] DATE_STRING_PATTERNS = - new String[] { - // 1) date-time patterns separated by 'T' - // (declared first because none of the others are ISO compliant, but some of these are) - // 1.a) without time zone - "yyyy-MM-dd'T'HH:mm", - "yyyy-MM-dd'T'HH:mm:ss", - "yyyy-MM-dd'T'HH:mm:ss.SSS", - // 1.b) with ISO-8601 time zone - "yyyy-MM-dd'T'HH:mmX", - "yyyy-MM-dd'T'HH:mmXX", - "yyyy-MM-dd'T'HH:mmXXX", - "yyyy-MM-dd'T'HH:mm:ssX", - "yyyy-MM-dd'T'HH:mm:ssXX", - "yyyy-MM-dd'T'HH:mm:ssXXX", - "yyyy-MM-dd'T'HH:mm:ss.SSSX", - "yyyy-MM-dd'T'HH:mm:ss.SSSXX", - "yyyy-MM-dd'T'HH:mm:ss.SSSXXX", - // 1.c) with generic time zone - "yyyy-MM-dd'T'HH:mm z", - "yyyy-MM-dd'T'HH:mm:ss z", - "yyyy-MM-dd'T'HH:mm:ss.SSS z", - // 2) date-time patterns separated by whitespace - // 2.a) without time zone - "yyyy-MM-dd HH:mm", - "yyyy-MM-dd HH:mm:ss", - "yyyy-MM-dd HH:mm:ss.SSS", - // 2.b) with ISO-8601 time zone - "yyyy-MM-dd HH:mmX", - "yyyy-MM-dd HH:mmXX", - "yyyy-MM-dd HH:mmXXX", - "yyyy-MM-dd HH:mm:ssX", - "yyyy-MM-dd HH:mm:ssXX", - "yyyy-MM-dd HH:mm:ssXXX", - "yyyy-MM-dd HH:mm:ss.SSSX", - "yyyy-MM-dd HH:mm:ss.SSSXX", - "yyyy-MM-dd HH:mm:ss.SSSXXX", - // 2.c) with generic time zone - "yyyy-MM-dd HH:mm z", - "yyyy-MM-dd HH:mm:ss z", - "yyyy-MM-dd HH:mm:ss.SSS z", - // 3) date patterns without time - // 3.a) without time zone - "yyyy-MM-dd", - // 3.b) with ISO-8601 time zone - "yyyy-MM-ddX", - "yyyy-MM-ddXX", - "yyyy-MM-ddXXX", - // 3.c) with generic time zone - "yyyy-MM-dd z" - }; - - private final FastThreadLocal parser; - - private final FastThreadLocal formatter; - - /** - * Creates a new {@code TimestampCodec} that uses the system's {@linkplain ZoneId#systemDefault() - * default time zone} to parse timestamp literals that do not include any time zone information. 
- */ - public TimestampCodec() { - this(ZoneId.systemDefault()); - } - - /** - * Creates a new {@code TimestampCodec} that uses the given {@link ZoneId} to parse timestamp - * literals that do not include any time zone information. - */ - public TimestampCodec(ZoneId defaultZoneId) { - parser = - new FastThreadLocal() { - @Override - protected SimpleDateFormat initialValue() { - SimpleDateFormat parser = new SimpleDateFormat(); - parser.setLenient(false); - parser.setTimeZone(TimeZone.getTimeZone(defaultZoneId)); - return parser; - } - }; - formatter = - new FastThreadLocal() { - @Override - protected SimpleDateFormat initialValue() { - SimpleDateFormat parser = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); - parser.setTimeZone(TimeZone.getTimeZone(defaultZoneId)); - return parser; - } - }; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.INSTANT; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TIMESTAMP; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Instant; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Instant.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable Instant value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) - ? null - : TypeCodecs.BIGINT.encodePrimitive(value.toEpochMilli(), protocolVersion); - } - - @Nullable - @Override - public Instant decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : Instant.ofEpochMilli(TypeCodecs.BIGINT.decodePrimitive(bytes, protocolVersion)); - } - - @NonNull - @Override - public String format(@Nullable Instant value) { - return (value == null) ? 
"NULL" : Strings.quote(formatter.get().format(Date.from(value))); - } - - @Nullable - @Override - public Instant parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - String unquoted = Strings.unquote(value); - if (Strings.isLongLiteral(unquoted)) { - // Numeric literals may be quoted or not - try { - return Instant.ofEpochMilli(Long.parseLong(unquoted)); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse timestamp value from \"%s\"", value)); - } - } else { - // Alphanumeric literals must be quoted - if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException( - String.format("Alphanumeric timestamp literal must be quoted: \"%s\"", value)); - } - SimpleDateFormat parser = this.parser.get(); - TimeZone timeZone = parser.getTimeZone(); - ParsePosition pos = new ParsePosition(0); - for (String pattern : DATE_STRING_PATTERNS) { - parser.applyPattern(pattern); - pos.setIndex(0); - try { - Date date = parser.parse(unquoted, pos); - if (date != null && pos.getIndex() == unquoted.length()) { - return date.toInstant(); - } - } finally { - // restore the parser's default time zone, it might have been modified by the call to - // parse() - parser.setTimeZone(timeZone); - } - } - throw new IllegalArgumentException( - String.format("Cannot parse timestamp value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(8); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodec.java deleted file mode 100644 index 13bf79b70d5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveByteCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class TinyIntCodec implements PrimitiveByteCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BYTE; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TINYINT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Byte; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Byte.class || javaClass == byte.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(byte value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(1); - bytes.put(0, value); - return bytes; - } - - @Override - public byte decodePrimitive( - 
@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 1) { - throw new IllegalArgumentException( - "Invalid 8-bits integer value, expecting 1 byte but got " + bytes.remaining()); - } else { - return bytes.get(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Byte value) { - return (value == null) ? "NULL" : Byte.toString(value); - } - - @Nullable - @Override - public Byte parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : Byte.parseByte(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 8-bits int value from \"%s\"", value)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodec.java deleted file mode 100644 index cc85266682c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodec.java +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class TupleCodec implements TypeCodec { - - private final TupleType cqlType; - - public TupleCodec(@NonNull TupleType cqlType) { - this.cqlType = cqlType; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.TUPLE_VALUE; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - return (value instanceof TupleValue) && ((TupleValue) value).getType().equals(cqlType); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return TupleValue.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable TupleValue value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - if (!value.getType().equals(cqlType)) { - throw new IllegalArgumentException( - String.format("Invalid tuple type, expected %s but got %s", cqlType, value.getType())); - } - // Encoding: each field as a [bytes] value ([bytes] = int length + contents, null is - // represented by -1) - int toAllocate = 0; - for (int i = 0; i < value.size(); i++) { - ByteBuffer field = 
value.getBytesUnsafe(i); - toAllocate += 4 + (field == null ? 0 : field.remaining()); - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - for (int i = 0; i < value.size(); i++) { - ByteBuffer field = value.getBytesUnsafe(i); - if (field == null) { - result.putInt(-1); - } else { - result.putInt(field.remaining()); - result.put(field.duplicate()); - } - } - return (ByteBuffer) result.flip(); - } - - @Nullable - @Override - public TupleValue decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } - // empty byte buffers will result in empty values - try { - ByteBuffer input = bytes.duplicate(); - TupleValue value = cqlType.newValue(); - int i = 0; - while (input.hasRemaining()) { - if (i > cqlType.getComponentTypes().size()) { - throw new IllegalArgumentException( - String.format( - "Too many fields in encoded tuple, expected %d", - cqlType.getComponentTypes().size())); - } - int elementSize = input.getInt(); - ByteBuffer element; - if (elementSize < 0) { - element = null; - } else { - element = input.slice(); - element.limit(elementSize); - input.position(input.position() + elementSize); - } - value = value.setBytesUnsafe(i, element); - i += 1; - } - return value; - } catch (BufferUnderflowException e) { - throw new IllegalArgumentException("Not enough bytes to deserialize a tuple", e); - } - } - - @NonNull - @Override - public String format(@Nullable TupleValue value) { - if (value == null) { - return "NULL"; - } - if (!value.getType().equals(cqlType)) { - throw new IllegalArgumentException( - String.format("Invalid tuple type, expected %s but got %s", cqlType, value.getType())); - } - CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - - StringBuilder sb = new StringBuilder("("); - boolean first = true; - for (int i = 0; i < value.size(); i++) { - if (first) { - first = false; - } else { - sb.append(","); - } - DataType elementType = 
cqlType.getComponentTypes().get(i); - TypeCodec codec = registry.codecFor(elementType); - sb.append(codec.format(value.get(i, codec))); - } - sb.append(")"); - return sb.toString(); - } - - @Nullable - @Override - public TupleValue parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - TupleValue tuple = cqlType.newValue(); - int length = value.length(); - - int position = ParseUtils.skipSpaces(value, 0); - if (value.charAt(position) != '(') { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at character %d expecting '(' but got '%c'", - value, position, value.charAt(position))); - } - - position++; - position = ParseUtils.skipSpaces(value, position); - - CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - - int field = 0; - while (position < length) { - if (value.charAt(position) == ')') { - position = ParseUtils.skipSpaces(value, position + 1); - if (position == length) { - return tuple; - } - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at character %d expecting EOF or blank, but got \"%s\"", - value, position, value.substring(position))); - } - int n; - try { - n = ParseUtils.skipCQLValue(value, position); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", invalid CQL value at field %d (character %d)", - value, field, position), - e); - } - - String fieldValue = value.substring(position, n); - DataType elementType = cqlType.getComponentTypes().get(field); - TypeCodec codec = registry.codecFor(elementType); - Object parsed; - try { - parsed = codec.parse(fieldValue); - } catch (Exception e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", invalid CQL value at field %d (character %d): %s", - value, field, position, e.getMessage()), - e); 
- } - tuple = tuple.set(field, parsed, codec); - - position = n; - - position = ParseUtils.skipSpaces(value, position); - if (position == length) { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at field %d (character %d) expecting ',' or ')', but got EOF", - value, field, position)); - } - if (value.charAt(position) == ')') { - continue; - } - if (value.charAt(position) != ',') { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at field %d (character %d) expecting ',' but got '%c'", - value, field, position, value.charAt(position))); - } - ++position; // skip ',' - - position = ParseUtils.skipSpaces(value, position); - field += 1; - } - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at field %d (character %d) expecting CQL value or ')', got EOF", - value, field, position)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java deleted file mode 100644 index 5d0a379f761..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class UdtCodec implements TypeCodec { - - private static final Logger LOG = LoggerFactory.getLogger(UdtCodec.class); - - private final UserDefinedType cqlType; - - public UdtCodec(@NonNull UserDefinedType cqlType) { - this.cqlType = cqlType; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.UDT_VALUE; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof UdtValue && ((UdtValue) value).getType().equals(cqlType); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return UdtValue.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable UdtValue value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - if (!value.getType().equals(cqlType)) { - throw new IllegalArgumentException( - String.format( - "Invalid user defined type, expected %s but got %s", cqlType, value.getType())); - 
} - // Encoding: each field as a [bytes] value ([bytes] = int length + contents, null is - // represented by -1) - int toAllocate = 0; - int size = cqlType.getFieldTypes().size(); - for (int i = 0; i < size; i++) { - ByteBuffer field = value.getBytesUnsafe(i); - toAllocate += 4 + (field == null ? 0 : field.remaining()); - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - for (int i = 0; i < value.size(); i++) { - ByteBuffer field = value.getBytesUnsafe(i); - if (field == null) { - result.putInt(-1); - } else { - result.putInt(field.remaining()); - result.put(field.duplicate()); - } - } - return (ByteBuffer) result.flip(); - } - - @Nullable - @Override - public UdtValue decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } - // empty byte buffers will result in empty values - try { - ByteBuffer input = bytes.duplicate(); - UdtValue value = cqlType.newValue(); - int i = 0; - while (input.hasRemaining()) { - if (i == cqlType.getFieldTypes().size()) { - LOG.debug("Encountered unexpected fields when parsing codec {}", cqlType); - break; - } - int elementSize = input.getInt(); - ByteBuffer element; - if (elementSize < 0) { - element = null; - } else { - element = input.slice(); - element.limit(elementSize); - input.position(input.position() + elementSize); - } - value = value.setBytesUnsafe(i, element); - i += 1; - } - return value; - } catch (BufferUnderflowException e) { - throw new IllegalArgumentException("Not enough bytes to deserialize a UDT value", e); - } - } - - @NonNull - @Override - public String format(@Nullable UdtValue value) { - if (value == null) { - return "NULL"; - } - - CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - - StringBuilder sb = new StringBuilder("{"); - int size = cqlType.getFieldTypes().size(); - boolean first = true; - for (int i = 0; i < size; i++) { - if (first) { - first = false; - } else { - sb.append(","); - } - CqlIdentifier 
elementName = cqlType.getFieldNames().get(i); - sb.append(elementName.asCql(true)); - sb.append(":"); - DataType elementType = cqlType.getFieldTypes().get(i); - TypeCodec codec = registry.codecFor(elementType); - sb.append(codec.format(value.get(i, codec))); - } - sb.append("}"); - return sb.toString(); - } - - @Nullable - @Override - public UdtValue parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - UdtValue udt = cqlType.newValue(); - int length = value.length(); - - int position = ParseUtils.skipSpaces(value, 0); - if (value.charAt(position) != '{') { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\" at character %d: expecting '{' but got '%c'", - value, position, value.charAt(position))); - } - - position++; - position = ParseUtils.skipSpaces(value, position); - - if (position == length) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\" at character %d: expecting CQL identifier or '}', got EOF", - value, position)); - } - - CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - - CqlIdentifier id = null; - while (position < length) { - if (value.charAt(position) == '}') { - position = ParseUtils.skipSpaces(value, position + 1); - if (position == length) { - return udt; - } - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", at character %d expecting EOF or blank, but got \"%s\"", - value, position, value.substring(position))); - } - int n; - try { - n = ParseUtils.skipCQLId(value, position); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", cannot parse a CQL identifier at character %d", - value, position), - e); - } - id = CqlIdentifier.fromInternal(value.substring(position, n)); - position = n; - - if (!cqlType.contains(id)) { - throw new 
IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", unknown CQL identifier at character %d: \"%s\"", - value, position, id)); - } - - position = ParseUtils.skipSpaces(value, position); - if (position == length) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ':', but got EOF", - value, id, position)); - } - if (value.charAt(position) != ':') { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ':', but got '%c'", - value, id, position, value.charAt(position))); - } - position++; - position = ParseUtils.skipSpaces(value, position); - - try { - n = ParseUtils.skipCQLValue(value, position); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", invalid CQL value at field %s (character %d)", - value, id, position), - e); - } - - String fieldValue = value.substring(position, n); - // This works because ids occur at most once in UDTs - DataType fieldType = cqlType.getFieldTypes().get(cqlType.firstIndexOf(id)); - TypeCodec codec = registry.codecFor(fieldType); - Object parsed; - try { - parsed = codec.parse(fieldValue); - } catch (Exception e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", invalid CQL value at field %s (character %d): %s", - value, id, position, e.getMessage()), - e); - } - udt = udt.set(id, parsed, codec); - position = n; - - position = ParseUtils.skipSpaces(value, position); - if (position == length) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ',' or '}', but got EOF", - value, id, position)); - } - if (value.charAt(position) == '}') { - continue; - } - if (value.charAt(position) != ',') { - throw new IllegalArgumentException( - String.format( - 
"Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ',' but got '%c'", - value, id, position, value.charAt(position))); - } - ++position; // skip ',' - - position = ParseUtils.skipSpaces(value, position); - } - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\" at field %s (character %d): expecting CQL identifier or '}', got EOF", - value, id, position)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java deleted file mode 100644 index cc5f48dbe52..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class UuidCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.UUID; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.UUID; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof UUID; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == UUID.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable UUID value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - ByteBuffer bytes = ByteBuffer.allocate(16); - bytes.putLong(0, value.getMostSignificantBits()); - bytes.putLong(8, value.getLeastSignificantBits()); - return bytes; - } - - @Nullable - @Override - public UUID decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } else if (bytes.remaining() != 16) { - throw new IllegalArgumentException( - "Unexpected number of bytes for a UUID, expected 16, got " + bytes.remaining()); - } else { - return new UUID(bytes.getLong(bytes.position()), bytes.getLong(bytes.position() + 8)); - } - } - - @NonNull - @Override - public String format(@Nullable UUID value) { - return (value == null) ? 
"NULL" : value.toString(); - } - - @Nullable - @Override - public UUID parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : UUID.fromString(value); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format("Cannot parse UUID value from \"%s\"", value), e); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(16); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java deleted file mode 100644 index b04c959c704..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class VarIntCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BIG_INTEGER; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.VARINT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof BigInteger; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return BigInteger.class.isAssignableFrom(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable BigInteger value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : ByteBuffer.wrap(value.toByteArray()); - } - - @Nullable - @Override - public BigInteger decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null) || bytes.remaining() == 0 ? null : new BigInteger(Bytes.getArray(bytes)); - } - - @NonNull - @Override - public String format(@Nullable BigInteger value) { - return (value == null) ? "NULL" : value.toString(); - } - - @Nullable - @Override - public BigInteger parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : new BigInteger(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse varint value from \"%s\"", value), e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodec.java deleted file mode 100644 index 1f8ce1a7166..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodec.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.util.VIntCoding; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Optional; -import java.util.stream.Collectors; - -public class VectorCodec implements TypeCodec> { - - private final VectorType cqlType; - private final GenericType> javaType; - private final TypeCodec subtypeCodec; - - public VectorCodec(@NonNull VectorType cqlType, @NonNull TypeCodec subtypeCodec) { - this.cqlType = cqlType; - this.subtypeCodec = subtypeCodec; - this.javaType = GenericType.vectorOf(subtypeCodec.getJavaType()); - } - - public VectorCodec(int dimensions, @NonNull TypeCodec subtypeCodec) { - this(new DefaultVectorType(subtypeCodec.getCqlType(), dimensions), subtypeCodec); - } - - @NonNull - @Override - public GenericType> getJavaType() { - return this.javaType; - } - - @NonNull - @Override - public Optional serializedSize() { - return subtypeCodec.serializedSize().isPresent() - ? 
Optional.of(subtypeCodec.serializedSize().get() * cqlType.getDimensions()) - : Optional.empty(); - } - - @NonNull - @Override - public DataType getCqlType() { - return this.cqlType; - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable CqlVector value, @NonNull ProtocolVersion protocolVersion) { - boolean isVarSized = !subtypeCodec.serializedSize().isPresent(); - if (value == null || cqlType.getDimensions() <= 0) { - return null; - } - ByteBuffer[] valueBuffs = new ByteBuffer[cqlType.getDimensions()]; - Iterator values = value.iterator(); - int allValueBuffsSize = 0; - for (int i = 0; i < cqlType.getDimensions(); ++i) { - ByteBuffer valueBuff; - SubtypeT valueObj; - - try { - valueObj = values.next(); - } catch (NoSuchElementException nsee) { - throw new IllegalArgumentException( - String.format( - "Not enough elements; must provide elements for %d dimensions", - cqlType.getDimensions())); - } - - try { - valueBuff = this.subtypeCodec.encode(valueObj, protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Invalid type for element: " + valueObj.getClass()); - } - if (valueBuff == null) { - throw new NullPointerException("Vector elements cannot encode to CQL NULL"); - } - int elementSize = valueBuff.limit(); - if (isVarSized) { - allValueBuffsSize += VIntCoding.computeVIntSize(elementSize); - } - allValueBuffsSize += elementSize; - valueBuff.rewind(); - valueBuffs[i] = valueBuff; - } - // if too many elements, throw - if (values.hasNext()) { - throw new IllegalArgumentException( - String.format( - "Too many elements; must provide elements for %d dimensions", - cqlType.getDimensions())); - } - /* Since we already did an early return for <= 0 dimensions above */ - assert valueBuffs.length > 0; - ByteBuffer rv = ByteBuffer.allocate(allValueBuffsSize); - for (int i = 0; i < cqlType.getDimensions(); ++i) { - if (isVarSized) { - VIntCoding.writeUnsignedVInt32(valueBuffs[i].remaining(), rv); - } - rv.put(valueBuffs[i]); - 
} - rv.flip(); - return rv; - } - - @Nullable - @Override - public CqlVector decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } - - // Upfront check for fixed-size types only - subtypeCodec - .serializedSize() - .ifPresent( - (fixed_size) -> { - if (bytes.remaining() != cqlType.getDimensions() * fixed_size) { - throw new IllegalArgumentException( - String.format( - "Expected elements of uniform size, observed %d elements with total bytes %d", - cqlType.getDimensions(), bytes.remaining())); - } - }); - ; - ByteBuffer slice = bytes.slice(); - List rv = new ArrayList(cqlType.getDimensions()); - for (int i = 0; i < cqlType.getDimensions(); ++i) { - - int size = - subtypeCodec - .serializedSize() - .orElseGet(() -> VIntCoding.getUnsignedVInt32(slice, slice.position())); - // If we aren't dealing with a fixed-size type we need to move the current slice position - // beyond the vint-encoded size of the current element. Ideally this would be - // serializedSize().ifNotPresent(Consumer) but the Optional API isn't doing us any favors - // there. 
- if (!subtypeCodec.serializedSize().isPresent()) - slice.position(slice.position() + VIntCoding.computeUnsignedVIntSize(size)); - int originalPosition = slice.position(); - slice.limit(originalPosition + size); - rv.add(this.subtypeCodec.decode(slice, protocolVersion)); - // Move to the start of the next element - slice.position(originalPosition + size); - // Reset the limit to the end of the buffer - slice.limit(slice.capacity()); - } - - // if too many elements, throw - if (slice.hasRemaining()) { - throw new IllegalArgumentException( - String.format( - "Too many elements; must provide elements for %d dimensions", - cqlType.getDimensions())); - } - - return CqlVector.newInstance(rv); - } - - @NonNull - @Override - public String format(CqlVector value) { - if (value == null) return "NULL"; - return value.stream().map(subtypeCodec::format).collect(Collectors.joining(", ", "[", "]")); - } - - @Nullable - @Override - public CqlVector parse(@Nullable String value) { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : CqlVector.from(value, this.subtypeCodec); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java deleted file mode 100644 index e62e244bf5e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collection; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -/** - * A codec that wraps other codecs around {@link Optional} instances. - * - * @param The wrapped Java type. - */ -@Immutable -public class OptionalCodec extends MappingCodec> { - - public OptionalCodec(@NonNull TypeCodec innerCodec) { - super( - Objects.requireNonNull(innerCodec, "innerCodec must not be null"), - GenericType.optionalOf(innerCodec.getJavaType())); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - if (value instanceof Optional) { - Optional optional = (Optional) value; - return optional.map(innerCodec::accepts).orElse(true); - } - return false; - } - - @Nullable - @Override - protected Optional innerToOuter(@Nullable T value) { - return Optional.ofNullable(isAbsent(value) ? null : value); - } - - @Nullable - @Override - protected T outerToInner(@Nullable Optional value) { - return value != null && value.isPresent() ? 
value.get() : null; - } - - protected boolean isAbsent(@Nullable T value) { - return value == null - || (value instanceof Collection && ((Collection) value).isEmpty()) - || (value instanceof Map && ((Map) value).isEmpty()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractListToArrayCodec.java deleted file mode 100644 index fcf61a4e7b3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractListToArrayCodec.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Array; -import java.util.Objects; - -/** - * Base class for all codecs dealing with Java arrays. This class aims to reduce the amount of code - * required to create such codecs. - * - * @param The Java array type this codec handles - */ -public abstract class AbstractListToArrayCodec implements TypeCodec { - - @NonNull protected final ListType cqlType; - @NonNull protected final GenericType javaType; - - /** - * @param cqlType The CQL type. Must be a list type. - * @param arrayType The Java type. Must be an array class. 
- */ - protected AbstractListToArrayCodec( - @NonNull ListType cqlType, @NonNull GenericType arrayType) { - this.cqlType = Objects.requireNonNull(cqlType, "cqlType cannot be null"); - this.javaType = Objects.requireNonNull(arrayType, "arrayType cannot be null"); - if (!arrayType.isArray()) { - throw new IllegalArgumentException("Expecting Java array class, got " + arrayType); - } - } - - @NonNull - @Override - public GenericType getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @NonNull - @Override - public String format(@Nullable ArrayT array) { - if (array == null) { - return "NULL"; - } - int length = Array.getLength(array); - StringBuilder sb = new StringBuilder(); - sb.append('['); - for (int i = 0; i < length; i++) { - if (i != 0) { - sb.append(","); - } - formatElement(sb, array, i); - } - sb.append(']'); - return sb.toString(); - } - - @Nullable - @Override - public ArrayT parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - int idx = skipSpaces(value, 0); - idx = skipOpeningBracket(value, idx); - idx = skipSpaces(value, idx); - if (value.charAt(idx) == ']') { - return newInstance(0); - } - // first pass: determine array length - int length = getArrayLength(value, idx); - // second pass: parse elements - ArrayT array = newInstance(length); - int i = 0; - for (; idx < value.length(); i++) { - int n = skipLiteral(value, idx); - parseElement(value.substring(idx, n), array, i); - idx = skipSpaces(value, n); - if (value.charAt(idx) == ']') { - return array; - } - idx = skipComma(value, idx); - idx = skipSpaces(value, idx); - } - throw new IllegalArgumentException( - String.format("Malformed list value \"%s\", missing closing ']'", value)); - } - - /** - * Creates a new array instance with the given size. - * - * @param size The size of the array to instantiate. - * @return a new array instance with the given size. 
- */ - @NonNull - protected abstract ArrayT newInstance(int size); - - /** - * Formats the {@code index}th element of {@code array} to {@code output}. - * - * @param output The StringBuilder to write to. - * @param array The array to read from. - * @param index The element index. - */ - protected abstract void formatElement( - @NonNull StringBuilder output, @NonNull ArrayT array, int index); - - /** - * Parses the {@code index}th element of {@code array} from {@code input}. - * - * @param input The String to read from. - * @param array The array to write to. - * @param index The element index. - */ - protected abstract void parseElement(@NonNull String input, @NonNull ArrayT array, int index); - - private int getArrayLength(String value, int idx) { - int length = 1; - for (; idx < value.length(); length++) { - idx = skipLiteral(value, idx); - idx = skipSpaces(value, idx); - if (value.charAt(idx) == ']') { - break; - } - idx = skipComma(value, idx); - idx = skipSpaces(value, idx); - } - return length; - } - - private int skipComma(String value, int idx) { - if (value.charAt(idx) != ',') { - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", at character %d expecting ',' but got '%c'", - value, idx, value.charAt(idx))); - } - return idx + 1; - } - - private int skipOpeningBracket(String value, int idx) { - if (value.charAt(idx) != '[') { - throw new IllegalArgumentException( - String.format( - "cannot parse list value from \"%s\", at character %d expecting '[' but got '%c'", - value, idx, value.charAt(idx))); - } - return idx + 1; - } - - private int skipSpaces(String value, int idx) { - try { - return ParseUtils.skipSpaces(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", at character %d expecting space but got '%c'", - value, idx, value.charAt(idx)), - e); - } - } - - private int skipLiteral(String value, int idx) { - try { - 
return ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", invalid CQL value at character %d", value, idx), - e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractPrimitiveListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractPrimitiveListToArrayCodec.java deleted file mode 100644 index 3e5ece7c159..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractPrimitiveListToArrayCodec.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Array; -import java.nio.ByteBuffer; -import java.util.Objects; - -/** - * Base class for all codecs dealing with Java primitive arrays. This class provides a more - * efficient implementation of {@link #encode(Object, ProtocolVersion)} and {@link - * #decode(ByteBuffer, ProtocolVersion)} for primitive arrays. - * - * @param The Java primitive array type this codec handles - */ -public abstract class AbstractPrimitiveListToArrayCodec - extends AbstractListToArrayCodec { - - /** - * @param cqlType The CQL type. Must be a list type. - * @param javaClass The Java type. Must be an array class. - */ - protected AbstractPrimitiveListToArrayCodec( - @NonNull ListType cqlType, @NonNull GenericType javaClass) { - super(cqlType, javaClass); - GenericType componentType = Objects.requireNonNull(javaClass.getComponentType()); - if (!componentType.isPrimitive()) { - throw new IllegalArgumentException( - "Expecting primitive array component type, got " + componentType); - } - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable PrimitiveArrayT array, @NonNull ProtocolVersion protocolVersion) { - if (array == null) { - return null; - } - int length = Array.getLength(array); - int sizeOfElement = 4 + sizeOfComponentType(); - int totalSize = 4 + length * sizeOfElement; - ByteBuffer output = ByteBuffer.allocate(totalSize); - output.putInt(length); - for (int i = 0; i < length; i++) { - output.putInt(sizeOfComponentType()); - serializeElement(output, array, i, protocolVersion); - } - output.flip(); - return output; - } - - @Nullable - @Override - public PrimitiveArrayT decode( - @Nullable 
ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return newInstance(0); - } - ByteBuffer input = bytes.duplicate(); - int length = input.getInt(); - PrimitiveArrayT array = newInstance(length); - for (int i = 0; i < length; i++) { - int elementSize = input.getInt(); - // Null elements can happen on the decode path, but we cannot tolerate them - if (elementSize < 0) { - throw new NullPointerException("Primitive arrays cannot store null elements"); - } else { - deserializeElement(input, array, i, protocolVersion); - } - } - return array; - } - - /** - * Return the size in bytes of the array component type. - * - * @return the size in bytes of the array component type. - */ - protected abstract int sizeOfComponentType(); - - /** - * Write the {@code index}th element of {@code array} to {@code output}. - * - * @param output The ByteBuffer to write to. - * @param array The array to read from. - * @param index The element index. - * @param protocolVersion The protocol version to use. - */ - protected abstract void serializeElement( - @NonNull ByteBuffer output, - @NonNull PrimitiveArrayT array, - int index, - @NonNull ProtocolVersion protocolVersion); - - /** - * Read the {@code index}th element of {@code array} from {@code input}. - * - * @param input The ByteBuffer to read from. - * @param array The array to write to. - * @param index The element index. - * @param protocolVersion The protocol version to use. 
- */ - protected abstract void deserializeElement( - @NonNull ByteBuffer input, - @NonNull PrimitiveArrayT array, - int index, - @NonNull ProtocolVersion protocolVersion); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanListToArrayCodec.java deleted file mode 100644 index c9cc0baa41f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanListToArrayCodec.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code boolean[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code boolean[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * boolean} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ -@Immutable -public class BooleanListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - private static final byte TRUE = (byte) 1; - private static final byte FALSE = (byte) 0; - - public BooleanListToArrayCodec() { - super(DataTypes.listOf(DataTypes.BOOLEAN), GenericType.of(boolean[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return boolean[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof boolean[]; - } - - @Override - protected int sizeOfComponentType() { - return 1; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull boolean[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - byte element = array[index] ? 
TRUE : FALSE; - output.put(element); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull boolean[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.get() == TRUE; - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull boolean[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull boolean[] array, int index) { - array[index] = Boolean.parseBoolean(input); - } - - @NonNull - @Override - protected boolean[] newInstance(int size) { - return new boolean[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteListToArrayCodec.java deleted file mode 100644 index b811908e341..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteListToArrayCodec.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.SimpleBlobCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code byte[]}. - * - *

Note that this codec is not suitable for reading CQL blobs as byte arrays; you should use - * {@link SimpleBlobCodec} for that. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code byte[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code byte} - * values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class ByteListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public ByteListToArrayCodec() { - super(DataTypes.listOf(DataTypes.SMALLINT), GenericType.of(byte[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return byte[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof byte[]; - } - - @Override - protected int sizeOfComponentType() { - return 1; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull byte[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.put(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull byte[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.get(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull byte[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull byte[] array, int index) { - array[index] = Byte.parseByte(input); - } - - @NonNull - @Override - protected byte[] newInstance(int size) { - return new byte[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleListToArrayCodec.java deleted file mode 100644 index fdf5befa635..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleListToArrayCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code double[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code double[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * double} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ -@Immutable -public class DoubleListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public DoubleListToArrayCodec() { - super(DataTypes.listOf(DataTypes.DOUBLE), GenericType.of(double[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return double[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof double[]; - } - - @Override - protected int sizeOfComponentType() { - return 8; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull double[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putDouble(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull double[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getDouble(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull double[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull double[] array, int index) { - array[index] = Double.parseDouble(input); - } - - @NonNull - @Override - protected double[] newInstance(int size) { - return new double[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatListToArrayCodec.java deleted file mode 100644 index b77e5d1243d..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatListToArrayCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code float[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code float[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * float} values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class FloatListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public FloatListToArrayCodec() { - super(DataTypes.listOf(DataTypes.FLOAT), GenericType.of(float[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return float[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof float[]; - } - - @Override - protected int sizeOfComponentType() { - return 4; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull float[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putFloat(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull float[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getFloat(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull float[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull float[] array, int index) { - array[index] = Float.parseFloat(input); - } - - @NonNull - @Override - protected float[] newInstance(int size) { - return new float[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntListToArrayCodec.java deleted file mode 100644 index cf464282b1e..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntListToArrayCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code int[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code int[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code int} - * values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class IntListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public IntListToArrayCodec() { - super(DataTypes.listOf(DataTypes.INT), GenericType.of(int[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return int[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof int[]; - } - - @Override - protected int sizeOfComponentType() { - return 4; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull int[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putInt(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull int[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getInt(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull int[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull int[] array, int index) { - array[index] = Integer.parseInt(input); - } - - @NonNull - @Override - protected int[] newInstance(int size) { - return new int[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongListToArrayCodec.java deleted file mode 100644 index bde21d40272..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongListToArrayCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code long[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code long[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code long} - * values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class LongListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public LongListToArrayCodec() { - super(DataTypes.listOf(DataTypes.BIGINT), GenericType.of(long[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return long[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof long[]; - } - - @Override - protected int sizeOfComponentType() { - return 8; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull long[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putLong(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull long[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getLong(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull long[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull long[] array, int index) { - array[index] = Long.parseLong(input); - } - - @NonNull - @Override - protected long[] newInstance(int size) { - return new long[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectListToArrayCodec.java deleted file mode 100644 index 8600ba3e9a5..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectListToArrayCodec.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Array; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * Codec dealing with Java object arrays. Serialization and deserialization of elements in the array - * is delegated to the provided element codec. - * - *

For example, to create a codec that maps {@code list} to {@code String[]}, declare the - * following: - * - *

{@code
- * ObjectListToArrayCodec stringArrayCodec = new ObjectListToArrayCodec<>(TypeCodecs.TEXT);
- * }
- * - * @param The Java array component type this codec handles - */ -@Immutable -public class ObjectListToArrayCodec extends AbstractListToArrayCodec { - - private final TypeCodec elementCodec; - - public ObjectListToArrayCodec(@NonNull TypeCodec elementCodec) { - super( - DataTypes.listOf( - Objects.requireNonNull(elementCodec, "elementCodec must not be null").getCqlType()), - GenericType.arrayOf(elementCodec.getJavaType())); - this.elementCodec = elementCodec; - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - Class clazz = value.getClass(); - return clazz.isArray() - && clazz.getComponentType().equals(elementCodec.getJavaType().getRawType()); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable ElementT[] value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - int i = 0; - ByteBuffer[] encodedElements = new ByteBuffer[value.length]; - int toAllocate = 4; // initialize with number of elements - for (ElementT elt : value) { - if (elt == null) { - throw new NullPointerException("Collection elements cannot be null"); - } - ByteBuffer encodedElement; - try { - encodedElement = elementCodec.encode(elt, protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException( - String.format( - "Invalid type for %s element, expecting %s but got %s", - cqlType, elementCodec.getJavaType(), elt.getClass()), - e); - } - if (encodedElement == null) { - throw new NullPointerException("Collection elements cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedElement; - toAllocate += 4 + encodedElement.remaining(); // the element preceded by its size - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - result.putInt(value.length); - for (ByteBuffer encodedElement : encodedElements) { - result.putInt(encodedElement.remaining()); - result.put(encodedElement); - } - result.flip(); - return result; - } - - @Nullable - @Override - public 
ElementT[] decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return newInstance(0); - } - ByteBuffer input = bytes.duplicate(); - int size = input.getInt(); - ElementT[] result = newInstance(size); - for (int i = 0; i < size; i++) { - ElementT element; - int elementSize = input.getInt(); - // Allow null elements on the decode path, because Cassandra might return such collections - // for some computed values in the future -- e.g. SELECT ttl(some_collection) - if (elementSize < 0) { - element = null; - } else { - ByteBuffer encodedElement = input.slice(); - encodedElement.limit(elementSize); - element = elementCodec.decode(encodedElement, protocolVersion); - input.position(input.position() + elementSize); - } - result[i] = element; - } - return result; - } - - @Override - protected void formatElement( - @NonNull StringBuilder output, @NonNull ElementT[] array, int index) { - output.append(elementCodec.format(array[index])); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull ElementT[] array, int index) { - array[index] = elementCodec.parse(input); - } - - @NonNull - @Override - @SuppressWarnings("unchecked") - protected ElementT[] newInstance(int size) { - return (ElementT[]) Array.newInstance(getJavaType().getRawType().getComponentType(), size); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortListToArrayCodec.java deleted file mode 100644 index 13bb5733bf9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortListToArrayCodec.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code short[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code short[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * short} values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class ShortListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public ShortListToArrayCodec() { - super(DataTypes.listOf(DataTypes.SMALLINT), GenericType.of(short[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return short[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof short[]; - } - - @Override - protected int sizeOfComponentType() { - return 2; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull short[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putShort(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull short[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getShort(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull short[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull short[] array, int index) { - array[index] = Short.parseShort(input); - } - - @NonNull - @Override - protected short[] newInstance(int size) { - return new short[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodec.java deleted file mode 100644 index 56363ef819e..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodec.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.enums; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that serializes {@link Enum} instances as CQL {@code varchar}s representing their - * programmatic names as returned by {@link Enum#name()}. - * - *

Note that this codec relies on the enum constant names; it is therefore vital that - * enum names never change. - * - * @param The Enum class this codec serializes from and deserializes to. - */ -@Immutable -public class EnumNameCodec> extends MappingCodec { - - private final Class enumClass; - - public EnumNameCodec(@NonNull Class enumClass) { - super( - TypeCodecs.TEXT, - GenericType.of(Objects.requireNonNull(enumClass, "enumClass must not be null"))); - this.enumClass = enumClass; - } - - @Nullable - @Override - protected EnumT innerToOuter(@Nullable String value) { - return value == null || value.isEmpty() ? null : Enum.valueOf(enumClass, value); - } - - @Nullable - @Override - protected String outerToInner(@Nullable EnumT value) { - return value == null ? null : value.name(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodec.java deleted file mode 100644 index 4d6ca26484e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodec.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.enums; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that serializes {@link Enum} instances as CQL {@code int}s representing their ordinal - * values as returned by {@link Enum#ordinal()}. - * - *

Note that this codec relies on the enum constants declaration order; it is therefore - * vital that this order remains immutable. - * - * @param The Enum class this codec serializes from and deserializes to. - */ -@Immutable -public class EnumOrdinalCodec> extends MappingCodec { - - private final EnumT[] enumConstants; - - public EnumOrdinalCodec(@NonNull Class enumClass) { - super( - TypeCodecs.INT, - GenericType.of(Objects.requireNonNull(enumClass, "enumClass must not be null"))); - this.enumConstants = enumClass.getEnumConstants(); - } - - @Nullable - @Override - protected EnumT innerToOuter(@Nullable Integer value) { - return value == null ? null : enumConstants[value]; - } - - @Nullable - @Override - protected Integer outerToInner(@Nullable EnumT value) { - return value == null ? null : value.ordinal(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodec.java deleted file mode 100644 index a971d27b3f3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodec.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.json; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JavaType; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.type.TypeFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Objects; - -/** - * A JSON codec that maps arbitrary Java objects to JSON strings stored as CQL type {@code text}, - * using the Jackson library to perform serialization and deserialization of JSON objects. - * - *

Note that this codec requires the presence of Jackson library at runtime. If you use Maven, - * this can be done by declaring the following dependency in your project: - * - *

{@code
- * 
- *   com.fasterxml.jackson.core
- *   jackson-databind
- *   LATEST
- * 
- * }
- * - * @see Jackson JSON Library - * @param The Java type that this codec serializes from and deserializes to, from JSON strings. - */ -public class JsonCodec implements TypeCodec { - - private final ObjectMapper objectMapper; - private final GenericType javaType; - private final JavaType jacksonJavaType; - - /** - * Creates a new instance for the provided {@code javaClass}, using a default, newly-allocated - * {@link ObjectMapper}. - * - *

The codec created with this constructor can handle all primitive CQL types as well as - * collections thereof, however it cannot handle tuples and user-defined types; if you need - * support for such CQL types, you need to create your own {@link ObjectMapper} and use the - * {@linkplain #JsonCodec(Class, ObjectMapper) two-arg constructor} instead. - * - * @param javaClass the Java class this codec maps to. - */ - public JsonCodec(@NonNull Class javaClass) { - this(GenericType.of(Objects.requireNonNull(javaClass, "javaClass cannot be null"))); - } - - /** - * Creates a new instance for the provided {@code javaType}, using a default, newly-allocated - * {@link ObjectMapper}. - * - *

The codec created with this constructor can handle all primitive CQL types as well as - * collections thereof, however it cannot handle tuples and user-defined types; if you need - * support for such CQL types, you need to create your own {@link ObjectMapper} and use the - * {@linkplain #JsonCodec(GenericType, ObjectMapper) two-arg constructor} instead. - * - * @param javaType the Java type this codec maps to. - */ - public JsonCodec(@NonNull GenericType javaType) { - this(javaType, new ObjectMapper()); - } - - /** - * Creates a new instance for the provided {@code javaClass}, and using the provided {@link - * ObjectMapper}. - * - * @param javaClass the Java class this codec maps to. - * @param objectMapper the {@link ObjectMapper} instance to use. - */ - public JsonCodec(@NonNull Class javaClass, @NonNull ObjectMapper objectMapper) { - this( - GenericType.of(Objects.requireNonNull(javaClass, "javaClass cannot be null")), - objectMapper); - } - - /** - * Creates a new instance for the provided {@code javaType}, and using the provided {@link - * ObjectMapper}. - * - * @param javaType the Java type this codec maps to. - * @param objectMapper the {@link ObjectMapper} instance to use. 
- */ - public JsonCodec(@NonNull GenericType javaType, @NonNull ObjectMapper objectMapper) { - this.javaType = Objects.requireNonNull(javaType, "javaType cannot be null"); - this.objectMapper = Objects.requireNonNull(objectMapper, "objectMapper cannot be null"); - this.jacksonJavaType = TypeFactory.defaultInstance().constructType(javaType.getType()); - } - - @NonNull - @Override - public GenericType getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TEXT; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable T value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - try { - return ByteBuffer.wrap(objectMapper.writeValueAsBytes(value)); - } catch (JsonProcessingException e) { - throw new IllegalArgumentException("Failed to encode value as JSON", e); - } - } - - @Nullable - @Override - public T decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } - try { - return objectMapper.readValue(Bytes.getArray(bytes), jacksonJavaType); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to decode JSON value", e); - } - } - - @NonNull - @Override - public String format(@Nullable T value) { - if (value == null) { - return "NULL"; - } - String json; - try { - json = objectMapper.writeValueAsString(value); - } catch (JsonProcessingException e) { - throw new IllegalArgumentException("Failed to format value as JSON", e); - } - return Strings.quote(json); - } - - @Nullable - @Override - public T parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException("JSON strings must be enclosed by single quotes"); - } - String json = Strings.unquote(value); - try { - return objectMapper.readValue(json, jacksonJavaType); - } catch (IOException e) { - throw 
new IllegalArgumentException("Failed to parse value as JSON", e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodec.java deleted file mode 100644 index 6b66b5d2049..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodec.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * {@link TypeCodec} that maps {@link LocalDateTime} to CQL {@code timestamp}, allowing the setting - * and retrieval of {@code timestamp} columns as {@link LocalDateTime} instances. - * - *

This codec shares its logic with {@link TimestampCodec}. See the javadocs of this codec for - * important remarks about implementation notes and accepted timestamp formats. - */ -@Immutable -public class LocalTimestampCodec extends MappingCodec { - - private final ZoneId timeZone; - - /** - * Creates a new {@code LocalTimestampCodec} that converts CQL timestamps into {@link - * LocalDateTime} instances using the system's {@linkplain ZoneId#systemDefault() default time - * zone} as their time zone. The supplied {@code timeZone} will also be used to parse CQL - * timestamp literals that do not include any time zone information. - */ - public LocalTimestampCodec() { - this(ZoneId.systemDefault()); - } - - /** - * Creates a new {@code LocalTimestampCodec} that converts CQL timestamps into {@link - * LocalDateTime} instances using the given {@link ZoneId} as their time zone. The supplied {@code - * timeZone} will also be used to parse CQL timestamp literals that do not include any time zone - * information. - */ - public LocalTimestampCodec(@NonNull ZoneId timeZone) { - super( - new TimestampCodec(Objects.requireNonNull(timeZone, "timeZone cannot be null")), - GenericType.LOCAL_DATE_TIME); - this.timeZone = timeZone; - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof LocalDateTime; - } - - @Nullable - @Override - protected LocalDateTime innerToOuter(@Nullable Instant value) { - return value == null ? null : LocalDateTime.ofInstant(value, timeZone); - } - - @Nullable - @Override - protected Instant outerToInner(@Nullable LocalDateTime value) { - return value == null ? 
null : value.atZone(timeZone).toInstant(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodec.java deleted file mode 100644 index c16a64b9ad9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodec.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * {@link TypeCodec} that maps {@link ZonedDateTime} to CQL {@code tuple}, - * providing a pattern for maintaining timezone information in Cassandra. - * - *

Since Cassandra's timestamp type does not store any time zone, by using a - * tuple<timestamp,varchar> a timezone can be persisted in the varchar - * field of such tuples, and so when the value is deserialized the original timezone is - * preserved. - * - *

Note: if you want to retrieve CQL timestamps as {@link ZonedDateTime} instances but don't need - * to persist the time zone to the database, you should rather use {@link ZonedTimestampCodec}. - */ -@Immutable -public class PersistentZonedTimestampCodec extends MappingCodec { - - private static final TupleType CQL_TYPE = DataTypes.tupleOf(DataTypes.TIMESTAMP, DataTypes.TEXT); - - public PersistentZonedTimestampCodec() { - super(TypeCodecs.tupleOf(CQL_TYPE), GenericType.ZONED_DATE_TIME); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof ZonedDateTime; - } - - @NonNull - @Override - public TupleType getCqlType() { - return CQL_TYPE; - } - - @NonNull - @Override - public String format(@Nullable ZonedDateTime value) { - if (value == null) { - return "NULL"; - } - // Use TIMESTAMP_UTC for a better-looking format - return "(" - + ExtraTypeCodecs.TIMESTAMP_UTC.format(value.toInstant()) - + "," - + TypeCodecs.TEXT.format(value.getZone().toString()) - + ")"; - } - - @Nullable - @Override - protected ZonedDateTime innerToOuter(@Nullable TupleValue value) { - if (value == null) { - return null; - } else { - Instant instant = Objects.requireNonNull(value.getInstant(0)); - ZoneId zoneId = ZoneId.of(Objects.requireNonNull(value.getString(1))); - return ZonedDateTime.ofInstant(instant, zoneId); - } - } - - @Nullable - @Override - protected TupleValue outerToInner(@Nullable ZonedDateTime value) { - if (value == null) { - return null; - } else { - Instant instant = value.toInstant(); - String zoneId = value.getZone().toString(); - return this.getCqlType().newValue(instant, zoneId); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodec.java deleted file mode 100644 index 12e3e839d2a..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodec.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.ZoneId; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -/** - * A {@link TypeCodec} that maps CQL timestamps to Java primitive longs, representing the number of - * milliseconds since the Epoch. - * - *

This codec can serve as a replacement for the driver's built-in {@link TypeCodecs#TIMESTAMP - * timestamp} codec, when application code prefers to deal with raw milliseconds than with {@link - * Instant} instances. - * - *

This codec shares its logic with {@link TimestampCodec}. See the javadocs of this codec for - * important remarks about implementation notes and accepted timestamp formats. - */ -@Immutable -public class TimestampMillisCodec implements PrimitiveLongCodec { - - private final TimestampCodec timestampCodec; - - /** - * Creates a new {@code TimestampMillisCodec} that uses the system's {@linkplain - * ZoneId#systemDefault() default time zone} to parse timestamp literals that do not include any - * time zone information. - */ - public TimestampMillisCodec() { - this(ZoneId.systemDefault()); - } - - /** - * Creates a new {@code TimestampMillisCodec} that uses the given {@link ZoneId} to parse - * timestamp literals that do not include any time zone information. - */ - public TimestampMillisCodec(ZoneId defaultZoneId) { - timestampCodec = new TimestampCodec(defaultZoneId); - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.LONG; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TIMESTAMP; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Long.class || javaClass == long.class; - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof Long; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(long value, @NonNull ProtocolVersion protocolVersion) { - return TypeCodecs.BIGINT.encodePrimitive(value, protocolVersion); - } - - @Override - public long decodePrimitive( - @Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion) { - return TypeCodecs.BIGINT.decodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - public Long parse(@Nullable String value) { - Instant instant = timestampCodec.parse(value); - return instant == null ? 
null : instant.toEpochMilli(); - } - - @NonNull - @Override - public String format(@Nullable Long value) { - Instant instant = value == null ? null : Instant.ofEpochMilli(value); - return timestampCodec.format(instant); - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(8); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodec.java deleted file mode 100644 index a0947ff3493..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodec.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; - -/** - * A codec that handles Apache Cassandra(R)'s timestamp type and maps it to Java's {@link - * ZonedDateTime}, using the {@link ZoneId} supplied at instantiation. - * - *

Note that Apache Cassandra(R)'s timestamp type does not store any time zone; this codec is - * provided merely as a convenience for users that need to deal with zoned timestamps in their - * applications. If you need to persist the time zone in the database, consider using {@link - * PersistentZonedTimestampCodec} instead. - * - *

This codec shares its logic with {@link TimestampCodec}. See the javadocs of this codec for - * important remarks about implementation notes and accepted timestamp formats. - * - * @see TimestampCodec - */ -@ThreadSafe -public class ZonedTimestampCodec extends MappingCodec { - - private final ZoneId timeZone; - - /** - * Creates a new {@code ZonedTimestampCodec} that converts CQL timestamps into {@link - * ZonedDateTime} instances using the system's {@linkplain ZoneId#systemDefault() default time - * zone} as their time zone. The supplied {@code timeZone} will also be used to parse CQL - * timestamp literals that do not include any time zone information. - */ - public ZonedTimestampCodec() { - this(ZoneId.systemDefault()); - } - - /** - * Creates a new {@code ZonedTimestampCodec} that converts CQL timestamps into {@link - * ZonedDateTime} instances using the given {@link ZoneId} as their time zone. The supplied {@code - * timeZone} will also be used to parse CQL timestamp literals that do not include any time zone - * information. - */ - public ZonedTimestampCodec(ZoneId timeZone) { - super( - new TimestampCodec(Objects.requireNonNull(timeZone, "timeZone cannot be null")), - GenericType.ZONED_DATE_TIME); - this.timeZone = timeZone; - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof ZonedDateTime; - } - - @Nullable - @Override - protected ZonedDateTime innerToOuter(@Nullable Instant value) { - return value == null ? null : value.atZone(timeZone); - } - - @Nullable - @Override - protected Instant outerToInner(@Nullable ZonedDateTime value) { - return value == null ? 
null : value.toInstant(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/AbstractVectorToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/AbstractVectorToArrayCodec.java deleted file mode 100644 index 3e4e844783c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/AbstractVectorToArrayCodec.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.vector; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Array; -import java.nio.ByteBuffer; -import java.util.Objects; - -/** Common super-class for all codecs which map a CQL vector type onto a primitive array */ -public abstract class AbstractVectorToArrayCodec implements TypeCodec { - - @NonNull protected final VectorType cqlType; - @NonNull protected final GenericType javaType; - - /** - * @param cqlType The CQL type. Must be a list type. - * @param arrayType The Java type. Must be an array class. - */ - protected AbstractVectorToArrayCodec( - @NonNull VectorType cqlType, @NonNull GenericType arrayType) { - this.cqlType = Objects.requireNonNull(cqlType, "cqlType cannot be null"); - this.javaType = Objects.requireNonNull(arrayType, "arrayType cannot be null"); - if (!arrayType.isArray()) { - throw new IllegalArgumentException("Expecting Java array class, got " + arrayType); - } - } - - @NonNull - @Override - public GenericType getJavaType() { - return this.javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return this.cqlType; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable ArrayT array, @NonNull ProtocolVersion protocolVersion) { - if (array == null) { - return null; - } - int length = Array.getLength(array); - int totalSize = length * sizeOfComponentType(); - ByteBuffer output = ByteBuffer.allocate(totalSize); - for (int i = 0; i < length; i++) { - serializeElement(output, array, i, protocolVersion); - } - output.flip(); - return output; - } - - @Nullable - @Override - public 
ArrayT decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - throw new IllegalArgumentException( - "Input ByteBuffer must not be null and must have non-zero remaining bytes"); - } - ByteBuffer input = bytes.duplicate(); - int length = this.cqlType.getDimensions(); - int elementSize = sizeOfComponentType(); - ArrayT array = newInstance(); - for (int i = 0; i < length; i++) { - // Null elements can happen on the decode path, but we cannot tolerate them - if (elementSize < 0) { - throw new NullPointerException("Primitive arrays cannot store null elements"); - } else { - deserializeElement(input, array, i, protocolVersion); - } - } - return array; - } - - /** - * Creates a new array instance with a size matching the specified vector. - * - * @return a new array instance with a size matching the specified vector. - */ - @NonNull - protected abstract ArrayT newInstance(); - - /** - * Return the size in bytes of the array component type. - * - * @return the size in bytes of the array component type. - */ - protected abstract int sizeOfComponentType(); - - /** - * Write the {@code index}th element of {@code array} to {@code output}. - * - * @param output The ByteBuffer to write to. - * @param array The array to read from. - * @param index The element index. - * @param protocolVersion The protocol version to use. - */ - protected abstract void serializeElement( - @NonNull ByteBuffer output, - @NonNull ArrayT array, - int index, - @NonNull ProtocolVersion protocolVersion); - - /** - * Read the {@code index}th element of {@code array} from {@code input}. - * - * @param input The ByteBuffer to read from. - * @param array The array to write to. - * @param index The element index. - * @param protocolVersion The protocol version to use. 
- */ - protected abstract void deserializeElement( - @NonNull ByteBuffer input, - @NonNull ArrayT array, - int index, - @NonNull ProtocolVersion protocolVersion); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/FloatVectorToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/FloatVectorToArrayCodec.java deleted file mode 100644 index 86f31dc4980..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/FloatVectorToArrayCodec.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.vector; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.FloatCodec; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Splitter; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Objects; - -/** A codec that maps CQL vectors to the Java type {@code float[]}. */ -public class FloatVectorToArrayCodec extends AbstractVectorToArrayCodec { - - public FloatVectorToArrayCodec(VectorType type) { - super(type, GenericType.of(float[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return float[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof float[]; - } - - @NonNull - @Override - protected float[] newInstance() { - return new float[cqlType.getDimensions()]; - } - - @Override - protected int sizeOfComponentType() { - return 4; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull float[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putFloat(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull float[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getFloat(); - } - - @NonNull - @Override - public String format(@Nullable float[] value) { - return value == null ? 
"NULL" : Arrays.toString(value); - } - - @Nullable - @Override - public float[] parse(@Nullable String str) { - Preconditions.checkArgument(str != null, "Cannot create float array from null string"); - Preconditions.checkArgument(!str.isEmpty(), "Cannot create float array from empty string"); - - FloatCodec codec = new FloatCodec(); - float[] rv = this.newInstance(); - Iterator strIter = - Splitter.on(", ").trimResults().split(str.substring(1, str.length() - 1)).iterator(); - for (int i = 0; i < rv.length; ++i) { - String strVal = strIter.next(); - if (strVal == null) { - throw new IllegalArgumentException("Null element observed in float array string"); - } - Float f = codec.parse(strVal); - rv[i] = f.floatValue(); - } - return rv; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java deleted file mode 100644 index 3af5a30ba27..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java +++ /dev/null @@ -1,764 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.ContainerType; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeToken; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.util.IntMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.UUID; 
-import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A codec registry that handles built-in type mappings, can be extended with a list of - * user-provided codecs, generates more complex codecs from those basic codecs, and caches generated - * codecs for reuse. - * - *

The primitive mappings always take precedence over any user codec. The list of user codecs can - * not be modified after construction. - * - *

This class is abstract in order to be agnostic from the cache implementation. Subclasses must - * implement {@link #getCachedCodec(DataType, GenericType, boolean)}. - */ -@ThreadSafe -public abstract class CachingCodecRegistry implements MutableCodecRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(CachingCodecRegistry.class); - - // Implementation notes: - // - built-in primitive codecs are served directly, without hitting the cache - // - same for user codecs (we assume the cardinality will always be low, so a sequential array - // traversal is cheap). - - protected final String logPrefix; - private final TypeCodec[] primitiveCodecs; - private final CopyOnWriteArrayList> userCodecs = new CopyOnWriteArrayList<>(); - private final IntMap> primitiveCodecsByCode; - private final Lock registerLock = new ReentrantLock(); - - protected CachingCodecRegistry( - @NonNull String logPrefix, @NonNull TypeCodec[] primitiveCodecs) { - this.logPrefix = logPrefix; - this.primitiveCodecs = primitiveCodecs; - this.primitiveCodecsByCode = sortByProtocolCode(primitiveCodecs); - } - - /** - * @deprecated this constructor calls an overridable method ({@link #register(TypeCodec[])}), - * which is a bad practice. The recommended alternative is to use {@link - * #CachingCodecRegistry(String, TypeCodec[])}, then add the codecs with one of the {@link - * #register} methods. - */ - @Deprecated - protected CachingCodecRegistry( - @NonNull String logPrefix, - @NonNull TypeCodec[] primitiveCodecs, - @NonNull TypeCodec[] userCodecs) { - this(logPrefix, primitiveCodecs); - register(userCodecs); - } - - @Override - public void register(TypeCodec newCodec) { - // This method could work without synchronization, but there is a tiny race condition that would - // allow two threads to register colliding codecs (the last added codec would later be ignored, - // but without any warning). 
Serialize calls to avoid that: - registerLock.lock(); - try { - for (TypeCodec primitiveCodec : primitiveCodecs) { - if (collides(newCodec, primitiveCodec)) { - LOG.warn( - "[{}] Ignoring codec {} because it collides with built-in primitive codec {}", - logPrefix, - newCodec, - primitiveCodec); - return; - } - } - for (TypeCodec userCodec : userCodecs) { - if (collides(newCodec, userCodec)) { - LOG.warn( - "[{}] Ignoring codec {} because it collides with previously registered codec {}", - logPrefix, - newCodec, - userCodec); - return; - } - } - // Technically this would cover the two previous cases as well, but we want precise messages. - try { - TypeCodec cachedCodec = - getCachedCodec(newCodec.getCqlType(), newCodec.getJavaType(), false); - LOG.warn( - "[{}] Ignoring codec {} because it collides with previously generated codec {}", - logPrefix, - newCodec, - cachedCodec); - return; - } catch (CodecNotFoundException ignored) { - // Catching the exception is ugly, but it avoids breaking the internal API (e.g. by adding a - // getCachedCodecIfExists) - } - userCodecs.add(newCodec); - } finally { - registerLock.unlock(); - } - } - - private boolean collides(TypeCodec newCodec, TypeCodec oldCodec) { - return oldCodec.accepts(newCodec.getCqlType()) && oldCodec.accepts(newCodec.getJavaType()); - } - - /** - * Gets a complex codec from the cache. - * - *

If the codec does not exist in the cache, this method must generate it with {@link - * #createCodec(DataType, GenericType, boolean)} (and most likely put it in the cache too for - * future calls). - */ - protected abstract TypeCodec getCachedCodec( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant); - - @NonNull - @Override - public TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull GenericType javaType) { - return codecFor(cqlType, javaType, false); - } - - // Not exposed publicly, (isJavaCovariant=true) is only used for internal recursion - @NonNull - protected TypeCodec codecFor( - @NonNull DataType cqlType, - @NonNull GenericType javaType, - boolean isJavaCovariant) { - LOG.trace("[{}] Looking up codec for {} <-> {}", logPrefix, cqlType, javaType); - TypeCodec primitiveCodec = primitiveCodecsByCode.get(cqlType.getProtocolCode()); - if (primitiveCodec != null && matches(primitiveCodec, javaType, isJavaCovariant)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(cqlType) && matches(userCodec, javaType, isJavaCovariant)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - return uncheckedCast(getCachedCodec(cqlType, javaType, isJavaCovariant)); - } - - @NonNull - @Override - public TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull Class javaType) { - LOG.trace("[{}] Looking up codec for {} <-> {}", logPrefix, cqlType, javaType); - TypeCodec primitiveCodec = primitiveCodecsByCode.get(cqlType.getProtocolCode()); - if (primitiveCodec != null && primitiveCodec.accepts(javaType)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(cqlType) && 
userCodec.accepts(javaType)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - return uncheckedCast(getCachedCodec(cqlType, GenericType.of(javaType), false)); - } - - @NonNull - @Override - public TypeCodec codecFor(@NonNull DataType cqlType) { - LOG.trace("[{}] Looking up codec for CQL type {}", logPrefix, cqlType); - TypeCodec primitiveCodec = primitiveCodecsByCode.get(cqlType.getProtocolCode()); - if (primitiveCodec != null) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(cqlType)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - return uncheckedCast(getCachedCodec(cqlType, null, false)); - } - - @NonNull - @Override - public TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull JavaTypeT value) { - Preconditions.checkNotNull(cqlType); - Preconditions.checkNotNull(value); - LOG.trace("[{}] Looking up codec for CQL type {} and object {}", logPrefix, cqlType, value); - - TypeCodec primitiveCodec = primitiveCodecsByCode.get(cqlType.getProtocolCode()); - if (primitiveCodec != null && primitiveCodec.accepts(value)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(cqlType) && userCodec.accepts(value)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - - GenericType javaType = inspectType(value, cqlType); - LOG.trace("[{}] Continuing based on inferred type {}", logPrefix, javaType); - return uncheckedCast(getCachedCodec(cqlType, javaType, true)); - } - - @NonNull - @Override - public TypeCodec codecFor(@NonNull JavaTypeT value) { - Preconditions.checkNotNull(value); - 
LOG.trace("[{}] Looking up codec for object {}", logPrefix, value); - - for (TypeCodec primitiveCodec : primitiveCodecs) { - if (primitiveCodec.accepts(value)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(value)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - - DataType cqlType = inferCqlTypeFromValue(value); - GenericType javaType = inspectType(value, cqlType); - LOG.trace( - "[{}] Continuing based on inferred CQL type {} and Java type {}", - logPrefix, - cqlType, - javaType); - return uncheckedCast(getCachedCodec(cqlType, javaType, true)); - } - - @NonNull - @Override - public TypeCodec codecFor(@NonNull GenericType javaType) { - return codecFor(javaType, false); - } - - // Not exposed publicly, (isJavaCovariant=true) is only used for internal recursion - @NonNull - protected TypeCodec codecFor( - @NonNull GenericType javaType, boolean isJavaCovariant) { - LOG.trace( - "[{}] Looking up codec for Java type {} (covariant = {})", - logPrefix, - javaType, - isJavaCovariant); - for (TypeCodec primitiveCodec : primitiveCodecs) { - if (matches(primitiveCodec, javaType, isJavaCovariant)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - } - for (TypeCodec userCodec : userCodecs) { - if (matches(userCodec, javaType, isJavaCovariant)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - return uncheckedCast(getCachedCodec(null, javaType, isJavaCovariant)); - } - - protected boolean matches( - @NonNull TypeCodec codec, @NonNull GenericType javaType, boolean isJavaCovariant) { - return isJavaCovariant ? 
codec.getJavaType().isSupertypeOf(javaType) : codec.accepts(javaType); - } - - @NonNull - protected GenericType inspectType(@NonNull Object value, @Nullable DataType cqlType) { - if (value instanceof List) { - List list = (List) value; - if (list.isEmpty()) { - // Empty collections are always encoded the same way, so any element type will do - // in the absence of a CQL type. When the CQL type is known, we try to infer the best Java - // type. - return cqlType == null ? JAVA_TYPE_FOR_EMPTY_LISTS : inferJavaTypeFromCqlType(cqlType); - } else { - Object firstElement = list.get(0); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer list codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - GenericType elementType = - inspectType( - firstElement, cqlType == null ? null : ((ContainerType) cqlType).getElementType()); - return GenericType.listOf(elementType); - } - } else if (value instanceof Set) { - Set set = (Set) value; - if (set.isEmpty()) { - return cqlType == null ? JAVA_TYPE_FOR_EMPTY_SETS : inferJavaTypeFromCqlType(cqlType); - } else { - Object firstElement = set.iterator().next(); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer set codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - GenericType elementType = - inspectType( - firstElement, cqlType == null ? null : ((SetType) cqlType).getElementType()); - return GenericType.setOf(elementType); - } - } else if (value instanceof Map) { - Map map = (Map) value; - if (map.isEmpty()) { - return cqlType == null ? 
JAVA_TYPE_FOR_EMPTY_MAPS : inferJavaTypeFromCqlType(cqlType); - } else { - Map.Entry firstEntry = map.entrySet().iterator().next(); - Object firstKey = firstEntry.getKey(); - Object firstValue = firstEntry.getValue(); - if (firstKey == null || firstValue == null) { - throw new IllegalArgumentException( - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not allow null values in collections)"); - } - GenericType keyType = - inspectType(firstKey, cqlType == null ? null : ((MapType) cqlType).getKeyType()); - GenericType valueType = - inspectType(firstValue, cqlType == null ? null : ((MapType) cqlType).getValueType()); - return GenericType.mapOf(keyType, valueType); - } - } else if (value instanceof CqlVector) { - CqlVector vector = (CqlVector) value; - if (vector.isEmpty()) { - return cqlType == null ? JAVA_TYPE_FOR_EMPTY_CQLVECTORS : inferJavaTypeFromCqlType(cqlType); - } else { - Object firstElement = vector.iterator().next(); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer vector codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - GenericType elementType = - inspectType( - firstElement, cqlType == null ? 
null : ((VectorType) cqlType).getElementType()); - return GenericType.vectorOf(elementType); - } - } else { - // There's not much more we can do - return GenericType.of(value.getClass()); - } - } - - @NonNull - protected GenericType inferJavaTypeFromCqlType(@NonNull DataType cqlType) { - if (cqlType instanceof ListType) { - DataType elementType = ((ListType) cqlType).getElementType(); - return GenericType.listOf(inferJavaTypeFromCqlType(elementType)); - } else if (cqlType instanceof SetType) { - DataType elementType = ((SetType) cqlType).getElementType(); - return GenericType.setOf(inferJavaTypeFromCqlType(elementType)); - } else if (cqlType instanceof MapType) { - DataType keyType = ((MapType) cqlType).getKeyType(); - DataType valueType = ((MapType) cqlType).getValueType(); - return GenericType.mapOf( - inferJavaTypeFromCqlType(keyType), inferJavaTypeFromCqlType(valueType)); - } else if (cqlType instanceof VectorType) { - DataType elementType = ((VectorType) cqlType).getElementType(); - GenericType numberType = inferJavaTypeFromCqlType(elementType); - return GenericType.vectorOf(numberType); - } - switch (cqlType.getProtocolCode()) { - case ProtocolConstants.DataType.CUSTOM: - case ProtocolConstants.DataType.BLOB: - return GenericType.BYTE_BUFFER; - case ProtocolConstants.DataType.ASCII: - case ProtocolConstants.DataType.VARCHAR: - return GenericType.STRING; - case ProtocolConstants.DataType.BIGINT: - case ProtocolConstants.DataType.COUNTER: - return GenericType.LONG; - case ProtocolConstants.DataType.BOOLEAN: - return GenericType.BOOLEAN; - case ProtocolConstants.DataType.DECIMAL: - return GenericType.BIG_DECIMAL; - case ProtocolConstants.DataType.DOUBLE: - return GenericType.DOUBLE; - case ProtocolConstants.DataType.FLOAT: - return GenericType.FLOAT; - case ProtocolConstants.DataType.INT: - return GenericType.INTEGER; - case ProtocolConstants.DataType.TIMESTAMP: - return GenericType.INSTANT; - case ProtocolConstants.DataType.UUID: - case 
ProtocolConstants.DataType.TIMEUUID: - return GenericType.UUID; - case ProtocolConstants.DataType.VARINT: - return GenericType.BIG_INTEGER; - case ProtocolConstants.DataType.INET: - return GenericType.INET_ADDRESS; - case ProtocolConstants.DataType.DATE: - return GenericType.LOCAL_DATE; - case ProtocolConstants.DataType.TIME: - return GenericType.LOCAL_TIME; - case ProtocolConstants.DataType.SMALLINT: - return GenericType.SHORT; - case ProtocolConstants.DataType.TINYINT: - return GenericType.BYTE; - case ProtocolConstants.DataType.DURATION: - return GenericType.CQL_DURATION; - case ProtocolConstants.DataType.UDT: - return GenericType.UDT_VALUE; - case ProtocolConstants.DataType.TUPLE: - return GenericType.TUPLE_VALUE; - default: - throw new CodecNotFoundException(cqlType, null); - } - } - - @Nullable - protected DataType inferCqlTypeFromValue(@NonNull Object value) { - if (value instanceof List) { - List list = (List) value; - if (list.isEmpty()) { - return CQL_TYPE_FOR_EMPTY_LISTS; - } - Object firstElement = list.get(0); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer list codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - DataType elementType = inferCqlTypeFromValue(firstElement); - if (elementType == null) { - return null; - } - return DataTypes.listOf(elementType); - } else if (value instanceof Set) { - Set set = (Set) value; - if (set.isEmpty()) { - return CQL_TYPE_FOR_EMPTY_SETS; - } - Object firstElement = set.iterator().next(); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer set codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - DataType elementType = inferCqlTypeFromValue(firstElement); - if (elementType == null) { - return null; - } - return DataTypes.setOf(elementType); - } else if (value instanceof Map) { - Map map = (Map) value; - if (map.isEmpty()) { - 
return CQL_TYPE_FOR_EMPTY_MAPS; - } - Entry firstEntry = map.entrySet().iterator().next(); - Object firstKey = firstEntry.getKey(); - Object firstValue = firstEntry.getValue(); - if (firstKey == null || firstValue == null) { - throw new IllegalArgumentException( - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not allow null values in collections)"); - } - DataType keyType = inferCqlTypeFromValue(firstKey); - DataType valueType = inferCqlTypeFromValue(firstValue); - if (keyType == null || valueType == null) { - return null; - } - return DataTypes.mapOf(keyType, valueType); - } else if (value instanceof CqlVector) { - CqlVector vector = (CqlVector) value; - if (vector.isEmpty()) { - return CQL_TYPE_FOR_EMPTY_VECTORS; - } - Object firstElement = vector.iterator().next(); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer vector codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - DataType elementType = inferCqlTypeFromValue(firstElement); - if (elementType == null) { - return null; - } - return DataTypes.vectorOf(elementType, vector.size()); - } - Class javaClass = value.getClass(); - if (ByteBuffer.class.isAssignableFrom(javaClass)) { - return DataTypes.BLOB; - } else if (String.class.equals(javaClass)) { - return DataTypes.TEXT; - } else if (Long.class.equals(javaClass)) { - return DataTypes.BIGINT; - } else if (Boolean.class.equals(javaClass)) { - return DataTypes.BOOLEAN; - } else if (BigDecimal.class.equals(javaClass)) { - return DataTypes.DECIMAL; - } else if (Double.class.equals(javaClass)) { - return DataTypes.DOUBLE; - } else if (Float.class.equals(javaClass)) { - return DataTypes.FLOAT; - } else if (Integer.class.equals(javaClass)) { - return DataTypes.INT; - } else if (Instant.class.equals(javaClass)) { - return DataTypes.TIMESTAMP; - } else if (UUID.class.equals(javaClass)) { - return DataTypes.UUID; - } else if 
(BigInteger.class.equals(javaClass)) { - return DataTypes.VARINT; - } else if (InetAddress.class.isAssignableFrom(javaClass)) { - return DataTypes.INET; - } else if (LocalDate.class.equals(javaClass)) { - return DataTypes.DATE; - } else if (LocalTime.class.equals(javaClass)) { - return DataTypes.TIME; - } else if (Short.class.equals(javaClass)) { - return DataTypes.SMALLINT; - } else if (Byte.class.equals(javaClass)) { - return DataTypes.TINYINT; - } else if (CqlDuration.class.equals(javaClass)) { - return DataTypes.DURATION; - } else if (UdtValue.class.isAssignableFrom(javaClass)) { - return ((UdtValue) value).getType(); - } else if (TupleValue.class.isAssignableFrom(javaClass)) { - return ((TupleValue) value).getType(); - } - // This might mean that the java type is a custom type with a custom codec, - // so don't throw CodecNotFoundException just yet. - return null; - } - - private TypeCodec getElementCodecForCqlAndJavaType( - ContainerType cqlType, TypeToken token, boolean isJavaCovariant) { - - DataType elementCqlType = cqlType.getElementType(); - if (token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType elementJavaType = GenericType.of(typeArguments[0]); - return uncheckedCast(codecFor(elementCqlType, elementJavaType, isJavaCovariant)); - } - return codecFor(elementCqlType); - } - - private TypeCodec getElementCodecForJavaType( - ParameterizedType parameterizedType, boolean isJavaCovariant) { - - Type[] typeArguments = parameterizedType.getActualTypeArguments(); - GenericType elementType = GenericType.of(typeArguments[0]); - return codecFor(elementType, isJavaCovariant); - } - - // Try to create a codec when we haven't found it in the cache - @NonNull - protected TypeCodec createCodec( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant) { - LOG.trace("[{}] Cache miss, creating codec", logPrefix); - // Either type can be null, but 
not both. - if (javaType == null) { - assert cqlType != null; - return createCodec(cqlType); - } else if (cqlType == null) { - return createCodec(javaType, isJavaCovariant); - } else { // Both non-null - TypeToken token = javaType.__getToken(); - if (cqlType instanceof ListType && List.class.isAssignableFrom(token.getRawType())) { - TypeCodec elementCodec = - getElementCodecForCqlAndJavaType((ContainerType) cqlType, token, isJavaCovariant); - return TypeCodecs.listOf(elementCodec); - } else if (cqlType instanceof SetType && Set.class.isAssignableFrom(token.getRawType())) { - TypeCodec elementCodec = - getElementCodecForCqlAndJavaType((ContainerType) cqlType, token, isJavaCovariant); - return TypeCodecs.setOf(elementCodec); - } else if (cqlType instanceof MapType && Map.class.isAssignableFrom(token.getRawType())) { - DataType keyCqlType = ((MapType) cqlType).getKeyType(); - DataType valueCqlType = ((MapType) cqlType).getValueType(); - TypeCodec keyCodec; - TypeCodec valueCodec; - if (token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType keyJavaType = GenericType.of(typeArguments[0]); - GenericType valueJavaType = GenericType.of(typeArguments[1]); - keyCodec = uncheckedCast(codecFor(keyCqlType, keyJavaType, isJavaCovariant)); - valueCodec = uncheckedCast(codecFor(valueCqlType, valueJavaType, isJavaCovariant)); - } else { - keyCodec = codecFor(keyCqlType); - valueCodec = codecFor(valueCqlType); - } - return TypeCodecs.mapOf(keyCodec, valueCodec); - } else if (cqlType instanceof TupleType - && TupleValue.class.isAssignableFrom(token.getRawType())) { - return TypeCodecs.tupleOf((TupleType) cqlType); - } else if (cqlType instanceof UserDefinedType - && UdtValue.class.isAssignableFrom(token.getRawType())) { - return TypeCodecs.udtOf((UserDefinedType) cqlType); - } else if (cqlType instanceof VectorType - && CqlVector.class.isAssignableFrom(token.getRawType())) { - VectorType 
vectorType = (VectorType) cqlType; - /* For a vector type we'll always get back an instance of TypeCodec due to the - * type of CqlVector... but getElementCodecForCqlAndJavaType() is a generalized function that can't - * return this more precise type. Thus the cast here. */ - TypeCodec elementCodec = - uncheckedCast(getElementCodecForCqlAndJavaType(vectorType, token, isJavaCovariant)); - return TypeCodecs.vectorOf(vectorType, elementCodec); - } else if (cqlType instanceof CustomType - && ByteBuffer.class.isAssignableFrom(token.getRawType())) { - return TypeCodecs.custom(cqlType); - } - throw new CodecNotFoundException(cqlType, javaType); - } - } - - // Try to create a codec when we haven't found it in the cache. - // Variant where the CQL type is unknown. Can be covariant if we come from a lookup by Java value. - @NonNull - protected TypeCodec createCodec(@NonNull GenericType javaType, boolean isJavaCovariant) { - TypeToken token = javaType.__getToken(); - if (List.class.isAssignableFrom(token.getRawType()) - && token.getType() instanceof ParameterizedType) { - TypeCodec elementCodec = - getElementCodecForJavaType((ParameterizedType) token.getType(), isJavaCovariant); - return TypeCodecs.listOf(elementCodec); - } else if (Set.class.isAssignableFrom(token.getRawType()) - && token.getType() instanceof ParameterizedType) { - TypeCodec elementCodec = - getElementCodecForJavaType((ParameterizedType) token.getType(), isJavaCovariant); - return TypeCodecs.setOf(elementCodec); - } else if (Map.class.isAssignableFrom(token.getRawType()) - && token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType keyType = GenericType.of(typeArguments[0]); - GenericType valueType = GenericType.of(typeArguments[1]); - TypeCodec keyCodec = codecFor(keyType, isJavaCovariant); - TypeCodec valueCodec = codecFor(valueType, isJavaCovariant); - return TypeCodecs.mapOf(keyCodec, valueCodec); - } - /* Note 
that this method cannot generate TypeCodec instances for any CqlVector type. VectorCodec needs - * to know the dimensions of the vector it will be operating on and there's no way to determine that from - * the Java type alone. */ - throw new CodecNotFoundException(null, javaType); - } - - // Try to create a codec when we haven't found it in the cache. - // Variant where the Java type is unknown. - @NonNull - protected TypeCodec createCodec(@NonNull DataType cqlType) { - if (cqlType instanceof ListType) { - DataType elementType = ((ListType) cqlType).getElementType(); - TypeCodec elementCodec = codecFor(elementType); - return TypeCodecs.listOf(elementCodec); - } else if (cqlType instanceof SetType) { - DataType elementType = ((SetType) cqlType).getElementType(); - TypeCodec elementCodec = codecFor(elementType); - return TypeCodecs.setOf(elementCodec); - } else if (cqlType instanceof MapType) { - DataType keyType = ((MapType) cqlType).getKeyType(); - DataType valueType = ((MapType) cqlType).getValueType(); - TypeCodec keyCodec = codecFor(keyType); - TypeCodec valueCodec = codecFor(valueType); - return TypeCodecs.mapOf(keyCodec, valueCodec); - } else if (cqlType instanceof VectorType) { - VectorType vectorType = (VectorType) cqlType; - TypeCodec elementCodec = - uncheckedCast(codecFor(vectorType.getElementType())); - return TypeCodecs.vectorOf(vectorType, elementCodec); - } else if (cqlType instanceof TupleType) { - return TypeCodecs.tupleOf((TupleType) cqlType); - } else if (cqlType instanceof UserDefinedType) { - return TypeCodecs.udtOf((UserDefinedType) cqlType); - } else if (cqlType instanceof CustomType) { - return TypeCodecs.custom(cqlType); - } - throw new CodecNotFoundException(cqlType, null); - } - - private static IntMap> sortByProtocolCode(TypeCodec[] codecs) { - IntMap.Builder> builder = IntMap.builder(); - for (TypeCodec codec : codecs) { - builder.put(codec.getCqlType().getProtocolCode(), codec); - } - return builder.build(); - } - - // We call this 
after validating the types, so we know the cast will never fail. - private static TypeCodec uncheckedCast( - TypeCodec codec) { - @SuppressWarnings("unchecked") - TypeCodec result = (TypeCodec) codec; - return result; - } - - // These are mock types that are used as placeholders when we try to find a codec for an empty - // Java collection instance. All empty collections are serialized in the same way, so any element - // type will do: - private static final GenericType> JAVA_TYPE_FOR_EMPTY_LISTS = - GenericType.listOf(Boolean.class); - private static final GenericType> JAVA_TYPE_FOR_EMPTY_SETS = - GenericType.setOf(Boolean.class); - private static final GenericType> JAVA_TYPE_FOR_EMPTY_MAPS = - GenericType.mapOf(Boolean.class, Boolean.class); - private static final GenericType> JAVA_TYPE_FOR_EMPTY_CQLVECTORS = - GenericType.vectorOf(Number.class); - private static final DataType CQL_TYPE_FOR_EMPTY_LISTS = DataTypes.listOf(DataTypes.BOOLEAN); - private static final DataType CQL_TYPE_FOR_EMPTY_SETS = DataTypes.setOf(DataTypes.BOOLEAN); - private static final DataType CQL_TYPE_FOR_EMPTY_MAPS = - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN); - private static final DataType CQL_TYPE_FOR_EMPTY_VECTORS = DataTypes.vectorOf(DataTypes.INT, 0); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java deleted file mode 100644 index bbf77bdf5dc..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; - -public class CodecRegistryConstants { - - /** - * The driver's default primitive codecs (map all primitive CQL types to their "natural" Java - * equivalent). - * - *

This is exposed in case you want to call {@link - * DefaultCodecRegistry#DefaultCodecRegistry(String, int, BiFunction, int, BiConsumer, - * TypeCodec[])} but only customize the caching options. - */ - public static final TypeCodec[] PRIMITIVE_CODECS = - new TypeCodec[] { - // Must be declared before AsciiCodec so it gets chosen when CQL type not available - TypeCodecs.TEXT, - // Must be declared before TimeUUIDCodec so it gets chosen when CQL type not available - TypeCodecs.UUID, - TypeCodecs.TIMEUUID, - TypeCodecs.TIMESTAMP, - TypeCodecs.INT, - TypeCodecs.BIGINT, - TypeCodecs.BLOB, - TypeCodecs.DOUBLE, - TypeCodecs.FLOAT, - TypeCodecs.DECIMAL, - TypeCodecs.VARINT, - TypeCodecs.INET, - TypeCodecs.BOOLEAN, - TypeCodecs.SMALLINT, - TypeCodecs.TINYINT, - TypeCodecs.DATE, - TypeCodecs.TIME, - TypeCodecs.DURATION, - TypeCodecs.COUNTER, - TypeCodecs.ASCII - }; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java deleted file mode 100644 index cc14740e180..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.DriverExecutionException; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Throwables; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheBuilder; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheLoader; -import com.datastax.oss.driver.shaded.guava.common.cache.LoadingCache; -import com.datastax.oss.driver.shaded.guava.common.cache.RemovalListener; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ExecutionError; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.UncheckedExecutionException; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default codec registry implementation. - * - *

It is a caching registry based on Guava cache (note that the driver shades Guava). - */ -@ThreadSafe -public class DefaultCodecRegistry extends CachingCodecRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultCodecRegistry.class); - - private final LoadingCache> cache; - - /** - * Creates a new instance that accepts user codecs, with the default built-in codecs and the - * default cache behavior. - */ - public DefaultCodecRegistry(@NonNull String logPrefix) { - this(logPrefix, CodecRegistryConstants.PRIMITIVE_CODECS); - } - - /** - * Creates a new instance that accepts user codecs, with the given built-in codecs and the default - * cache behavior. - */ - public DefaultCodecRegistry(@NonNull String logPrefix, @NonNull TypeCodec... primitiveCodecs) { - this(logPrefix, 0, null, 0, null, primitiveCodecs); - } - - /** - * Same as {@link #DefaultCodecRegistry(String, TypeCodec[])}, but with some amount of control - * over cache behavior. - * - *

Giving full access to the Guava cache API would be too much work, since it is shaded and we - * have to wrap everything. If you need something that's not available here, it's easy enough to - * write your own CachingCodecRegistry implementation. It's doubtful that stuff like cache - * eviction is that useful anyway. - */ - public DefaultCodecRegistry( - @NonNull String logPrefix, - int initialCacheCapacity, - @Nullable BiFunction, Integer> cacheWeigher, - int maximumCacheWeight, - @Nullable BiConsumer> cacheRemovalListener, - @NonNull TypeCodec... primitiveCodecs) { - - super(logPrefix, primitiveCodecs); - CacheBuilder cacheBuilder = CacheBuilder.newBuilder(); - if (initialCacheCapacity > 0) { - cacheBuilder.initialCapacity(initialCacheCapacity); - } - if (cacheWeigher != null) { - cacheBuilder.weigher(cacheWeigher::apply).maximumWeight(maximumCacheWeight); - } - CacheLoader> cacheLoader = - new CacheLoader>() { - @Override - public TypeCodec load(@NonNull CacheKey key) throws Exception { - return createCodec(key.cqlType, key.javaType, key.isJavaCovariant); - } - }; - if (cacheRemovalListener != null) { - this.cache = - cacheBuilder - .removalListener( - (RemovalListener>) - notification -> - cacheRemovalListener.accept( - notification.getKey(), notification.getValue())) - .build(cacheLoader); - } else { - this.cache = cacheBuilder.build(cacheLoader); - } - } - - @Override - protected TypeCodec getCachedCodec( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant) { - LOG.trace("[{}] Checking cache", logPrefix); - try { - return cache.getUnchecked(new CacheKey(cqlType, javaType, isJavaCovariant)); - } catch (UncheckedExecutionException | ExecutionError e) { - // unwrap exception cause and throw it directly. 
- Throwable cause = e.getCause(); - if (cause != null) { - Throwables.throwIfUnchecked(cause); - throw new DriverExecutionException(cause); - } else { - // Should never happen, throw just in case - throw new RuntimeException(e.getMessage()); - } - } - } - - public static final class CacheKey { - - public final DataType cqlType; - public final GenericType javaType; - public final boolean isJavaCovariant; - - public CacheKey( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant) { - this.javaType = javaType; - this.cqlType = cqlType; - this.isJavaCovariant = isJavaCovariant; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CacheKey) { - CacheKey that = (CacheKey) other; - return Objects.equals(this.cqlType, that.cqlType) - && Objects.equals(this.javaType, that.javaType) - && this.isJavaCovariant == that.isJavaCovariant; - } else { - return false; - } - } - - @Override - public int hashCode() { - // NOTE: inlined Objects.hash for performance reasons (avoid Object[] allocation - // seen in profiler allocation traces) - return ((31 + Objects.hashCode(cqlType)) * 31 + Objects.hashCode(javaType)) * 31 - + Boolean.hashCode(isJavaCovariant); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java deleted file mode 100644 index 552f84f2ae1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -package com.datastax.oss.driver.internal.core.type.util; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.nio.ByteBuffer; - -/** - * Variable length encoding inspired from Google varints. - * - *

Cassandra vints are encoded with the most significant group first. The most significant byte - * will contains the information about how many extra bytes need to be read as well as the most - * significant bits of the integer. The number of extra bytes to read is encoded as 1 bit on the - * left side. For example, if we need to read 3 more bytes the first byte will start with 1110. If - * the encoded integer is 8 bytes long the vint will be encoded on 9 bytes and the first byte will - * be: 11111111 - * - *

Signed integers are (like protocol buffer varints) encoded using the ZigZag encoding so that - * numbers with a small absolute value have a small vint encoded value too. - * - *

Note that there is also a type called {@code varint} in the CQL protocol specification. This - * is completely unrelated. - */ -public class VIntCoding { - - private static long readUnsignedVInt(DataInput input) throws IOException { - int firstByte = input.readByte(); - - // Bail out early if this is one byte, necessary or it fails later - if (firstByte >= 0) { - return firstByte; - } - - int size = numberOfExtraBytesToRead(firstByte); - long retval = firstByte & firstByteValueMask(size); - for (int ii = 0; ii < size; ii++) { - byte b = input.readByte(); - retval <<= 8; - retval |= b & 0xff; - } - - return retval; - } - - public static long readVInt(DataInput input) throws IOException { - return decodeZigZag64(readUnsignedVInt(input)); - } - - // & this with the first byte to give the value part for a given extraBytesToRead encoded in the - // byte - private static int firstByteValueMask(int extraBytesToRead) { - // by including the known 0bit in the mask, we can use this for encodeExtraBytesToRead - return 0xff >> extraBytesToRead; - } - - private static byte encodeExtraBytesToRead(int extraBytesToRead) { - // because we have an extra bit in the value mask, we just need to invert it - return (byte) ~firstByteValueMask(extraBytesToRead); - } - - private static int numberOfExtraBytesToRead(int firstByte) { - // we count number of set upper bits; so if we simply invert all of the bits, we're golden - // this is aided by the fact that we only work with negative numbers, so when upcast to an int - // all - // of the new upper bits are also set, so by inverting we set all of them to zero - return Integer.numberOfLeadingZeros(~firstByte) - 24; - } - - private static final ThreadLocal encodingBuffer = - ThreadLocal.withInitial(() -> new byte[9]); - - private static void writeUnsignedVInt(long value, DataOutput output) throws IOException { - int size = VIntCoding.computeUnsignedVIntSize(value); - if (size == 1) { - output.write((int) value); - return; - } - - 
output.write(VIntCoding.encodeVInt(value, size), 0, size); - } - - private static byte[] encodeVInt(long value, int size) { - byte encodingSpace[] = encodingBuffer.get(); - int extraBytes = size - 1; - - for (int i = extraBytes; i >= 0; --i) { - encodingSpace[i] = (byte) value; - value >>= 8; - } - encodingSpace[0] |= encodeExtraBytesToRead(extraBytes); - return encodingSpace; - } - - public static void writeVInt(long value, DataOutput output) throws IOException { - writeUnsignedVInt(encodeZigZag64(value), output); - } - - /** - * Decode a ZigZag-encoded 64-bit value. ZigZag encodes signed integers into values that can be - * efficiently encoded with varint. (Otherwise, negative values must be sign-extended to 64 bits - * to be varint encoded, thus always taking 10 bytes on the wire.) - * - * @param n an unsigned 64-bit integer, stored in a signed int because Java has no explicit - * unsigned support. - * @return a signed 64-bit integer. - */ - private static long decodeZigZag64(final long n) { - return (n >>> 1) ^ -(n & 1); - } - - /** - * Encode a ZigZag-encoded 64-bit value. ZigZag encodes signed integers into values that can be - * efficiently encoded with varint. (Otherwise, negative values must be sign-extended to 64 bits - * to be varint encoded, thus always taking 10 bytes on the wire.) - * - * @param n a signed 64-bit integer. - * @return an unsigned 64-bit integer, stored in a signed int because Java has no explicit - * unsigned support. - */ - private static long encodeZigZag64(final long n) { - // Note: the right-shift must be arithmetic - return (n << 1) ^ (n >> 63); - } - - /** Compute the number of bytes that would be needed to encode a varint. */ - public static int computeVIntSize(final long param) { - return computeUnsignedVIntSize(encodeZigZag64(param)); - } - - /** Compute the number of bytes that would be needed to encode an unsigned varint. 
*/ - public static int computeUnsignedVIntSize(final long value) { - int magnitude = - Long.numberOfLeadingZeros( - value | 1); // | with 1 to ensure magnitude <= 63, so (63 - 1) / 7 <= 8 - return (639 - magnitude * 9) >> 6; - } - - public static void writeUnsignedVInt32(int value, ByteBuffer output) { - writeUnsignedVInt((long) value, output); - } - - public static void writeUnsignedVInt(long value, ByteBuffer output) { - int size = VIntCoding.computeUnsignedVIntSize(value); - if (size == 1) { - output.put((byte) value); - return; - } - - output.put(VIntCoding.encodeVInt(value, size), 0, size); - } - - /** - * Read up to a 32-bit integer back, using the unsigned (no zigzag) encoding. - * - *

Note this method is the same as {@link #readUnsignedVInt(DataInput)}, except that we do - * *not* block if there are not enough bytes in the buffer to reconstruct the value. - * - * @throws VIntOutOfRangeException If the vint doesn't fit into a 32-bit integer - */ - public static int getUnsignedVInt32(ByteBuffer input, int readerIndex) { - return checkedCast(getUnsignedVInt(input, readerIndex)); - } - - public static long getUnsignedVInt(ByteBuffer input, int readerIndex) { - return getUnsignedVInt(input, readerIndex, input.limit()); - } - - public static long getUnsignedVInt(ByteBuffer input, int readerIndex, int readerLimit) { - if (readerIndex < 0) - throw new IllegalArgumentException( - "Reader index should be non-negative, but was " + readerIndex); - - if (readerIndex >= readerLimit) return -1; - - int firstByte = input.get(readerIndex++); - - // Bail out early if this is one byte, necessary or it fails later - if (firstByte >= 0) return firstByte; - - int size = numberOfExtraBytesToRead(firstByte); - if (readerIndex + size > readerLimit) return -1; - - long retval = firstByte & firstByteValueMask(size); - for (int ii = 0; ii < size; ii++) { - byte b = input.get(readerIndex++); - retval <<= 8; - retval |= b & 0xff; - } - - return retval; - } - - public static int checkedCast(long value) { - int result = (int) value; - if ((long) result != value) throw new VIntOutOfRangeException(value); - return result; - } - - /** - * Throw when attempting to decode a vint and the output type doesn't have enough space to fit the - * value that was decoded - */ - public static class VIntOutOfRangeException extends RuntimeException { - public final long value; - - private VIntOutOfRangeException(long value) { - super(value + " is out of range for a 32-bit integer"); - this.value = value; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/AddressUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/AddressUtils.java deleted 
file mode 100644 index 8905edb9192..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/AddressUtils.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.HashSet; -import java.util.Set; - -public class AddressUtils { - - public static Set extract(String address, boolean resolve) { - int separator = address.lastIndexOf(':'); - if (separator < 0) { - throw new IllegalArgumentException("expecting format host:port"); - } - - String host = address.substring(0, separator); - String portString = address.substring(separator + 1); - int port; - try { - port = Integer.parseInt(portString); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("expecting port to be a number, got " + portString, e); - } - if (!resolve) { - return ImmutableSet.of(InetSocketAddress.createUnresolved(host, port)); - } else { - InetAddress[] inetAddresses; - try { - inetAddresses = InetAddress.getAllByName(host); - } 
catch (UnknownHostException e) { - throw new RuntimeException(e); - } - Set result = new HashSet<>(); - for (InetAddress inetAddress : inetAddresses) { - result.add(new InetSocketAddress(inetAddress, port)); - } - return result; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/ArrayUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/ArrayUtils.java deleted file mode 100644 index 490b1dc7d17..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/ArrayUtils.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Random; -import java.util.concurrent.ThreadLocalRandom; - -public class ArrayUtils { - - public static void swap(@NonNull ElementT[] elements, int i, int j) { - if (i != j) { - ElementT tmp = elements[i]; - elements[i] = elements[j]; - elements[j] = tmp; - } - } - - /** - * Moves an element towards the beginning of the array, shifting all the intermediary elements to - * the right (no-op if {@code targetIndex >= sourceIndex}). 
- */ - public static void bubbleUp( - @NonNull ElementT[] elements, int sourceIndex, int targetIndex) { - for (int i = sourceIndex; i > targetIndex; i--) { - swap(elements, i, i - 1); - } - } - - /** - * Moves an element towards the end of the array, shifting all the intermediary elements to the - * left (no-op if {@code targetIndex <= sourceIndex}). - */ - public static void bubbleDown( - @NonNull ElementT[] elements, int sourceIndex, int targetIndex) { - for (int i = sourceIndex; i < targetIndex; i++) { - swap(elements, i, i + 1); - } - } - - /** - * Shuffles the first n elements of the array in-place. - * - * @param elements the array to shuffle. - * @param n the number of elements to shuffle; must be {@code <= elements.length}. - * @see Modern - * Fisher-Yates shuffle - */ - public static void shuffleHead(@NonNull ElementT[] elements, int n) { - shuffleHead(elements, n, ThreadLocalRandom.current()); - } - - /** - * Shuffles the first n elements of the array in-place. - * - * @param elements the array to shuffle. - * @param n the number of elements to shuffle; must be {@code <= elements.length}. - * @param random the {@link ThreadLocalRandom} instance to use. This is mainly intended to - * facilitate tests. - * @see Modern - * Fisher-Yates shuffle - */ - public static void shuffleHead( - @NonNull ElementT[] elements, int n, @NonNull Random random) { - if (n > elements.length) { - throw new ArrayIndexOutOfBoundsException( - String.format( - "Can't shuffle the first %d elements, there are only %d", n, elements.length)); - } - if (n > 1) { - for (int i = n - 1; i > 0; i--) { - int j = random.nextInt(i + 1); - swap(elements, i, j); - } - } - } - - /** Rotates the elements in the specified range by the specified amount (round-robin). */ - public static void rotate( - @NonNull ElementT[] elements, int startIndex, int length, int amount) { - if (length >= 2) { - amount = amount % length; - // Repeatedly shift by 1. 
This is not the most time-efficient but the array will typically be - // small so we don't care, and this avoids allocating a temporary buffer. - for (int i = 0; i < amount; i++) { - bubbleDown(elements, startIndex, startIndex + length - 1); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/CollectionsUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/CollectionsUtils.java deleted file mode 100644 index 0dd9a85fcc6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/CollectionsUtils.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util; - -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -public class CollectionsUtils { - public static Map combineListsIntoOrderedMap(List keys, List values) { - if (keys.size() != values.size()) { - throw new IllegalArgumentException("Cannot combine lists with not matching sizes"); - } - - Map map = new LinkedHashMap<>(); - for (int i = 0; i < keys.size(); i++) { - map.put(keys.get(i), values.get(i)); - } - return map; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java deleted file mode 100644 index 391996d9369..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Iterator; -import java.util.NoSuchElementException; -import net.jcip.annotations.NotThreadSafe; - -/** - * An iterator that knows in advance how many elements it will return, and maintains a counter as - * elements get returned. - */ -@NotThreadSafe -public abstract class CountingIterator implements Iterator { - - protected int remaining; - - public CountingIterator(int remaining) { - this.remaining = remaining; - } - - public int remaining() { - return remaining; - } - - /* - * The rest of this class was adapted from Guava's `AbstractIterator` (which we can't extend - * because its `next` method is final). Guava copyright notice follows: - * - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - private enum State { - READY, - NOT_READY, - DONE, - FAILED, - } - - private State state = State.NOT_READY; - private ElementT next; - - protected abstract ElementT computeNext(); - - protected final ElementT endOfData() { - state = State.DONE; - return null; - } - - @Override - public final boolean hasNext() { - Preconditions.checkState(state != State.FAILED); - switch (state) { - case DONE: - return false; - case READY: - return true; - default: - } - return tryToComputeNext(); - } - - private boolean tryToComputeNext() { - state = State.FAILED; // temporary pessimism - next = computeNext(); - if (state != State.DONE) { - state = State.READY; - return true; - } - return false; - } - - @Override - public final ElementT next() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - state = State.NOT_READY; - ElementT result = next; - next = null; - // Added to original Guava code: decrement counter when we return an element - remaining -= 1; - return result; - } - - public final ElementT peek() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - return next; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/DefaultDependencyChecker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/DefaultDependencyChecker.java deleted file mode 100644 index 2e717590569..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/DefaultDependencyChecker.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import java.util.concurrent.ConcurrentHashMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A checker for the presence of various {@link Dependency} instances at runtime. Predicate tests - * for Graal substitutions should NOT use this class; see {@link GraalDependencyChecker} for more - * information. - */ -public class DefaultDependencyChecker { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultDependencyChecker.class); - - private static final ConcurrentHashMap CACHE = new ConcurrentHashMap<>(); - - /** - * Return true iff we can find all classes for the dependency on the classpath, false otherwise - * - * @param dependency the dependency to search for - * @return true if the dependency is available, false otherwise - */ - public static boolean isPresent(Dependency dependency) { - try { - return CACHE.computeIfAbsent( - dependency, - (dep) -> { - for (String classNameToTest : dependency.classes()) { - // Always use the driver class loader, assuming that the driver classes and - // the dependency classes are either being loaded by the same class loader, - // or – as in OSGi deployments – by two distinct, but compatible class loaders. 
- if (Reflection.loadClass(null, classNameToTest) == null) { - return false; - } - } - return true; - }); - } catch (Exception e) { - LOG.warn("Unexpected exception when checking for dependency " + dependency, e); - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Dependency.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Dependency.java deleted file mode 100644 index 97cfa25d9af..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Dependency.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -/** - * A set of driver optional dependencies and a common mechanism to test the presence of such - * dependencies on the application's classpath. - * - *

We use the given fully-qualified names of classes to test the presence of the whole dependency - * on the classpath, including its transitive dependencies if applicable. This assumes that if these - * classes are present, then the entire library is present and functional, and vice versa. - * - *

Note: some of the libraries declared here may be shaded; in these cases the shade plugin will - * replace the package names listed above with names starting with {@code - * com.datastax.oss.driver.shaded.*}, but the presence check would still work as expected. - */ -public enum Dependency { - SNAPPY("org.xerial.snappy.Snappy"), - LZ4("net.jpountz.lz4.LZ4Compressor"), - ESRI("com.esri.core.geometry.ogc.OGCGeometry"), - TINKERPOP( - // gremlin-core - "org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal", - // tinkergraph-gremlin - "org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0"), - REACTIVE_STREAMS("org.reactivestreams.Publisher"), - JACKSON( - // jackson-core - "com.fasterxml.jackson.core.JsonParser", - // jackson-databind - "com.fasterxml.jackson.databind.ObjectMapper"), - DROPWIZARD("com.codahale.metrics.MetricRegistry"), - ; - - @SuppressWarnings("ImmutableEnumChecker") - private final List clzs; - - Dependency(String... classNames) { - clzs = Collections.unmodifiableList(Arrays.asList(classNames)); - } - - public Iterable classes() { - return this.clzs; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/DirectedGraph.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/DirectedGraph.java deleted file mode 100644 index b9ab863cb88..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/DirectedGraph.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.LinkedHashMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; -import java.util.ArrayDeque; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import net.jcip.annotations.NotThreadSafe; - -/** A basic directed graph implementation to perform topological sorts. */ -@NotThreadSafe -public class DirectedGraph { - - // We need to keep track of the predecessor count. For simplicity, use a map to store it - // alongside the vertices. - private final Map vertices; - private final Multimap adjacencyList; - private boolean wasSorted; - - public DirectedGraph(Collection vertices) { - this.vertices = Maps.newLinkedHashMapWithExpectedSize(vertices.size()); - this.adjacencyList = LinkedHashMultimap.create(); - - for (VertexT vertex : vertices) { - this.vertices.put(vertex, 0); - } - } - - @VisibleForTesting - @SafeVarargs - DirectedGraph(VertexT... 
vertices) { - this(Arrays.asList(vertices)); - } - - /** - * this assumes that {@code from} and {@code to} were part of the vertices passed to the - * constructor - */ - public void addEdge(VertexT from, VertexT to) { - Preconditions.checkArgument(vertices.containsKey(from) && vertices.containsKey(to)); - adjacencyList.put(from, to); - vertices.put(to, vertices.get(to) + 1); - } - - /** one-time use only, calling this multiple times on the same graph won't work */ - public List topologicalSort() { - Preconditions.checkState(!wasSorted); - wasSorted = true; - - Queue queue = new ArrayDeque<>(); - - for (Map.Entry entry : vertices.entrySet()) { - if (entry.getValue() == 0) { - queue.add(entry.getKey()); - } - } - - List result = Lists.newArrayList(); - while (!queue.isEmpty()) { - VertexT vertex = queue.remove(); - result.add(vertex); - for (VertexT successor : adjacencyList.get(vertex)) { - if (decrementAndGetCount(successor) == 0) { - queue.add(successor); - } - } - } - - if (result.size() != vertices.size()) { - throw new IllegalArgumentException("failed to perform topological sort, graph has a cycle"); - } - - return result; - } - - private int decrementAndGetCount(VertexT vertex) { - Integer count = vertices.get(vertex); - count = count - 1; - vertices.put(vertex, count); - return count; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/GraalDependencyChecker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/GraalDependencyChecker.java deleted file mode 100644 index c80970eb3b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/GraalDependencyChecker.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import java.util.concurrent.ConcurrentHashMap; - -/** - * A dependency checker implementation which should be safe to use for build-time checks when - * building Graal native images. This class is similar to {@link DefaultDependencyChecker} but - * doesn't introduce any external dependencies which might complicate the native image build - * process. Expectation is that this will be most prominently used in the various predicate classes - * which determine whether or not Graal substitutions should be used. - */ -public class GraalDependencyChecker { - - private static final ConcurrentHashMap CACHE = new ConcurrentHashMap<>(); - - /** - * Return true iff we can find all classes for the dependency on the classpath, false otherwise - * - * @param dependency the dependency to search for - * @return true if the dependency is available, false otherwise - */ - public static boolean isPresent(Dependency dependency) { - try { - return CACHE.computeIfAbsent( - dependency, - (dep) -> { - for (String classNameToTest : dependency.classes()) { - // Note that this lands in a pretty similar spot to - // Reflection.loadClass() with a null class loader - // arg. Major difference here is that we avoid the - // more complex exception handling/logging ops in - // that code. 
- try { - Class.forName(classNameToTest); - } catch (LinkageError | Exception e) { - return false; - } - } - return true; - }); - } catch (Exception e) { - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java deleted file mode 100644 index 99dca2c60c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.data.AccessibleById; -import com.datastax.oss.driver.api.core.data.AccessibleByName; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class Loggers { - - /** - * Emits a warning log that includes an exception. If the current level is debug, the full stack - * trace is included, otherwise only the exception's message. - */ - public static void warnWithException(Logger logger, String format, Object... 
arguments) { - if (logger.isDebugEnabled()) { - logger.warn(format, arguments); - } else { - Object last = arguments[arguments.length - 1]; - if (last instanceof Throwable) { - Throwable t = (Throwable) last; - arguments[arguments.length - 1] = t.getClass().getSimpleName() + ": " + t.getMessage(); - logger.warn(format + " ({})", arguments); - } else { - // Should only be called with an exception as last argument, but handle gracefully anyway - logger.warn(format, arguments); - } - } - } - - // Loggers for API interfaces, declared here in order to keep them internal. - public static Logger COLUMN_DEFINITIONS = LoggerFactory.getLogger(ColumnDefinitions.class); - public static Logger ACCESSIBLE_BY_ID = LoggerFactory.getLogger(AccessibleById.class); - public static Logger ACCESSIBLE_BY_NAME = LoggerFactory.getLogger(AccessibleByName.class); - public static Logger USER_DEFINED_TYPE = LoggerFactory.getLogger(UserDefinedType.class); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java deleted file mode 100644 index 0001bc9925c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -public class NanoTime { - - private static final long ONE_HOUR = 3600L * 1000 * 1000 * 1000; - private static final long ONE_MINUTE = 60L * 1000 * 1000 * 1000; - private static final long ONE_SECOND = 1000 * 1000 * 1000; - private static final long ONE_MILLISECOND = 1000 * 1000; - private static final long ONE_MICROSECOND = 1000; - - /** Formats a duration in the best unit (truncating the fractional part). */ - public static String formatTimeSince(long startTimeNs) { - return format(System.nanoTime() - startTimeNs); - } - - /** Formats a duration in the best unit (truncating the fractional part). */ - public static String format(long elapsedNs) { - if (elapsedNs >= ONE_HOUR) { - long hours = elapsedNs / ONE_HOUR; - long minutes = (elapsedNs % ONE_HOUR) / ONE_MINUTE; - return hours + " h " + minutes + " mn"; - } else if (elapsedNs >= ONE_MINUTE) { - long minutes = elapsedNs / ONE_MINUTE; - long seconds = (elapsedNs % ONE_MINUTE) / ONE_SECOND; - return minutes + " mn " + seconds + " s"; - } else if (elapsedNs >= ONE_SECOND) { - long seconds = elapsedNs / ONE_SECOND; - long milliseconds = (elapsedNs % ONE_SECOND) / ONE_MILLISECOND; - return seconds + "." 
+ milliseconds + " s"; - } else if (elapsedNs >= ONE_MILLISECOND) { - return (elapsedNs / ONE_MILLISECOND) + " ms"; - } else if (elapsedNs >= ONE_MICROSECOND) { - return (elapsedNs / ONE_MICROSECOND) + " us"; - } else { - return elapsedNs + " ns"; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/ProtocolUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/ProtocolUtils.java deleted file mode 100644 index f653ea6f5f9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/ProtocolUtils.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.protocol.internal.ProtocolConstants; - -public class ProtocolUtils { - /** - * Formats a message opcode for logs and error messages. - * - *

Note that the reason why we don't use enums is because the driver can be extended with - * custom opcodes. - */ - public static String opcodeString(int opcode) { - switch (opcode) { - case ProtocolConstants.Opcode.ERROR: - return "ERROR"; - case ProtocolConstants.Opcode.STARTUP: - return "STARTUP"; - case ProtocolConstants.Opcode.READY: - return "READY"; - case ProtocolConstants.Opcode.AUTHENTICATE: - return "AUTHENTICATE"; - case ProtocolConstants.Opcode.OPTIONS: - return "OPTIONS"; - case ProtocolConstants.Opcode.SUPPORTED: - return "SUPPORTED"; - case ProtocolConstants.Opcode.QUERY: - return "QUERY"; - case ProtocolConstants.Opcode.RESULT: - return "RESULT"; - case ProtocolConstants.Opcode.PREPARE: - return "PREPARE"; - case ProtocolConstants.Opcode.EXECUTE: - return "EXECUTE"; - case ProtocolConstants.Opcode.REGISTER: - return "REGISTER"; - case ProtocolConstants.Opcode.EVENT: - return "EVENT"; - case ProtocolConstants.Opcode.BATCH: - return "BATCH"; - case ProtocolConstants.Opcode.AUTH_CHALLENGE: - return "AUTH_CHALLENGE"; - case ProtocolConstants.Opcode.AUTH_RESPONSE: - return "AUTH_RESPONSE"; - case ProtocolConstants.Opcode.AUTH_SUCCESS: - return "AUTH_SUCCESS"; - default: - return "0x" + Integer.toHexString(opcode); - } - } - - /** - * Formats an error code for logs and error messages. - * - *

Note that the reason why we don't use enums is because the driver can be extended with - * custom codes. - */ - public static String errorCodeString(int errorCode) { - switch (errorCode) { - case ProtocolConstants.ErrorCode.SERVER_ERROR: - return "SERVER_ERROR"; - case ProtocolConstants.ErrorCode.PROTOCOL_ERROR: - return "PROTOCOL_ERROR"; - case ProtocolConstants.ErrorCode.AUTH_ERROR: - return "AUTH_ERROR"; - case ProtocolConstants.ErrorCode.UNAVAILABLE: - return "UNAVAILABLE"; - case ProtocolConstants.ErrorCode.OVERLOADED: - return "OVERLOADED"; - case ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING: - return "IS_BOOTSTRAPPING"; - case ProtocolConstants.ErrorCode.TRUNCATE_ERROR: - return "TRUNCATE_ERROR"; - case ProtocolConstants.ErrorCode.WRITE_TIMEOUT: - return "WRITE_TIMEOUT"; - case ProtocolConstants.ErrorCode.READ_TIMEOUT: - return "READ_TIMEOUT"; - case ProtocolConstants.ErrorCode.READ_FAILURE: - return "READ_FAILURE"; - case ProtocolConstants.ErrorCode.FUNCTION_FAILURE: - return "FUNCTION_FAILURE"; - case ProtocolConstants.ErrorCode.WRITE_FAILURE: - return "WRITE_FAILURE"; - case ProtocolConstants.ErrorCode.CDC_WRITE_FAILURE: - return "CDC_WRITE_FAILURE"; - case ProtocolConstants.ErrorCode.CAS_WRITE_UNKNOWN: - return "CAS_WRITE_UNKNOWN"; - case ProtocolConstants.ErrorCode.SYNTAX_ERROR: - return "SYNTAX_ERROR"; - case ProtocolConstants.ErrorCode.UNAUTHORIZED: - return "UNAUTHORIZED"; - case ProtocolConstants.ErrorCode.INVALID: - return "INVALID"; - case ProtocolConstants.ErrorCode.CONFIG_ERROR: - return "CONFIG_ERROR"; - case ProtocolConstants.ErrorCode.ALREADY_EXISTS: - return "ALREADY_EXISTS"; - case ProtocolConstants.ErrorCode.UNPREPARED: - return "UNPREPARED"; - default: - return "0x" + Integer.toHexString(errorCode); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java deleted file mode 100644 index 75a8f5b7380..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.MultimapBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class Reflection { - - private static final Logger LOG = LoggerFactory.getLogger(Reflection.class); - - /** - * Loads a class by name using the given {@link ClassLoader}. - * - *

If the class loader is null, the class will be loaded using the class loader that loaded the - * driver. - * - * @return null if the class does not exist or could not be loaded. - */ - @Nullable - public static Class loadClass(@Nullable ClassLoader classLoader, @NonNull String className) { - try { - Class clazz; - if (classLoader == null) { - LOG.trace("Attempting to load {} with driver's class loader", className); - clazz = Class.forName(className); - } else { - LOG.trace("Attempting to load {} with {}", className, classLoader); - clazz = Class.forName(className, true, classLoader); - } - LOG.trace("Successfully loaded {}", className); - return clazz; - } catch (LinkageError | Exception e) { - // Note: only ClassNotFoundException, LinkageError and SecurityException - // are declared to be thrown; however some class loaders (Apache Felix) - // may throw other checked exceptions, which cannot be caught directly - // because that would cause a compilation failure. - LOG.debug( - String.format("Could not load %s with loader %s: %s", className, classLoader, e), e); - if (classLoader == null) { - return null; - } else { - // If the user-supplied class loader is unable to locate the class, try with the driver's - // default class loader. This is useful in OSGi deployments where the user-supplied loader - // may be able to load some classes but not all of them. Besides, the driver bundle, in - // OSGi, has a "Dynamic-Import:*" directive that makes its class loader capable of locating - // a great number of classes. - return loadClass(null, className); - } - } - } - - /** - * Tries to create an instance of a class, given an option defined in the driver configuration. - * - *

For example: - * - *

-   * my-policy.class = my.package.MyPolicyImpl
-   * 
- * - * The class will be instantiated via reflection, it must have a constructor that takes a {@link - * DriverContext} argument. - * - * @param context the driver context. - * @param classNameOption the option that indicates the class. It will be looked up in the default - * profile of the configuration stored in the context. - * @param expectedSuperType a super-type that the class is expected to implement/extend. - * @param defaultPackages the default packages to prepend to the class name if it's not qualified. - * They will be tried in order, the first one that matches an existing class will be used. - * @return the new instance, or empty if {@code classNameOption} is not defined in the - * configuration. - */ - public static Optional buildFromConfig( - InternalDriverContext context, - DriverOption classNameOption, - Class expectedSuperType, - String... defaultPackages) { - return buildFromConfig(context, null, classNameOption, expectedSuperType, defaultPackages); - } - - /** - * Tries to create a list of instances, given an option defined in the driver configuration. - * - *

For example: - * - *

-   * my-policy.classes = [my.package.MyPolicyImpl1,my.package.MyPolicyImpl2]
-   * 
- * - * Each class will be instantiated via reflection, and must have a constructor that takes a {@link - * DriverContext} argument. - * - * @param context the driver context. - * @param classNamesOption the option that indicates the class list. It will be looked up in the - * default profile of the configuration stored in the context. - * @param expectedSuperType a super-type that the classes are expected to implement/extend. - * @param defaultPackages the default packages to prepend to the class names if they are not - * qualified. They will be tried in order, the first one that matches an existing class will - * be used. - * @return the list of new instances, or an empty list if {@code classNamesOption} is not defined - * in the configuration. - */ - public static ImmutableList buildFromConfigList( - InternalDriverContext context, - DriverOption classNamesOption, - Class expectedSuperType, - String... defaultPackages) { - return buildFromConfigList(context, null, classNamesOption, expectedSuperType, defaultPackages); - } - - /** - * Tries to create multiple instances of a class, given options defined in the driver - * configuration and possibly overridden in profiles. - * - *

For example: - * - *

-   * my-policy.class = package1.PolicyImpl1
-   * profiles {
-   *   my-profile { my-policy.class = package2.PolicyImpl2 }
-   * }
-   * 
- * - * The class will be instantiated via reflection, it must have a constructor that takes two - * arguments: the {@link DriverContext}, and a string representing the profile name. - * - *

This method assumes the policy is mandatory, the class option must be present at least for - * the default profile. - * - * @param context the driver context. - * @param classNameOption the option that indicates the class (my-policy.class in the example - * above). - * @param rootOption the root of the section containing the policy's configuration (my-policy in - * the example above). Profiles that have the same contents under that section will share the - * same policy instance. - * @param expectedSuperType a super-type that the class is expected to implement/extend. - * @param defaultPackages the default packages to prepend to the class name if it's not qualified. - * They will be tried in order, the first one that matches an existing class will be used. - * @return the policy instances by profile name. If multiple profiles share the same - * configuration, a single instance will be shared by all their entries. - */ - public static Map buildFromConfigProfiles( - InternalDriverContext context, - DriverOption classNameOption, - DriverOption rootOption, - Class expectedSuperType, - String... 
defaultPackages) { - - // Find out how many distinct configurations we have - ListMultimap profilesByConfig = - MultimapBuilder.hashKeys().arrayListValues().build(); - for (DriverExecutionProfile profile : context.getConfig().getProfiles().values()) { - profilesByConfig.put(profile.getComparisonKey(rootOption), profile.getName()); - } - - // Instantiate each distinct configuration, and associate it with the corresponding profiles - ImmutableMap.Builder result = ImmutableMap.builder(); - for (Collection profiles : profilesByConfig.asMap().values()) { - // Since all profiles use the same config, we can use any of them - String profileName = profiles.iterator().next(); - ComponentT policy = - buildFromConfig(context, profileName, classNameOption, expectedSuperType, defaultPackages) - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing configuration for %s in profile %s", - rootOption.getPath(), profileName))); - for (String profile : profiles) { - result.put(profile, policy); - } - } - return result.build(); - } - - /** - * @param profileName if null, this is a global policy, use the default profile and look for a - * one-arg constructor. If not null, this is a per-profile policy, look for a two-arg - * constructor. - */ - public static Optional buildFromConfig( - InternalDriverContext context, - String profileName, - DriverOption classNameOption, - Class expectedSuperType, - String... defaultPackages) { - - DriverExecutionProfile config = - (profileName == null) - ? 
context.getConfig().getDefaultProfile() - : context.getConfig().getProfile(profileName); - - String configPath = classNameOption.getPath(); - LOG.debug("Creating a {} from config option {}", expectedSuperType.getSimpleName(), configPath); - - if (!config.isDefined(classNameOption)) { - LOG.debug("Option is not defined, skipping"); - return Optional.empty(); - } - - String className = config.getString(classNameOption); - return Optional.of( - resolveClass( - context, profileName, expectedSuperType, configPath, className, defaultPackages)); - } - - /** - * @param profileName if null, this is a global policy, use the default profile and look for a - * one-arg constructor. If not null, this is a per-profile policy, look for a two-arg - * constructor. - */ - public static ImmutableList buildFromConfigList( - InternalDriverContext context, - String profileName, - DriverOption classNamesOption, - Class expectedSuperType, - String... defaultPackages) { - - DriverExecutionProfile config = - (profileName == null) - ? 
context.getConfig().getDefaultProfile() - : context.getConfig().getProfile(profileName); - - String configPath = classNamesOption.getPath(); - LOG.debug( - "Creating a list of {} from config option {}", - expectedSuperType.getSimpleName(), - configPath); - - if (!config.isDefined(classNamesOption)) { - LOG.debug("Option is not defined, skipping"); - return ImmutableList.of(); - } - - List classNames = config.getStringList(classNamesOption); - ImmutableList.Builder components = ImmutableList.builder(); - for (String className : classNames) { - components.add( - resolveClass( - context, profileName, expectedSuperType, configPath, className, defaultPackages)); - } - return components.build(); - } - - @NonNull - private static ComponentT resolveClass( - InternalDriverContext context, - String profileName, - Class expectedSuperType, - String configPath, - String className, - String[] defaultPackages) { - Class clazz = null; - if (className.contains(".")) { - LOG.debug("Building from fully-qualified name {}", className); - clazz = loadClass(context.getClassLoader(), className); - } else { - LOG.debug("Building from unqualified name {}", className); - for (String defaultPackage : defaultPackages) { - String qualifiedClassName = defaultPackage + "." + className; - LOG.debug("Trying with default package {}", qualifiedClassName); - clazz = loadClass(context.getClassLoader(), qualifiedClassName); - if (clazz != null) { - break; - } - } - } - if (clazz == null) { - throw new IllegalArgumentException( - String.format("Can't find class %s (specified by %s)", className, configPath)); - } - Preconditions.checkArgument( - expectedSuperType.isAssignableFrom(clazz), - "Expected class %s (specified by %s) to be a subtype of %s", - className, - configPath, - expectedSuperType.getName()); - - Constructor constructor; - Class[] argumentTypes = - (profileName == null) - ? 
new Class[] {DriverContext.class} - : new Class[] {DriverContext.class, String.class}; - try { - constructor = clazz.asSubclass(expectedSuperType).getConstructor(argumentTypes); - } catch (NoSuchMethodException e) { - throw new IllegalArgumentException( - String.format( - "Expected class %s (specified by %s) " - + "to have an accessible constructor with arguments (%s)", - className, configPath, Joiner.on(',').join(argumentTypes))); - } - try { - @SuppressWarnings("JavaReflectionInvocation") - ComponentT instance = - (profileName == null) - ? constructor.newInstance(context) - : constructor.newInstance(context, profileName); - return instance; - } catch (Exception e) { - // ITE just wraps an exception thrown by the constructor, get rid of it: - Throwable cause = (e instanceof InvocationTargetException) ? e.getCause() : e; - throw new IllegalArgumentException( - String.format( - "Error instantiating class %s (specified by %s): %s", - className, configPath, cause.getMessage()), - cause); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java deleted file mode 100644 index 7d8895d228f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; - -public class RoutingKey { - - /** Assembles multiple routing key components into a single buffer. */ - @NonNull - public static ByteBuffer compose(@NonNull ByteBuffer... components) { - if (components.length == 1) return components[0]; - - int totalLength = 0; - for (ByteBuffer bb : components) totalLength += 2 + bb.remaining() + 1; - - ByteBuffer out = ByteBuffer.allocate(totalLength); - for (ByteBuffer buffer : components) { - ByteBuffer bb = buffer.duplicate(); - putShortLength(out, bb.remaining()); - out.put(bb); - out.put((byte) 0); - } - out.flip(); - return out; - } - - private static void putShortLength(ByteBuffer bb, int length) { - bb.put((byte) ((length >> 8) & 0xFF)); - bb.put((byte) (length & 0xFF)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java deleted file mode 100644 index 337895ec107..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.BatchableStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.data.ValuesHelper; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.PrimitiveSizes; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import com.datastax.oss.protocol.internal.request.query.Values; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class Sizes { - - /** Returns a common size for all kinds of Request implementations. 
*/ - public static int minimumRequestSize(Request request) { - - // Header and payload are common inside a Frame at the protocol level - - // Frame header has a fixed size of 9 for protocol version >= V3, which includes Frame flags - // size - int size = FrameCodec.V3_ENCODED_HEADER_SIZE; - - if (!request.getCustomPayload().isEmpty()) { - // Custom payload is not supported in v3, but assume user won't have a custom payload set if - // they use this version - size += PrimitiveSizes.sizeOfBytesMap(request.getCustomPayload()); - } - - return size; - } - - public static int minimumStatementSize(Statement statement, DriverContext context) { - int size = minimumRequestSize(statement); - - // These are options in the protocol inside a frame that are common to all Statements - - size += QueryOptions.queryFlagsSize(context.getProtocolVersion().getCode()); - - size += PrimitiveSizes.SHORT; // size of consistency level - size += PrimitiveSizes.SHORT; // size of serial consistency level - - return size; - } - - /** - * Returns the size in bytes of a simple statement's values, depending on whether the values are - * named or positional. 
- */ - public static int sizeOfSimpleStatementValues( - SimpleStatement simpleStatement, - ProtocolVersion protocolVersion, - CodecRegistry codecRegistry) { - int size = 0; - - if (!simpleStatement.getPositionalValues().isEmpty()) { - - List positionalValues = - new ArrayList<>(simpleStatement.getPositionalValues().size()); - for (Object value : simpleStatement.getPositionalValues()) { - positionalValues.add( - ValuesHelper.encodeToDefaultCqlMapping(value, codecRegistry, protocolVersion)); - } - - size += Values.sizeOfPositionalValues(positionalValues); - - } else if (!simpleStatement.getNamedValues().isEmpty()) { - - Map namedValues = new HashMap<>(simpleStatement.getNamedValues().size()); - for (Map.Entry value : simpleStatement.getNamedValues().entrySet()) { - namedValues.put( - value.getKey().asInternal(), - ValuesHelper.encodeToDefaultCqlMapping( - value.getValue(), codecRegistry, protocolVersion)); - } - - size += Values.sizeOfNamedValues(namedValues); - } - return size; - } - - /** Return the size in bytes of a bound statement's values. */ - public static int sizeOfBoundStatementValues(BoundStatement boundStatement) { - return Values.sizeOfPositionalValues(boundStatement.getValues()); - } - - /** - * The size of a statement inside a batch query is different from the size of a complete - * Statement. The inner batch statements only include the query or prepared ID, and the values of - * the statement. 
- */ - public static Integer sizeOfInnerBatchStatementInBytes( - BatchableStatement statement, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - int size = 0; - - size += - PrimitiveSizes - .BYTE; // for each inner statement, there is one byte for the "kind": prepared or string - - if (statement instanceof SimpleStatement) { - size += PrimitiveSizes.sizeOfLongString(((SimpleStatement) statement).getQuery()); - size += - sizeOfSimpleStatementValues( - ((SimpleStatement) statement), protocolVersion, codecRegistry); - } else if (statement instanceof BoundStatement) { - size += - PrimitiveSizes.sizeOfShortBytes( - ((BoundStatement) statement).getPreparedStatement().getId()); - size += sizeOfBoundStatementValues(((BoundStatement) statement)); - } - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java deleted file mode 100644 index 2e85b451c75..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Locale; -import java.util.Objects; - -public class Strings { - - /** - * Return {@code true} if the given string is surrounded by single quotes, and {@code false} - * otherwise. - * - * @param value The string to inspect. - * @return {@code true} if the given string is surrounded by single quotes, and {@code false} - * otherwise. - */ - public static boolean isQuoted(String value) { - return isQuoted(value, '\''); - } - - /** - * Quote the given string; single quotes are escaped. If the given string is null, this method - * returns a quoted empty string ({@code ''}). - * - * @param value The value to quote. - * @return The quoted string. - */ - public static String quote(String value) { - return quote(value, '\''); - } - - /** - * Unquote the given string if it is quoted; single quotes are unescaped. If the given string is - * not quoted, it is returned without any modification. - * - * @param value The string to unquote. - * @return The unquoted string. - */ - public static String unquote(String value) { - return unquote(value, '\''); - } - - /** - * Return {@code true} if the given string is surrounded by double quotes, and {@code false} - * otherwise. - * - * @param value The string to inspect. - * @return {@code true} if the given string is surrounded by double quotes, and {@code false} - * otherwise. - */ - public static boolean isDoubleQuoted(String value) { - return isQuoted(value, '\"'); - } - - /** - * Double quote the given string; double quotes are escaped. If the given string is null, this - * method returns a quoted empty string ({@code ""}). - * - * @param value The value to double quote. - * @return The double quoted string. 
- */ - public static String doubleQuote(String value) { - return quote(value, '"'); - } - - /** - * Unquote the given string if it is double quoted; double quotes are unescaped. If the given - * string is not double quoted, it is returned without any modification. - * - * @param value The string to un-double quote. - * @return The un-double quoted string. - */ - public static String unDoubleQuote(String value) { - return unquote(value, '"'); - } - - /** Whether a string needs double quotes to be a valid CQL identifier. */ - public static boolean needsDoubleQuotes(String s) { - // this method should only be called for C*-provided identifiers, - // so we expect it to be non-null and non-empty. - assert s != null && !s.isEmpty(); - char c = s.charAt(0); - if (!(c >= 97 && c <= 122)) // a-z - return true; - for (int i = 1; i < s.length(); i++) { - c = s.charAt(i); - if (!((c >= 48 && c <= 57) // 0-9 - || (c == 95) // _ - || (c >= 97 && c <= 122) // a-z - )) { - return true; - } - } - return isReservedCqlKeyword(s); - } - - /** - * Return {@code true} if the given string is surrounded by the quote character given, and {@code - * false} otherwise. - * - * @param value The string to inspect. - * @return {@code true} if the given string is surrounded by the quote character, and {@code - * false} otherwise. - */ - private static boolean isQuoted(String value, char quoteChar) { - return value != null - && value.length() > 1 - && value.charAt(0) == quoteChar - && value.charAt(value.length() - 1) == quoteChar; - } - - /** - * @param quoteChar " or ' - * @return A quoted empty string. - */ - private static String emptyQuoted(char quoteChar) { - // don't handle non quote characters, this is done so that these are interned and don't create - // repeated empty quoted strings. - assert quoteChar == '"' || quoteChar == '\''; - if (quoteChar == '"') return "\"\""; - else return "''"; - } - - /** - * Quotes text and escapes any existing quotes in the text. 
{@code String.replace()} is a bit too - * inefficient (see JAVA-67, JAVA-1262). - * - * @param text The text. - * @param quoteChar The character to use as a quote. - * @return The text with surrounded in quotes with all existing quotes escaped with (i.e. ' - * becomes '') - */ - private static String quote(String text, char quoteChar) { - if (text == null || text.isEmpty()) return emptyQuoted(quoteChar); - - int nbMatch = 0; - int start = -1; - do { - start = text.indexOf(quoteChar, start + 1); - if (start != -1) ++nbMatch; - } while (start != -1); - - // no quotes found that need to be escaped, simply surround in quotes and return. - if (nbMatch == 0) return quoteChar + text + quoteChar; - - // 2 for beginning and end quotes. - // length for original text - // nbMatch for escape characters to add to quotes to be escaped. - int newLength = 2 + text.length() + nbMatch; - char[] result = new char[newLength]; - result[0] = quoteChar; - result[newLength - 1] = quoteChar; - int newIdx = 1; - for (int i = 0; i < text.length(); i++) { - char c = text.charAt(i); - if (c == quoteChar) { - // escape quote with another occurrence. - result[newIdx++] = c; - result[newIdx++] = c; - } else { - result[newIdx++] = c; - } - } - return new String(result); - } - - /** - * Unquotes text and unescapes non surrounding quotes. {@code String.replace()} is a bit too - * inefficient (see JAVA-67, JAVA-1262). - * - * @param text The text - * @param quoteChar The character to use as a quote. - * @return The text with surrounding quotes removed and non surrounding quotes unescaped (i.e. '' - * becomes ') - */ - private static String unquote(String text, char quoteChar) { - if (!isQuoted(text, quoteChar)) return text; - - if (text.length() == 2) return ""; - - String search = emptyQuoted(quoteChar); - int nbMatch = 0; - int start = -1; - do { - start = text.indexOf(search, start + 2); - // ignore the second to last character occurrence, as the last character is a quote. 
- if (start != -1 && start != text.length() - 2) ++nbMatch; - } while (start != -1); - - // no escaped quotes found, simply remove surrounding quotes and return. - if (nbMatch == 0) return text.substring(1, text.length() - 1); - - // length of the new string will be its current length - the number of occurrences. - int newLength = text.length() - nbMatch - 2; - char[] result = new char[newLength]; - int newIdx = 0; - // track whenever a quoteChar is encountered and the previous character is not a quoteChar. - boolean firstFound = false; - for (int i = 1; i < text.length() - 1; i++) { - char c = text.charAt(i); - if (c == quoteChar) { - if (firstFound) { - // The previous character was a quoteChar, don't add this to result, this action in - // effect removes consecutive quotes. - firstFound = false; - } else { - // found a quoteChar and the previous character was not a quoteChar, include in result. - firstFound = true; - result[newIdx++] = c; - } - } else { - // non quoteChar encountered, include in result. - result[newIdx++] = c; - firstFound = false; - } - } - return new String(result); - } - - @VisibleForTesting - static boolean isReservedCqlKeyword(String id) { - return id != null && RESERVED_KEYWORDS.contains(id.toLowerCase(Locale.ROOT)); - } - - /** - * Check whether the given string corresponds to a valid CQL long literal. Long literals are - * composed solely by digits, but can have an optional leading minus sign. - * - * @param str The string to inspect. - * @return {@code true} if the given string corresponds to a valid CQL integer literal, {@code - * false} otherwise. - */ - public static boolean isLongLiteral(String str) { - if (str == null || str.isEmpty()) return false; - char[] chars = str.toCharArray(); - for (int i = 0; i < chars.length; i++) { - char c = chars[i]; - if ((c < '0' && (i != 0 || c != '-')) || c > '9') return false; - } - return true; - } - - /** - * Checks whether the given text is not null and not empty. 
- * - * @param text The text to check. - * @param name The name of the argument. - * @return The text (for method chaining). - */ - public static String requireNotEmpty(String text, String name) { - Objects.requireNonNull(text, name + " cannot be null"); - if (text.isEmpty()) { - throw new IllegalArgumentException(name + " cannot be empty"); - } - return text; - } - - private Strings() {} - - private static final ImmutableSet RESERVED_KEYWORDS = - ImmutableSet.of( - // See https://github.com/apache/cassandra/blob/trunk/doc/cql3/CQL.textile#appendixA - "add", - "allow", - "alter", - "and", - "apply", - "asc", - "authorize", - "batch", - "begin", - "by", - "columnfamily", - "create", - "default", - "delete", - "desc", - "describe", - "drop", - "entries", - "execute", - "from", - "full", - "grant", - "if", - "in", - "index", - "infinity", - "insert", - "into", - "is", - "keyspace", - "limit", - "materialized", - "mbean", - "mbeans", - "modify", - "nan", - "norecursive", - "not", - "null", - "of", - "on", - "or", - "order", - "primary", - "rename", - "replace", - "revoke", - "schema", - "select", - "set", - "table", - "to", - "token", - "truncate", - "unlogged", - "unset", - "update", - "use", - "using", - "view", - "where", - "with"); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlan.java deleted file mode 100644 index 10ca8c0c48d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlan.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.AbstractQueue; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; - -/** A query plan that encompasses many child plans, and consumes them one by one. */ -@ThreadSafe -public class CompositeQueryPlan extends AbstractQueue implements QueryPlan { - - private final Queue[] plans; - private final AtomicInteger currentPlan = new AtomicInteger(0); - - @SafeVarargs - public CompositeQueryPlan(@NonNull Queue... 
plans) { - if (plans.length == 0) { - throw new IllegalArgumentException("at least one child plan must be provided"); - } - for (Queue plan : plans) { - if (plan == null) { - throw new NullPointerException("child plan cannot be null"); - } - } - this.plans = plans; - } - - @Nullable - @Override - public Node poll() { - while (true) { - int current = currentPlan.get(); - Queue plan = plans[current]; - Node n = plan.poll(); - if (n != null) { - return n; - } - int next = current + 1; - if (next == plans.length) { - return null; - } - currentPlan.compareAndSet(current, next); - } - } - - @NonNull - @Override - public Iterator iterator() { - List> its = new ArrayList<>(plans.length); - for (Queue plan : plans) { - its.add(plan.iterator()); - } - return Iterators.concat(its.iterator()); - } - - @Override - public int size() { - int size = 0; - for (Queue plan : plans) { - size += plan.size(); - } - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/EmptyQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/EmptyQueryPlan.java deleted file mode 100644 index 53177147695..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/EmptyQueryPlan.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.AbstractQueue; -import java.util.Collections; -import java.util.Iterator; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class EmptyQueryPlan extends AbstractQueue implements QueryPlan { - - @Override - public Node poll() { - return null; - } - - @NonNull - @Override - public Iterator iterator() { - return Collections.emptyIterator(); - } - - @Override - public int size() { - return 0; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlan.java deleted file mode 100644 index 075143c2e8d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlan.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import net.jcip.annotations.ThreadSafe; - -/** - * A query plan where nodes are computed lazily, when the plan is consumed for the first time. - * - *

This class can be useful when a query plan computation is heavy but the plan has a low chance - * of ever being consumed, e.g. the last query plan in a {@link CompositeQueryPlan}. - */ -@ThreadSafe -public abstract class LazyQueryPlan extends QueryPlanBase { - - private volatile Object[] nodes; - - /** - * Computes and returns the nodes to use for this query plan. - * - *

For efficiency, the declared return type is {@code Object[]} but all elements must be - * instances of {@link Node}. See {@link #getNodes()} for details. - * - *

This method is guaranteed to be invoked only once, at the first call to {@link #poll()}. - * - *

Implementors must avoid blocking calls in this method as it will be invoked on the driver's - * hot path. - */ - protected abstract Object[] computeNodes(); - - @Override - protected Object[] getNodes() { - if (nodes == null) { - synchronized (this) { - if (nodes == null) { - nodes = computeNodes(); - } - } - } - return nodes; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java deleted file mode 100644 index 371e100a0e2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.Queue; -import net.jcip.annotations.ThreadSafe; - -/** - * A specialized, thread-safe node queue for use when creating {@linkplain - * com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy#newQueryPlan(Request, Session) - * query plans}. - * - *

This interface and its built-in implementations are not general-purpose queues; they are - * tailored for the specific use case of creating query plans in the driver. They make a few - * unconventional API choices for the sake of performance. - * - *

Furthermore, the driver only consumes query plans through calls to its {@link #poll()} method; - * therefore, this method is the only valid mutation operation for a query plan, other mutating - * methods throw. - * - *

Both {@link #size()} and {@link #iterator()} are supported and never throw, even if called - * concurrently. These methods are implemented for reporting purposes only, the driver itself does - * not use them. - * - *

All built-in {@link QueryPlan} implementations can be safely reused for custom load balancing - * policies; if you plan to do so, study the source code of {@link - * com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy} or {@link - * com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy}. - * - * @see QueryPlanBase - */ -@ThreadSafe -public interface QueryPlan extends Queue { - - QueryPlan EMPTY = new EmptyQueryPlan(); - - /** - * {@inheritDoc} - * - *

Implementation note: query plan iterators are snapshots that reflect the contents of the - * queue at the time of the call, and are not affected by further modifications. Successive calls - * to this method will return different objects. - */ - @NonNull - @Override - Iterator iterator(); - - @Override - default boolean offer(Node node) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - default Node peek() { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - default boolean add(Node node) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - default Node remove() { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - default Node element() { - throw new UnsupportedOperationException("Not implemented"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanBase.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanBase.java deleted file mode 100644 index 43f369f636a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanBase.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.AbstractQueue; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public abstract class QueryPlanBase extends AbstractQueue implements QueryPlan { - - private final AtomicInteger nextIndex = new AtomicInteger(); - - /** - * Returns the nodes in this query plan; the returned array should stay the same across - * invocations. - * - *

The declared return type is {@code Object[]} because of implementation details of {@link - * com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy - * DefaultLoadBalancingPolicy} and {@link - * com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy - * BasicLoadBalancingPolicy}, but all elements must be instances of {@link Node}, otherwise - * instance methods will fail later. - */ - protected abstract Object[] getNodes(); - - @Nullable - @Override - public Node poll() { - // We don't handle overflow. In practice it won't be an issue, since the driver stops polling - // once the query plan is empty. - int i = nextIndex.getAndIncrement(); - Object[] nodes = getNodes(); - return (i >= nodes.length) ? null : (Node) nodes[i]; - } - - @NonNull - @Override - public Iterator iterator() { - int i = nextIndex.get(); - Object[] nodes = getNodes(); - if (i >= nodes.length) { - return Collections.emptyIterator(); - } else { - return Iterators.forArray(Arrays.copyOfRange(nodes, i, nodes.length, Node[].class)); - } - } - - @Override - public int size() { - return Math.max(getNodes().length - nextIndex.get(), 0); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlan.java deleted file mode 100644 index 4e0df8d2354..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlan.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -/** Query plan where nodes must be provided at construction time. */ -@ThreadSafe -public class SimpleQueryPlan extends QueryPlanBase { - - private final Object[] nodes; - - /** - * Creates a new query plan with the given nodes. - * - *

For efficiency, there is no defensive copy, the provided array is used directly. The - * declared type is {@code Object[]} but all elements must be instances of {@link Node}. See - * {@link #getNodes()} for details. - * - * @param nodes the nodes to initially fill the queue with. - */ - public SimpleQueryPlan(@NonNull Object... nodes) { - this.nodes = nodes; - } - - @Override - protected Object[] getNodes() { - return nodes; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java deleted file mode 100644 index 3f2d10b62e0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.FastThreadLocalThread; -import java.util.concurrent.ThreadFactory; - -/** - * Safeguards against bad usage patterns in client code that could introduce deadlocks in the - * driver. - * - *

The driver internals are fully asynchronous, nothing should ever block. On the other hand, our - * API exposes synchronous wrappers, that call async methods and wait on the result (as a - * convenience for clients that don't want to do async). These methods should never be called on a - * driver thread, because this can lead to deadlocks. This can happen from client code if it uses - * callbacks. - */ -public class BlockingOperation { - - /** - * This method is invoked from each synchronous driver method, and checks that we are not on a - * driver thread. - * - *

For this to work, all driver threads must be created by {@link SafeThreadFactory} (which is - * the case by default). - * - * @throws IllegalStateException if a driver thread is executing this. - */ - public static void checkNotDriverThread() { - if (Thread.currentThread() instanceof InternalThread) { - throw new IllegalStateException( - "Detected a synchronous API call on a driver thread, " - + "failing because this can cause deadlocks."); - } - } - - /** - * Marks threads as driver threads, so that they will be detected by {@link - * #checkNotDriverThread()} - */ - public static class SafeThreadFactory implements ThreadFactory { - @Override - public Thread newThread(@NonNull Runnable r) { - return new InternalThread(r); - } - } - - static class InternalThread extends FastThreadLocalThread { - private InternalThread(Runnable runnable) { - super(runnable); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java deleted file mode 100644 index 275b2ddfeef..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.DriverExecutionException; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Throwables; -import java.util.List; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; - -public class CompletableFutures { - - public static CompletableFuture failedFuture(Throwable cause) { - CompletableFuture future = new CompletableFuture<>(); - future.completeExceptionally(cause); - return future; - } - - /** Completes {@code target} with the outcome of {@code source}. */ - public static void completeFrom(CompletionStage source, CompletableFuture target) { - source.whenComplete( - (t, error) -> { - if (error != null) { - target.completeExceptionally(error); - } else { - target.complete(t); - } - }); - } - - /** @return a completion stage that completes when all inputs are done (success or failure). */ - public static CompletionStage allDone(List> inputs) { - CompletableFuture result = new CompletableFuture<>(); - if (inputs.isEmpty()) { - result.complete(null); - } else { - final int todo = inputs.size(); - final AtomicInteger done = new AtomicInteger(); - for (CompletionStage input : inputs) { - input.whenComplete( - (v, error) -> { - if (done.incrementAndGet() == todo) { - result.complete(null); - } - }); - } - } - return result; - } - - /** Do something when all inputs are done (success or failure). 
*/ - public static void whenAllDone( - List> inputs, Runnable callback, Executor executor) { - allDone(inputs).thenRunAsync(callback, executor).exceptionally(UncaughtExceptions::log); - } - /** - * @return a completion stage that completes when all inputs are successful, or fails if any of - * them failed. - */ - public static CompletionStage allSuccessful(List> inputs) { - CompletableFuture result = new CompletableFuture<>(); - if (inputs.isEmpty()) { - result.complete(null); - } else { - final int todo = inputs.size(); - final AtomicInteger done = new AtomicInteger(); - final CopyOnWriteArrayList errors = new CopyOnWriteArrayList<>(); - for (CompletionStage input : inputs) { - input.whenComplete( - (v, error) -> { - if (error != null) { - errors.add(error); - } - if (done.incrementAndGet() == todo) { - if (errors.isEmpty()) { - result.complete(null); - } else { - Throwable finalError = errors.get(0); - for (int i = 1; i < errors.size(); i++) { - Throwable suppressedError = errors.get(i); - if (finalError != suppressedError) { - finalError.addSuppressed(suppressedError); - } - } - result.completeExceptionally(finalError); - } - } - }); - } - } - return result; - } - - /** Get the result now, when we know for sure that the future is complete. */ - public static T getCompleted(CompletionStage stage) { - CompletableFuture future = stage.toCompletableFuture(); - Preconditions.checkArgument(future.isDone() && !future.isCompletedExceptionally()); - try { - return future.get(); - } catch (InterruptedException | ExecutionException e) { - // Neither can happen given the precondition - throw new AssertionError("Unexpected error", e); - } - } - - /** Get the error now, when we know for sure that the future is failed. 
*/ - public static Throwable getFailed(CompletionStage stage) { - CompletableFuture future = stage.toCompletableFuture(); - Preconditions.checkArgument(future.isCompletedExceptionally()); - try { - future.get(); - throw new AssertionError("future should be failed"); - } catch (InterruptedException e) { - throw new AssertionError("Unexpected error", e); - } catch (ExecutionException e) { - return e.getCause(); - } - } - - public static T getUninterruptibly(CompletionStage stage) { - boolean interrupted = false; - try { - while (true) { - try { - return stage.toCompletableFuture().get(); - } catch (InterruptedException e) { - interrupted = true; - } catch (ExecutionException e) { - Throwable cause = e.getCause(); - if (cause instanceof DriverException) { - throw ((DriverException) cause).copy(); - } - Throwables.throwIfUnchecked(cause); - throw new DriverExecutionException(cause); - } - } - } finally { - if (interrupted) { - Thread.currentThread().interrupt(); - } - } - } - - /** - * Executes a function on the calling thread and returns result in a {@link CompletableFuture}. - * - *

Similar to {@link CompletableFuture#completedFuture} except takes a {@link Supplier} and if - * the supplier throws an unchecked exception, the returning future fails with that exception. - * - * @param supplier Function to execute - * @param Type of result - * @return result of function wrapped in future - */ - public static CompletableFuture wrap(Supplier supplier) { - try { - return CompletableFuture.completedFuture(supplier.get()); - } catch (Throwable t) { - return failedFuture(t); - } - } - - public static void whenCancelled(CompletionStage stage, Runnable action) { - stage.exceptionally( - (error) -> { - if (error instanceof CancellationException) { - action.run(); - } - return null; - }); - } - - public static void propagateCancellation(CompletionStage source, CompletionStage target) { - whenCancelled(source, () -> target.toCompletableFuture().cancel(true)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java deleted file mode 100644 index 548ee0bb042..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.graph.Graphs; -import com.datastax.oss.driver.shaded.guava.common.graph.MutableValueGraph; -import com.datastax.oss.driver.shaded.guava.common.graph.ValueGraphBuilder; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Detects cycles between a set of {@link LazyReference} instances. */ -@ThreadSafe -public class CycleDetector { - private static final boolean ENABLED = - Boolean.getBoolean("com.datastax.oss.driver.DETECT_CYCLES"); - private static final Logger LOG = LoggerFactory.getLogger(CycleDetector.class); - - private final String errorMessage; - private final boolean enabled; - private final MutableValueGraph graph; - - public CycleDetector(String errorMessage) { - this(errorMessage, ENABLED); - } - - @VisibleForTesting - CycleDetector(String errorMessage, boolean enabled) { - this.errorMessage = errorMessage; - this.enabled = enabled; - this.graph = enabled ? 
ValueGraphBuilder.directed().build() : null; - } - - void onTryLock(LazyReference reference) { - if (enabled) { - synchronized (this) { - Thread me = Thread.currentThread(); - LOG.debug("{} wants to initialize {}", me, reference.getName()); - graph.putEdgeValue(me.getName(), reference.getName(), "wants to initialize"); - LOG.debug("{}", graph); - if (Graphs.hasCycle(graph.asGraph())) { - throw new IllegalStateException(errorMessage + " " + graph); - } - } - } - } - - void onLockAcquired(LazyReference reference) { - if (enabled) { - synchronized (this) { - Thread me = Thread.currentThread(); - LOG.debug("{} is initializing {}", me, reference.getName()); - String old = graph.removeEdge(me.getName(), reference.getName()); - assert "wants to initialize".equals(old); - graph.putEdgeValue(reference.getName(), me.getName(), "is getting initialized by"); - } - } - } - - void onReleaseLock(LazyReference reference) { - if (enabled) { - synchronized (this) { - Thread me = Thread.currentThread(); - LOG.debug("{} is done initializing {}", me, reference.getName()); - graph.removeEdge(reference.getName(), me.getName()); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java deleted file mode 100644 index 6bde155858c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ScheduledFuture; -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; -import java.util.function.Function; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Debounces a sequence of events to smoothen temporary oscillations. - * - *

When a first event is received, the debouncer starts a time window. If no other event is - * received within that window, the initial event is flushed. However, if another event arrives, the - * window is reset, and the next flush will now contain both events. If the window keeps getting - * reset, the debouncer will flush after a given number of accumulated events. - * - * @param the type of the incoming events. - * @param the resulting type after the events of a batch have been coalesced. - */ -@NotThreadSafe // must be confined to adminExecutor -public class Debouncer { - private static final Logger LOG = LoggerFactory.getLogger(Debouncer.class); - - private final String logPrefix; - private final EventExecutor adminExecutor; - private final Consumer onFlush; - private final Duration window; - private final long maxEvents; - private final Function, CoalescedT> coalescer; - - private List currentBatch = new ArrayList<>(); - private ScheduledFuture nextFlush; - private boolean stopped; - - /** - * Creates a new instance. - * - * @param adminExecutor the executor that will be used to schedule all tasks. - * @param coalescer how to transform a batch of events into a result. - * @param onFlush what to do with a result. - * @param window the time window. - * @param maxEvents the maximum number of accumulated events before a flush is forced. - */ - public Debouncer( - EventExecutor adminExecutor, - Function, CoalescedT> coalescer, - Consumer onFlush, - Duration window, - long maxEvents) { - this("debouncer", adminExecutor, coalescer, onFlush, window, maxEvents); - } - - /** - * Creates a new instance. - * - * @param logPrefix the log prefix to use in log messages. - * @param adminExecutor the executor that will be used to schedule all tasks. - * @param coalescer how to transform a batch of events into a result. - * @param onFlush what to do with a result. - * @param window the time window. 
- * @param maxEvents the maximum number of accumulated events before a flush is forced. - */ - public Debouncer( - String logPrefix, - EventExecutor adminExecutor, - Function, CoalescedT> coalescer, - Consumer onFlush, - Duration window, - long maxEvents) { - this.logPrefix = logPrefix; - this.coalescer = coalescer; - Preconditions.checkArgument(maxEvents >= 1, "maxEvents should be at least 1"); - this.adminExecutor = adminExecutor; - this.onFlush = onFlush; - this.window = window; - this.maxEvents = maxEvents; - } - - /** This must be called on eventExecutor too. */ - public void receive(IncomingT element) { - assert adminExecutor.inEventLoop(); - if (stopped) { - return; - } - if (window.isZero() || maxEvents == 1) { - LOG.debug( - "[{}] Received {}, flushing immediately (window = {}, maxEvents = {})", - logPrefix, - element, - window, - maxEvents); - onFlush.accept(coalescer.apply(ImmutableList.of(element))); - } else { - currentBatch.add(element); - if (currentBatch.size() == maxEvents) { - LOG.debug( - "[{}] Received {}, flushing immediately (because {} accumulated events)", - logPrefix, - element, - maxEvents); - flushNow(); - } else { - LOG.debug("[{}] Received {}, scheduling next flush in {}", logPrefix, element, window); - scheduleFlush(); - } - } - } - - public void flushNow() { - assert adminExecutor.inEventLoop(); - LOG.debug("[{}] Flushing now", logPrefix); - cancelNextFlush(); - if (!currentBatch.isEmpty()) { - onFlush.accept(coalescer.apply(currentBatch)); - currentBatch = new ArrayList<>(); - } - } - - private void scheduleFlush() { - assert adminExecutor.inEventLoop(); - cancelNextFlush(); - nextFlush = adminExecutor.schedule(this::flushNow, window.toNanos(), TimeUnit.NANOSECONDS); - nextFlush.addListener(UncaughtExceptions::log); - } - - private void cancelNextFlush() { - assert adminExecutor.inEventLoop(); - if (nextFlush != null && !nextFlush.isDone()) { - boolean cancelled = nextFlush.cancel(true); - if (cancelled) { - LOG.debug("[{}] Cancelled 
existing scheduled flush", logPrefix); - } - } - } - - /** - * Stop debouncing: the next flush is cancelled, and all pending and future events will be - * ignored. - */ - public void stop() { - assert adminExecutor.inEventLoop(); - if (!stopped) { - stopped = true; - cancelNextFlush(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegration.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegration.java deleted file mode 100644 index 7d90c50028e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegration.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation.InternalThread; -import reactor.blockhound.BlockHound; -import reactor.blockhound.integration.BlockHoundIntegration; - -public final class DriverBlockHoundIntegration implements BlockHoundIntegration { - - @Override - public void applyTo(BlockHound.Builder builder) { - - // disallow blocking operations in driver internal threads by default; - // note that session initialization will happen on one of these threads, which is why - // we need to allow a few blocking calls below. - builder.nonBlockingThreadPredicate(current -> current.or(InternalThread.class::isInstance)); - - // blocking calls in initialization methods - - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.context.DefaultNettyOptions", "createTimer"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.os.Native$LibcLoader", "load"); - builder.allowBlockingCallsInside( - // requires native libraries - "com.datastax.oss.driver.internal.core.time.Clock", "getInstance"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.util.concurrent.LazyReference", "get"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter", "accept"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter", "markReady"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter", "start"); - - // called upon initialization but also on topology/status events - - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper$SinglePolicyDistanceReporter", - "setDistance"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.pool.ChannelSet", "add"); - 
builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.pool.ChannelSet", "remove"); - - // never called directly by the driver; locks that usually operate with low thread contention - - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.type.codec.registry.CachingCodecRegistry", - "register"); - builder.allowBlockingCallsInside( - // requires native libraries, for now because of Uuids.getProcessPiece; if JAVA-1116 gets - // implemented, Uuids.getCurrentTimestamp will also require an exception. Pre-emptively - // protect the whole Uuids.timeBased method. - "com.datastax.oss.driver.api.core.uuid.Uuids", "timeBased"); - - // continuous paging - - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "cancel"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "dequeueOrCreatePending"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "isLastResponse"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "onFailure"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "onPageTimeout"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "onResponse"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "onStreamIdAssigned"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - 
"operationComplete"); - - // Netty extra exceptions - - // see https://github.com/netty/netty/pull/10810 - builder.allowBlockingCallsInside("io.netty.util.HashedWheelTimer", "start"); - builder.allowBlockingCallsInside("io.netty.util.HashedWheelTimer", "stop"); - - // see https://github.com/netty/netty/pull/10811 - builder.allowBlockingCallsInside("io.netty.util.concurrent.GlobalEventExecutor", "addTask"); - builder.allowBlockingCallsInside( - "io.netty.util.concurrent.SingleThreadEventExecutor", "addTask"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java deleted file mode 100644 index e04b7647d8e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Supplier; -import net.jcip.annotations.ThreadSafe; - -/** Holds a reference to an object that is initialized on first access. 
*/ -@ThreadSafe -public class LazyReference { - - private final String name; - private final Supplier supplier; - private final CycleDetector checker; - private volatile T value; - private final ReentrantLock lock = new ReentrantLock(); - - public LazyReference(String name, Supplier supplier, CycleDetector cycleDetector) { - this.name = name; - this.supplier = supplier; - this.checker = cycleDetector; - } - - public LazyReference(Supplier supplier) { - this(null, supplier, null); - } - - public T get() { - T t = value; - if (t == null) { - if (checker != null) { - checker.onTryLock(this); - } - lock.lock(); - try { - if (checker != null) { - checker.onLockAcquired(this); - } - t = value; - if (t == null) { - value = t = supplier.get(); - } - } finally { - if (checker != null) { - checker.onReleaseLock(this); - } - lock.unlock(); - } - } - return t; - } - - public String getName() { - return name; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombiner.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombiner.java deleted file mode 100644 index b854820403d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombiner.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import io.netty.util.concurrent.Promise; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import net.jcip.annotations.ThreadSafe; - -/** - * A thread-safe version of Netty's {@link io.netty.util.concurrent.PromiseCombiner} that uses - * proper synchronization to trigger the completion of the aggregate promise. - */ -@ThreadSafe -public class PromiseCombiner { - - /** - * Combines the given futures into the given promise, that is, ties the completion of the latter - * to that of the formers. - * - * @param aggregatePromise The promise that will complete when all parents complete. - * @param parents The parent futures. - */ - public static void combine( - @NonNull Promise aggregatePromise, @NonNull Future... 
parents) { - PromiseCombinerListener listener = - new PromiseCombinerListener(aggregatePromise, parents.length); - for (Future parent : parents) { - parent.addListener(listener); - } - } - - private static class PromiseCombinerListener implements GenericFutureListener> { - - private final Promise aggregatePromise; - private final AtomicInteger remainingCount; - private final AtomicReference aggregateFailureRef = new AtomicReference<>(); - - private PromiseCombinerListener(Promise aggregatePromise, int numberOfParents) { - this.aggregatePromise = aggregatePromise; - remainingCount = new AtomicInteger(numberOfParents); - } - - @Override - public void operationComplete(Future future) { - if (!future.isSuccess()) { - aggregateFailureRef.updateAndGet( - aggregateFailure -> { - if (aggregateFailure == null) { - aggregateFailure = future.cause(); - } else { - aggregateFailure.addSuppressed(future.cause()); - } - return aggregateFailure; - }); - } - if (remainingCount.decrementAndGet() == 0) { - Throwable aggregateFailure = aggregateFailureRef.get(); - if (aggregateFailure != null) { - aggregatePromise.tryFailure(aggregateFailure); - } else { - aggregatePromise.trySuccess(null); - } - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java deleted file mode 100644 index 28aaf596705..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy.ReconnectionSchedule; -import com.datastax.oss.driver.internal.core.util.Loggers; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.ScheduledFuture; -import java.time.Duration; -import java.util.concurrent.Callable; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A reconnection process that, if failed, is retried periodically according to the intervals - * defined by a policy. - * - *

All the tasks run on a Netty event executor that is provided at construction time. Clients are - * also expected to call the public methods on that thread. - */ -@NotThreadSafe // must be confined to executor -public class Reconnection { - private static final Logger LOG = LoggerFactory.getLogger(Reconnection.class); - - private enum State { - STOPPED, - SCHEDULED, // next attempt scheduled but not started yet - ATTEMPT_IN_PROGRESS, // current attempt started and not completed yet - STOP_AFTER_CURRENT, // stopped, but we're letting an in-progress attempt finish - ; - } - - private final String logPrefix; - private final EventExecutor executor; - private final Supplier scheduleSupplier; - private final Callable> reconnectionTask; - private final Runnable onStart; - private final Runnable onStop; - - private State state = State.STOPPED; - private ReconnectionSchedule reconnectionSchedule; - private ScheduledFuture> nextAttempt; - - /** - * @param reconnectionTask the actual thing to try on a reconnection, returns if it succeeded or - * not. - */ - public Reconnection( - String logPrefix, - EventExecutor executor, - Supplier scheduleSupplier, - Callable> reconnectionTask, - Runnable onStart, - Runnable onStop) { - this.logPrefix = logPrefix; - this.executor = executor; - this.scheduleSupplier = scheduleSupplier; - this.reconnectionTask = reconnectionTask; - this.onStart = onStart; - this.onStop = onStop; - } - - public Reconnection( - String logPrefix, - EventExecutor executor, - Supplier scheduleSupplier, - Callable> reconnectionTask) { - this(logPrefix, executor, scheduleSupplier, reconnectionTask, () -> {}, () -> {}); - } - - /** - * Note that if {@link #stop()} was called but we're still waiting for the last pending attempt to - * complete, this still returns {@code true}. - */ - public boolean isRunning() { - assert executor.inEventLoop(); - return state != State.STOPPED; - } - - /** This is a no-op if the reconnection is already running. 
*/ - public void start() { - start(null); - } - - public void start(ReconnectionSchedule customSchedule) { - assert executor.inEventLoop(); - switch (state) { - case SCHEDULED: - case ATTEMPT_IN_PROGRESS: - // nothing to do - break; - case STOP_AFTER_CURRENT: - // cancel the scheduled stop - state = State.ATTEMPT_IN_PROGRESS; - break; - case STOPPED: - reconnectionSchedule = (customSchedule == null) ? scheduleSupplier.get() : customSchedule; - onStart.run(); - scheduleNextAttempt(); - break; - } - } - - /** - * Forces a reconnection now, without waiting for the next scheduled attempt. - * - * @param forceIfStopped if true and the reconnection is not running, it will get started (meaning - * subsequent reconnections will be scheduled if this attempt fails). If false and the - * reconnection is not running, no attempt is scheduled. - */ - public void reconnectNow(boolean forceIfStopped) { - assert executor.inEventLoop(); - if (state == State.ATTEMPT_IN_PROGRESS || state == State.STOP_AFTER_CURRENT) { - LOG.debug( - "[{}] reconnectNow and current attempt was still running, letting it complete", - logPrefix); - if (state == State.STOP_AFTER_CURRENT) { - // Make sure that we will schedule other attempts if this one fails. 
- state = State.ATTEMPT_IN_PROGRESS; - } - } else if (state == State.STOPPED && !forceIfStopped) { - LOG.debug("[{}] reconnectNow(false) while stopped, nothing to do", logPrefix); - } else { - assert state == State.SCHEDULED || (state == State.STOPPED && forceIfStopped); - LOG.debug("[{}] Forcing next attempt now", logPrefix); - if (nextAttempt != null) { - nextAttempt.cancel(true); - } - try { - onNextAttemptStarted(reconnectionTask.call()); - } catch (Exception e) { - Loggers.warnWithException( - LOG, "[{}] Uncaught error while starting reconnection attempt", logPrefix, e); - scheduleNextAttempt(); - } - } - } - - public void stop() { - assert executor.inEventLoop(); - switch (state) { - case STOPPED: - case STOP_AFTER_CURRENT: - break; - case ATTEMPT_IN_PROGRESS: - state = State.STOP_AFTER_CURRENT; - break; - case SCHEDULED: - reallyStop(); - break; - } - } - - private void reallyStop() { - LOG.debug("[{}] Stopping reconnection", logPrefix); - state = State.STOPPED; - if (nextAttempt != null) { - nextAttempt.cancel(true); - nextAttempt = null; - } - onStop.run(); - reconnectionSchedule = null; - } - - private void scheduleNextAttempt() { - assert executor.inEventLoop(); - state = State.SCHEDULED; - if (reconnectionSchedule == null) { // happens if reconnectNow() while we were stopped - reconnectionSchedule = scheduleSupplier.get(); - } - Duration nextInterval = reconnectionSchedule.nextDelay(); - LOG.debug("[{}] Scheduling next reconnection in {}", logPrefix, nextInterval); - nextAttempt = executor.schedule(reconnectionTask, nextInterval.toNanos(), TimeUnit.NANOSECONDS); - nextAttempt.addListener( - (Future> f) -> { - if (f.isSuccess()) { - onNextAttemptStarted(f.getNow()); - } else if (!f.isCancelled()) { - Loggers.warnWithException( - LOG, - "[{}] Uncaught error while starting reconnection attempt", - logPrefix, - f.cause()); - scheduleNextAttempt(); - } - }); - } - - // When the Callable runs this means the caller has started the attempt, we have yet to wait 
on - // the CompletableFuture to find out if that succeeded or not. - private void onNextAttemptStarted(CompletionStage futureOutcome) { - assert executor.inEventLoop(); - state = State.ATTEMPT_IN_PROGRESS; - futureOutcome - .whenCompleteAsync(this::onNextAttemptCompleted, executor) - .exceptionally(UncaughtExceptions::log); - } - - private void onNextAttemptCompleted(Boolean success, Throwable error) { - assert executor.inEventLoop(); - if (success) { - LOG.debug("[{}] Reconnection successful", logPrefix); - reallyStop(); - } else { - if (error != null && !(error instanceof CancellationException)) { - Loggers.warnWithException( - LOG, "[{}] Uncaught error while starting reconnection attempt", logPrefix, error); - } - if (state == State.STOP_AFTER_CURRENT) { - reallyStop(); - } else { - assert state == State.ATTEMPT_IN_PROGRESS; - scheduleNextAttempt(); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java deleted file mode 100644 index 27ca1b6ff42..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Consumer; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; - -/** - * Filters a list of events, accumulating them during an initialization period. - * - *

It has three states: - * - *

    - *
  • Not started: events are discarded. - *
  • Started: events accumulate but are not propagated to the end consumer yet. - *
  • Ready: all accumulated events are flushed to the end consumer; subsequent events are - * propagated directly. The order of events is preserved at all times. - *
- */ -@ThreadSafe -public class ReplayingEventFilter { - - private enum State { - NEW, - STARTED, - READY - } - - private final Consumer consumer; - - // Exceptionally, we use a lock: it will rarely be contended, and if so for only a short period. - private final ReadWriteLock stateLock = new ReentrantReadWriteLock(); - - @GuardedBy("stateLock") - private State state; - - @GuardedBy("stateLock") - private final List recordedEvents; - - public ReplayingEventFilter(Consumer consumer) { - this.consumer = consumer; - this.state = State.NEW; - this.recordedEvents = new CopyOnWriteArrayList<>(); - } - - public void start() { - stateLock.writeLock().lock(); - try { - state = State.STARTED; - } finally { - stateLock.writeLock().unlock(); - } - } - - public void markReady() { - stateLock.writeLock().lock(); - try { - state = State.READY; - for (EventT event : recordedEvents) { - consumer.accept(event); - } - } finally { - recordedEvents.clear(); - stateLock.writeLock().unlock(); - } - } - - public void accept(EventT event) { - stateLock.readLock().lock(); - try { - switch (state) { - case NEW: - break; - case STARTED: - recordedEvents.add(event); - break; - case READY: - consumer.accept(event); - break; - } - } finally { - stateLock.readLock().unlock(); - } - } - - @VisibleForTesting - public List recordedEvents() { - stateLock.readLock().lock(); - try { - return ImmutableList.copyOf(recordedEvents); - } finally { - stateLock.readLock().unlock(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java deleted file mode 100644 index addaf1850bf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.Future; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Consumer; - -/** - * Utility to run a task on a Netty event executor (i.e. thread). If we're already on the executor, - * the task is submitted, otherwise it's scheduled. - * - *

Be careful when using this, always keep in mind that the task might be executed synchronously. - * This can lead to subtle bugs when both the calling code and the callback manipulate a collection: - * - *

{@code
- * List> futureFoos;
- *
- * // Scheduled on eventExecutor:
- * for (int i = 0; i < count; i++) {
- *   CompletionStage futureFoo = FooFactory.init();
- *   futureFoos.add(futureFoo);
- *   // futureFoo happens to be complete by now, so callback gets executed immediately
- *   futureFoo.whenComplete(RunOrSchedule.on(eventExecutor, () -> callback(futureFoo)));
- * }
- *
- * private void callback(CompletionStage futureFoo) {
- *    futureFoos.remove(futureFoo); // ConcurrentModificationException!!!
- * }
- * }
- * - * For that kind of situation, it's better to use {@code futureFoo.whenCompleteAsync(theTask, - * eventExecutor)}, so that the task is always scheduled. - */ -public class RunOrSchedule { - - public static void on(EventExecutor executor, Runnable task) { - if (executor.inEventLoop()) { - task.run(); - } else { - executor.submit(task).addListener(UncaughtExceptions::log); - } - } - - public static Consumer on(EventExecutor executor, Consumer task) { - return (t) -> { - if (executor.inEventLoop()) { - task.accept(t); - } else { - executor.submit(() -> task.accept(t)).addListener(UncaughtExceptions::log); - } - }; - } - - public static CompletionStage on( - EventExecutor executor, Callable> task) { - if (executor.inEventLoop()) { - try { - return task.call(); - } catch (Exception e) { - return CompletableFutures.failedFuture(e); - } - } else { - CompletableFuture result = new CompletableFuture<>(); - executor - .submit(task) - .addListener( - (Future> f) -> { - if (f.isSuccess()) { - CompletableFutures.completeFrom(f.getNow(), result); - } else { - result.completeExceptionally(f.cause()); - } - }); - return result; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java deleted file mode 100644 index 25bce8773e8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.internal.core.util.Loggers; -import io.netty.util.concurrent.Future; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Utility methods to log unexpected exceptions in asynchronous tasks. - * - *

Use this whenever you execute a future callback to apply side effects, but throw away the - * future itself: - * - *

{@code
- * CompletionStage futureFoo = FooFactory.build();
- *
- * futureFoo
- *   .whenComplete((f, error) -> { handler code with side effects })
- *   // futureFoo is not propagated, do this or any unexpected error in the handler will be
- *   // swallowed
- *   .exceptionally(UncaughtExceptions::log);
- *
- * // If you return the future, you don't need it (but it's up to the caller to handle a failed
- * // future)
- * return futureFoo.whenComplete(...)
- * }
- */ -public class UncaughtExceptions { - - private static final Logger LOG = LoggerFactory.getLogger(UncaughtExceptions.class); - - public static void log(Future future) { - if (!future.isSuccess() && !future.isCancelled()) { - Loggers.warnWithException(LOG, "Uncaught exception in scheduled task", future.cause()); - } - } - - @SuppressWarnings("TypeParameterUnusedInFormals") // type parameter is only needed for chaining - public static T log(Throwable t) { - Loggers.warnWithException(LOG, "Uncaught exception in scheduled task", t); - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java deleted file mode 100644 index bd0e2590b47..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** Internal utilities specific to Netty. 
*/ -package com.datastax.oss.driver.internal.core.util; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/package-info.java deleted file mode 100644 index 486afc446e3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/package-info.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Internal implementation details of the driver. - * - *

The types present here (and in subpackages) should not be used from client applications. If - * you decide to use them, do so at your own risk: binary compatibility is best-effort, and we - * reserve the right to break things at any time. Documentation may be sparse. - */ -package com.datastax.oss.driver.internal; diff --git a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/native-image.properties b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/native-image.properties deleted file mode 100644 index 2baa59f3b07..00000000000 --- a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/native-image.properties +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -Args=-H:IncludeResources=reference\\.conf \ - -H:IncludeResources=application\\.conf \ - -H:IncludeResources=application\\.json \ - -H:IncludeResources=application\\.properties \ - -H:IncludeResources=.*Driver\\.properties \ - -H:DynamicProxyConfigurationResources=${.}/proxy.json \ - -H:ReflectionConfigurationResources=${.}/reflection.json \ - --initialize-at-build-time=com.datastax.oss.driver.internal.core.util.Dependency diff --git a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/proxy.json b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/proxy.json deleted file mode 100644 index 37cf6fcf805..00000000000 --- a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/proxy.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - ["java.lang.reflect.TypeVariable"] -] diff --git a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json deleted file mode 100644 index 6082b853611..00000000000 --- a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json +++ /dev/null @@ -1,154 +0,0 @@ -[ - { - "name": "com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.loadbalancing.DcInferringLoadBalancingPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": 
"com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.specex.NoSpeculativeExecutionPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.time.AtomicTimestampGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.time.ThreadLocalTimestampGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.time.ServerSideTimestampGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": 
"com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.tracker.RequestLogger", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.session.throttling.PassThroughRequestThrottler", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.session.throttling.RateLimitingRequestThrottler", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metadata.NoopNodeStateListener", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metadata.schema.NoopSchemaChangeListener", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.addresstranslation.PassThroughAddressTranslator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.addresstranslation.Ec2MultiRegionAddressTranslator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider", - "methods": 
[ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.DefaultMetricsFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.NoopMetricsFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.DropwizardMetricsFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.DefaultMetricIdGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.TaggingMetricIdGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "io.netty.channel.socket.nio.NioSocketChannel", - "methods": [ { "name": "", "parameterTypes": [] } ] - }, - { - "name": "io.netty.buffer.AbstractByteBufAllocator", - "methods": [ { "name": "toLeakAwareBuffer", "parameterTypes": ["io.netty.buffer.ByteBuf" ] } ] - }, - { - "name": "io.netty.util.ReferenceCountUtil", - "methods": [ { "name": "touch", "parameterTypes": ["java.lang.Object", "java.lang.Object" ] } ] - }, - { - "name" : 
"io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField", - "fields": [ {"name": "producerIndex", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField", - "fields": [ {"name": "producerLimit", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField", - "fields": [ {"name": "consumerIndex", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields", - "fields": [ {"name": "producerIndex", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields", - "fields": [ {"name": "producerLimit", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields", - "fields": [ {"name": "consumerIndex", "allowUnsafeAccess": true} ] - } -] diff --git a/core/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration b/core/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration deleted file mode 100644 index b848ce24855..00000000000 --- a/core/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration +++ /dev/null @@ -1 +0,0 @@ -com.datastax.oss.driver.internal.core.util.concurrent.DriverBlockHoundIntegration \ No newline at end of file diff --git a/core/src/main/resources/com/datastax/oss/driver/Driver.properties b/core/src/main/resources/com/datastax/oss/driver/Driver.properties deleted file mode 100644 index 4706afe2da8..00000000000 --- a/core/src/main/resources/com/datastax/oss/driver/Driver.properties +++ /dev/null @@ -1,28 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Note: properties files should be encoded in ISO-8859-1, but we keep this one -# encoded in UTF-8 because that's much easier when building with Maven. - -driver.groupId=${project.groupId} -driver.artifactId=${project.artifactId} -driver.version=${project.version} -# It would be better to use ${project.parent.name} here, but for some reason the bundle plugin -# prevents that from being resolved correctly (unlike the project-level properties above). -# The value is not likely to change, so we simply hard-code it: -driver.name=Apache Cassandra Java Driver diff --git a/core/src/main/resources/reference.conf b/core/src/main/resources/reference.conf deleted file mode 100644 index 4ae83362e29..00000000000 --- a/core/src/main/resources/reference.conf +++ /dev/null @@ -1,2377 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Reference configuration for the Java Driver for Apache Cassandra®. -# -# Unless you use a custom mechanism to load your configuration (see -# SessionBuilder.withConfigLoader), all the values declared here will be used as defaults. You can -# place your own `application.conf` in the classpath to override them. -# -# Options are classified into two categories: -# - basic: what is most likely to be customized first when kickstarting a new application. -# - advanced: more elaborate tuning options, or "expert"-level customizations. -# -# This file is in HOCON format, see https://github.com/typesafehub/config/blob/master/HOCON.md. -datastax-java-driver { - - # BASIC OPTIONS ---------------------------------------------------------------------------------- - - # The contact points to use for the initial connection to the cluster. - # - # These are addresses of Cassandra nodes that the driver uses to discover the cluster topology. - # Only one contact point is required (the driver will retrieve the address of the other nodes - # automatically), but it is usually a good idea to provide more than one contact point, because if - # that single contact point is unavailable, the driver cannot initialize itself correctly. - # - # This must be a list of strings with each contact point specified as "host:port". If the host is - # a DNS name that resolves to multiple A-records, all the corresponding addresses will be used. Do - # not use "localhost" as the host name (since it resolves to both IPv4 and IPv6 addresses on some - # platforms). 
- # - # Note that Cassandra 3 and below requires all nodes in a cluster to share the same port (see - # CASSANDRA-7544). - # - # Contact points can also be provided programmatically when you build a cluster instance. If both - # are specified, they will be merged. If both are absent, the driver will default to - # 127.0.0.1:9042. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // basic.contact-points = [ "127.0.0.1:9042", "127.0.0.2:9042" ] - - # A name that uniquely identifies the driver instance created from this configuration. This is - # used as a prefix for log messages and metrics. - # - # If this option is absent, the driver will generate an identifier composed of the letter 's' - # followed by an incrementing counter. If you provide a different value, try to keep it short to - # keep the logs readable. Also, make sure it is unique: reusing the same value will not break the - # driver, but it will mix up the logs and metrics. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // basic.session-name = my_session - - # The name of the keyspace that the session should initially be connected to. - # - # This expects the same format as in a CQL query: case-sensitive names must be quoted (note that - # the quotes must be escaped in HOCON format). For example: - # session-keyspace = case_insensitive_name - # session-keyspace = \"CaseSensitiveName\" - # - # If this option is absent, the session won't be connected to any keyspace, and you'll have to - # either qualify table names in your queries, or use the per-query keyspace feature available in - # Cassandra 4 and above (see Request.getKeyspace()). - # - # This can also be provided programatically in CqlSessionBuilder. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // basic.session-keyspace = my_keyspace - - # How often the driver tries to reload the configuration. 
- # - # To disable periodic reloading, set this to 0. - # - # Required: yes (unless you pass a different ConfigLoader to the session builder). - # Modifiable at runtime: yes, the new value will be used after the next time the configuration - # gets reloaded. - # Overridable in a profile: no - basic.config-reload-interval = 5 minutes - - basic.request { - # How long the driver waits for a request to complete. This is a global limit on the duration of - # a session.execute() call, including any internal retries the driver might do. - # - # By default, this value is set pretty high to ensure that DDL queries don't time out, in order - # to provide the best experience for new users trying the driver with the out-of-the-box - # configuration. - # For any serious deployment, we recommend that you use separate configuration profiles for DDL - # and DML; you can then set the DML timeout much lower (down to a few milliseconds if needed). - # - # Note that, because timeouts are scheduled on the driver's timer thread, the duration specified - # here must be greater than the timer tick duration defined by the - # advanced.netty.timer.tick-duration setting (see below). If that is not the case, timeouts will - # not be triggered as timely as desired. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - timeout = 2 seconds - - # The consistency level. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - consistency = LOCAL_ONE - - # The page size. This controls how many rows will be retrieved simultaneously in a single - # network roundtrip (the goal being to avoid loading too many results in memory at the same - # time). 
If there are more results, additional requests will be used to retrieve them (either - # automatically if you iterate with the sync API, or explicitly with the async API's - # fetchNextPage method). - # If the value is 0 or negative, it will be ignored and the request will not be paged. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - page-size = 5000 - - # The serial consistency level. - # The allowed values are SERIAL and LOCAL_SERIAL. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - serial-consistency = SERIAL - - # The default idempotence of a request, that will be used for all `Request` instances where - # `isIdempotent()` returns null. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - default-idempotence = false - } - - # The policy that decides the "query plan" for each query; that is, which nodes to try as - # coordinators, and in which order. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: yes. Note that the driver creates as few instances as possible: if a - # named profile inherits from the default profile, or if two sibling profiles have the exact - # same configuration, they will share a single policy instance at runtime. - # If there are multiple load balancing policies in a single driver instance, they work together - # in the following way: - # - each request gets a query plan from its profile's policy (or the default policy if the - # request has no profile, or the profile does not override the policy). 
- # - when the policies assign distances to nodes, the driver uses the closest assigned distance - # for any given node. - basic.load-balancing-policy { - # The class of the policy. If it is not qualified, the driver assumes that it resides in one of - # the following packages: - # - com.datastax.oss.driver.internal.core.loadbalancing. - # - com.datastax.dse.driver.internal.core.loadbalancing. - # - # The driver provides three implementations out of the box: - # - # - `DefaultLoadBalancingPolicy`: should almost always be used; it requires a local datacenter - # to be specified either programmatically when creating the session, or via the configuration - # option: datastax-java-driver.basic.load-balancing-policy.local-datacenter. It can also - # use a highly efficient slow replica avoidance mechanism, which is by default enabled – see - # the option: datastax-java-driver.basic.load-balancing-policy.slow-replica-avoidance. - # - `DcInferringLoadBalancingPolicy`: similar to `DefaultLoadBalancingPolicy`, but does not - # require a local datacenter to be defined, in which case it will attempt to infer the local - # datacenter from the provided contact points, if possible; if that fails, it will throw an - # error during session initialization. This policy is intended mostly for ETL tools and - # should not be used by normal applications. - # - `BasicLoadBalancingPolicy`: similar to `DefaultLoadBalancingPolicy`, but does not have - # the slow replica avoidance mechanism. More importantly, it is the only policy capable of - # operating without local datacenter defined, in which case it will consider nodes in the - # cluster in a datacenter-agnostic way. Beware that this could cause spikes in - # cross-datacenter traffic! This policy is provided mostly as a starting point for users - # wishing to implement their own load balancing policy; it should not be used as is in normal - # applications. 
- # - # You can also specify a custom class that implements LoadBalancingPolicy and has a public - # constructor with two arguments: the DriverContext and a String representing the profile name. - class = DefaultLoadBalancingPolicy - - # The datacenter that is considered "local": the default policy will only include nodes from - # this datacenter in its query plans. - # - # When using the default policy, this option can only be absent if you specified no contact - # points: in that case, the driver defaults to 127.0.0.1:9042, and that node's datacenter is - # used as the local datacenter. As soon as you provide contact points (either through the - # configuration or through the session builder), you must define the local datacenter - # explicitly, and initialization will fail if this property is absent. In addition, all contact - # points should be from this datacenter; warnings will be logged for nodes that are from a - # different one. - # - # This can also be specified programmatically with SessionBuilder.withLocalDatacenter. If both - # are specified, the programmatic value takes precedence. - // local-datacenter = datacenter1 - - # The class of a custom node distance evaluator. - # - # This option is not required; if present, it must be the fully-qualified name of a class that - # implements `com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator`, and has a - # public constructor taking two arguments: the DriverContext and a String representing the - # profile name. - # - # Alternatively, you can pass an instance of your distance evaluator to - # CqlSession.builder().withNodeDistanceEvaluator(). In that case, this option will be ignored. - # - # The evaluator will be invoked each time the policy processes a topology or state change. The - # evaluator's `evaluateDistance` method will be called with the node affected by the change, and - # the local datacenter name (or null if none is defined). 
If it returns a non-null distance, the - # policy will suggest that distance for the node; if the function returns null, the policy will - # will assign a default distance instead, based on its internal algorithm for computing node - # distances. - // evaluator.class= - - # DEPRECATED. Use evaluator.class instead (see above). If both evaluator.class and filter.class - # are defined, the former wins. - # - # A custom filter to include/exclude nodes. - # - # This option is not required; if present, it must be the fully-qualified name of a class that - # implements `java.util.function.Predicate`, and has a public constructor taking two - # arguments: the DriverContext and a String representing the profile name. - # - # Alternatively, you can pass an instance of your filter to - # CqlSession.builder().withNodeFilter(). In that case, this option will be ignored. - # - # The predicate's `test(Node)` method will be invoked each time the policy processes a - # topology or state change: if it returns false, the node will be set at distance IGNORED - # (meaning the driver won't ever connect to it), and never included in any query plan. - // filter.class= - - # Whether to enable the slow replica avoidance mechanism in DefaultLoadBalancingPolicy. - # - # The default policy always moves replicas first in the query plan (if routing information can - # be determined for the current request). However: - # - if this option is true, it also applies a custom algorithm that takes the responsiveness and - # uptime of each replica into account to order them among each other; - # - if this option is false, replicas are simply shuffled. - # - # If this option is not defined, the driver defaults to true. - slow-replica-avoidance = true - } - basic.cloud { - # The location of the cloud secure bundle used to connect to DataStax Apache Cassandra as a - # service. - # This setting must be a valid URL. 
- # If the protocol is not specified, it is implicitly assumed to be the `file://` protocol, - # in which case the value is expected to be a valid path on the local filesystem. - # For example, `/a/path/to/bundle` will be interpreted as `file:/a/path/to/bunde`. - # If the protocol is provided explicitly, then the value will be used as is. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // secure-connect-bundle = /location/of/secure/connect/bundle - } - - # DataStax Insights monitoring. - basic.application { - # The name of the application using the session. - # - # It will be sent in the STARTUP protocol message for each new connection established by the - # driver. - # - # This can also be defined programmatically with DseSessionBuilder.withApplicationName(). If you - # specify both, the programmatic value takes precedence and this option is ignored. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // name = - - # The version of the application using the session. - # - # It will be sent in the STARTUP protocol message for each new connection established by the - # driver. - # - # This can also be defined programmatically with DseSessionBuilder.withApplicationVersion(). If - # you specify both, the programmatic value takes precedence and this option is ignored. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // version = - } - - # Graph (DataStax Enterprise only) - basic.graph { - # The name of the graph targeted by graph statements. - # - # This can also be overridden programmatically with GraphStatement.setGraphName(). If both are - # specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no. In particular, system queries -- such as creating or dropping a graph -- must be - # executed without a graph name (see also basic.graph.is-system-query). 
- # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // name = your-graph-name - - # The traversal source to use for graph statements. - # - # This setting doesn't usually need to change, unless executing OLAP queries, which require the - # traversal source "a". - # - # This can also be overridden programmatically with GraphStatement.setTraversalSource(). If both - # are specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - traversal-source = "g" - - # Whether a script statement represents a system query. - # - # Script statements that access the `system` variable *must not* specify a graph name (otherwise - # `system` is not available). However, if your application executes a lot of non-system - # statements, it is convenient to configure basic.graph.name to avoid repeating it every time. - # This option allows you to ignore that global graph name, for example in a specific profile. - # - # This can also be overridden programmatically with ScriptGraphStatement.setSystemQuery(). If - # both are specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no (defaults to false) - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // is-system-query = false - - # The read consistency level to use for graph statements. - # - # DSE Graph is able to distinguish between read and write timeouts for the internal storage - # queries that will be produced by a traversal. Hence the consistency level for reads and writes - # can be set separately. - # - # This can also be overridden programmatically with GraphStatement.setReadConsistencyLevel(). 
If - # both are specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no (defaults to request.basic.consistency) - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // read-consistency-level = LOCAL_QUORUM - - # The write consistency level to use for graph statements. - # - # DSE Graph is able to distinguish between read and write timeouts for the internal storage - # queries that will be produced by a traversal. Hence the consistency level for reads and writes - # can be set separately. - # - # This can also be overridden programmatically with GraphStatement.setReadConsistencyLevel(). If - # both are specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no (defaults to request.basic.consistency) - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // write-consistency-level = LOCAL_ONE - - # How long the driver waits for a graph request to complete. This is a global limit on the - # duration of a session.execute() call, including any internal retries the driver might do. - # - # Graph statements behave a bit differently than regular CQL requests (hence this dedicated - # option instead of reusing basic.request.timeout): by default, the client timeout is not set, - # and the driver will just wait as long as needed until the server replies (which is itself - # governed by server-side timeout configuration). - # If you specify a client timeout with this option, then the driver will fail the request after - # the given time; note that the value is also sent along with the request, so that the server - # can also time out early and avoid wasting resources on a response that the client has already - # given up on. - # - # This can also be overridden programmatically with GraphStatement.setTimeout(). 
If both are - # specified, the programmatic value takes precedence, and this option is ignored. - # - # If this value is left unset (default) or is explicitly set to zero, no timeout will be - # applied. - # - # Required: no (defaults to zero - no timeout) - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // timeout = 10 seconds - } - - - # ADVANCED OPTIONS ------------------------------------------------------------------------------- - - # The maximum number of live sessions that are allowed to coexist in a given VM. - # - # This is intended to help detect resource leaks in client applications that create too many - # sessions and/or do not close them correctly. The driver keeps track of the number of live - # sessions in a static variable; if it gets over this threshold, a warning will be logged for - # every new session. - # - # If the value is less than or equal to 0, the feature is disabled: no warning will be issued. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for sessions created after the change. - # Overridable in a profile: no - advanced.session-leak.threshold = 4 - - advanced.connection { - # The timeout to use when establishing driver connections. - # - # This timeout is for controlling how long the driver will wait for the underlying channel - # to actually connect to the server. This is not the time limit for completing protocol - # negotiations, only the time limit for establishing a channel connection. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - connect-timeout = 5 seconds - - # The timeout to use for internal queries that run as part of the initialization process, just - # after we open a connection. If this timeout fires, the initialization of the connection will - # fail. 
If this is the first connection ever, the driver will fail to initialize as well, - # otherwise it will retry the connection later. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - init-query-timeout = 5 seconds - - # The timeout to use when the driver changes the keyspace on a connection at runtime (this - # happens when the client issues a `USE ...` query, and all connections belonging to the current - # session need to be updated). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - set-keyspace-timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - - # The driver maintains a connection pool to each node, according to the distance assigned to it - # by the load balancing policy. - # If the distance is LOCAL, then local.size connections are opened; if the distance is REMOTE, - # then remote.size connections are opened. If the distance is IGNORED, no connections at all - # are maintained. - pool { - # The number of connections in the pool for a node whose distance is LOCAL, that is, a node - # that belongs to the local datacenter, as inferred by the load balancing or defined by the - # option: datastax-java-driver.basic.load-balancing-policy.local-datacenter. - # - # Each connection can handle many concurrent requests, so 1 is generally a good place to - # start. You should only need higher values in very high performance scenarios, where - # connections might start maxing out their I/O thread (see the driver's online manual for - # more tuning instructions). - # - # Required: yes - # Modifiable at runtime: yes; when the change is detected, all active pools will be notified - # and will adjust their size. 
- # Overridable in a profile: no - local.size = 1 - - # The number of connections in the pool for a node whose distance is REMOTE, that is, a node - # that does not belong to the local datacenter. - # - # Note: by default, the built-in load-balancing policies will never assign the REMOTE distance - # to any node, to avoid cross-datacenter network traffic. If you want to change this behavior - # and understand the consequences, configure your policy to accept nodes in remote - # datacenters by adjusting the following advanced options: - # - # - datastax-java-driver.advanced.load-balancing-policy.dc-failover.max-nodes-per-remote-dc - # - datastax-java-driver.advanced.load-balancing-policy.dc-failover.allow-for-local-consistency-levels - # - # Required: yes - # Modifiable at runtime: yes; when the change is detected, all active pools will be notified - # and will adjust their size. - # Overridable in a profile: no - remote.size = 1 - } - - # The maximum number of requests that can be executed concurrently on a connection. This must be - # strictly positive, and less than 32768. - # - # We recommend against changing this value: the default of 1024 is fine for most situations, - # it's a good balance between sufficient concurrency on the client and reasonable pressure on - # the server. If you're looking for a way to limit the global throughput of the session, this is - # not the right way to do it: use a request throttler instead (see the `advanced.throttler` - # section in this configuration). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - max-requests-per-connection = 1024 - - # The maximum number of "orphaned" requests before a connection gets closed automatically. - # - # Sometimes the driver writes to a node but stops listening for a response (for example if the - # request timed out, or was completed by another node). 
But we can't safely reuse the stream id - # on this connection until we know for sure that the server is done with it. Therefore the id is - # marked as "orphaned" until we get a response from the node. - # - # If the response never comes (or is lost because of a network issue), orphaned ids can - # accumulate over time, eventually affecting the connection's throughput. So we monitor them - # and close the connection above a given threshold (the pool will replace it). - # - # The value must be lower than `max-requests-per-connection`. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - max-orphan-requests = 256 - - # Whether to log non-fatal errors when the driver tries to open a new connection. - # - # This error as recoverable, as the driver will try to reconnect according to the reconnection - # policy. Therefore some users see them as unnecessary clutter in the logs. On the other hand, - # those logs can be handy to debug a misbehaving node. - # - # Note that some type of errors are always logged, regardless of this option: - # - protocol version mismatches (the node gets forced down) - # - when the cluster name in system.local doesn't match the other nodes (the node gets forced - # down) - # - authentication errors (will be retried) - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - warn-on-init-error = true - } - - # Advanced options for the built-in load-balancing policies. - advanced.load-balancing-policy { - # Cross-datacenter failover configuration: configure the load-balancing policies to use nodes - # in remote datacenters. - dc-failover { - # The maximum number of nodes to contact in each remote datacenter. - # - # By default, this number is zero, to avoid cross-datacenter network traffic. 
When this - # number is greater than zero: - # - # - The load policies will assign the REMOTE distance to that many nodes in each remote - # datacenter. - # - The driver will then attempt to open connections to those nodes. The actual number of - # connections to open to each one of those nodes is configurable via the option: - # datastax-java-driver.advanced.connection.pool.remote.size. - # - The load-balancing policies will include those remote nodes (and only those) in query - # plans, effectively enabling cross-datacenter failover. - # - # Beware that enabling such failover can result in cross-datacenter network traffic spikes, - # if the local datacenter is down or experiencing high latencies! - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: yes - max-nodes-per-remote-dc = 0 - - # Whether cross-datacenter failover should be allowed for requests executed with local - # consistency levels (LOCAL_ONE, LOCAL_QUORUM and LOCAL_SERIAL). - # - # This is disabled by default. Enabling this feature may have unexpected results, since a - # local consistency level may have different semantics depending on the replication factor in - # use in each datacenter. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: yes - allow-for-local-consistency-levels = false - - # Ordered preference list of remote dc's (in order) optionally supplied for automatic failover. While building a query plan, the driver uses the DC's supplied in order together with max-nodes-per-remote-dc - # Users are not required to specify all DCs, when listing preferences via this config - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - preferred-remote-dcs = [""] - } - } - - # Whether to schedule reconnection attempts if all contact points are unreachable on the first - # initialization attempt. - # - # If this is true, the driver will retry according to the reconnection policy. 
The - # `SessionBuilder.build()` call -- or the future returned by `SessionBuilder.buildAsync()` -- - # won't complete until a contact point has been reached. - # - # If this is false and no contact points are available, the driver will fail with an - # AllNodesFailedException. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - advanced.reconnect-on-init = false - - # The policy that controls how often the driver tries to re-establish connections to down nodes. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - advanced.reconnection-policy { - # The class of the policy. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.connection. - # - # The driver provides two implementations out of the box: ExponentialReconnectionPolicy and - # ConstantReconnectionPolicy. - # - # You can also specify a custom class that implements ReconnectionPolicy and has a public - # constructor with a DriverContext argument. - class = ExponentialReconnectionPolicy - - # ExponentialReconnectionPolicy starts with the base delay, and doubles it after each failed - # reconnection attempt, up to the maximum delay (after that it stays constant). - # - # ConstantReconnectionPolicy only uses the base-delay value, the interval never changes. - base-delay = 1 second - max-delay = 60 seconds - } - - # The policy that controls if the driver retries requests that have failed on one node. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: yes. 
Note that the driver creates as few instances as possible: if a - # named profile inherits from the default profile, or if two sibling profiles have the exact - # same configuration, they will share a single policy instance at runtime. - advanced.retry-policy { - # The class of the policy. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.retry. - # - # The driver provides two implementations out of the box: - # - # - DefaultRetryPolicy: the default policy, should almost always be the right choice. - # - ConsistencyDowngradingRetryPolicy: an alternative policy that weakens consistency guarantees - # as a trade-off to maximize the chance of success when retrying. Use with caution. - # - # Refer to the manual to understand how these policies work. - # - # You can also specify a custom class that implements RetryPolicy and has a public constructor - # with two arguments: the DriverContext and a String representing the profile name. - class = DefaultRetryPolicy - } - - # The policy that controls if the driver pre-emptively tries other nodes if a node takes too long - # to respond. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: yes. Note that the driver creates as few instances as possible: if a - # named profile inherits from the default profile, or if two sibling profiles have the exact - # same configuration, they will share a single policy instance at runtime. - advanced.speculative-execution-policy { - # The class of the policy. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.specex. 
- # - # The following implementations are available out of the box: - # - NoSpeculativeExecutionPolicy: never schedule any speculative execution - # - ConstantSpeculativeExecutionPolicy: schedule executions based on constant delays. This - # requires the `max-executions` and `delay` options below. - # - # You can also specify a custom class that implements SpeculativeExecutionPolicy and has a - # public constructor with two arguments: the DriverContext and a String representing the - # profile name. - class = NoSpeculativeExecutionPolicy - - # The maximum number of executions (including the initial, non-speculative execution). - # This must be at least one. - // max-executions = 3 - - # The delay between each execution. 0 is allowed, and will result in all executions being sent - # simultaneously when the request starts. - # - # Note that sub-millisecond precision is not supported, any excess precision information will be - # dropped; in particular, delays of less than 1 millisecond are equivalent to 0. - # - # Also note that, because speculative executions are scheduled on the driver's timer thread, - # the duration specified here must be greater than the timer tick duration defined by the - # advanced.netty.timer.tick-duration setting (see below). If that is not the case, speculative - # executions will not be triggered as timely as desired. - # - # This must be positive or 0. - // delay = 100 milliseconds - } - - # The component that handles authentication on each new connection. - # - # Required: no. If the 'class' child option is absent, no authentication will occur. - # Modifiable at runtime: no - # Overridable in a profile: no - # - # Note that the contents of this section can be overridden programmatically with - # SessionBuilder.withAuthProvider or SessionBuilder.withAuthCredentials. - advanced.auth-provider { - # The class of the provider. 
If it is not qualified, the driver assumes that it resides in one - # of the following packages: - # - com.datastax.oss.driver.internal.core.auth - # - com.datastax.dse.driver.internal.core.auth - # - # The driver provides two implementations: - # - PlainTextAuthProvider: uses plain-text credentials. It requires the `username` and - # `password` options below. When connecting to DataStax Enterprise, an optional - # `authorization-id` can also be specified. - # For backward compatibility with previous driver versions, you can also use the class name - # "DsePlainTextAuthProvider" for this provider. - # - DseGssApiAuthProvider: provides GSSAPI authentication for DSE clusters secured with - # DseAuthenticator. See the example below and refer to the manual for detailed instructions. - # - # You can also specify a custom class that implements AuthProvider and has a public constructor - # with a DriverContext argument (to simplify this, the driver provides two abstract classes that - # can be extended: PlainTextAuthProviderBase and DseGssApiAuthProviderBase). - # - # Finally, you can configure a provider instance programmatically with - # DseSessionBuilder#withAuthProvider. In that case, it will take precedence over the - # configuration. - // class = PlainTextAuthProvider - # - # Sample configuration for plain-text authentication providers: - // username = cassandra - // password = cassandra - # - # Proxy authentication: allows to login as another user or role (valid for both - # PlainTextAuthProvider and DseGssApiAuthProvider): - // authorization-id = userOrRole - # - # The settings below are only applicable to DseGssApiAuthProvider: - # - # Service name. For example, if in your dse.yaml configuration file the - # "kerberos_options/service_principal" setting is "cassandra/my.host.com@MY.REALM.COM", then set - # this option to "cassandra". 
If this value is not explicitly set via configuration (in an - # application.conf or programmatically), the driver will attempt to set it via a System - # property. The property should be "dse.sasl.service". For backwards compatibility with 1.x - # versions of the driver, if "dse.sasl.service" is not set as a System property, the driver will - # attempt to use "dse.sasl.protocol" as a fallback (which is the property for the 1.x driver). - //service = "cassandra" - # - # Login configuration. It is also possible to provide login configuration through a standard - # JAAS configuration file. The below configuration is just an example, see all possible options - # here: - # https://docs.oracle.com/javase/6/docs/jre/api/security/jaas/spec/com/sun/security/auth/module/Krb5LoginModule.html - // login-configuration { - // principal = "cassandra@DATASTAX.COM" - // useKeyTab = "true" - // refreshKrb5Config = "true" - // keyTab = "/path/to/keytab/file" - // } - # - # Internal SASL properties, if any, such as QOP. - // sasl-properties { - // javax.security.sasl.qop = "auth-conf" - // } - } - - # The SSL engine factory that will initialize an SSL engine for each new connection to a server. - # - # Required: no. If the 'class' child option is absent, SSL won't be activated. - # Modifiable at runtime: no - # Overridable in a profile: no - # - # Note that the contents of this section can be overridden programmatically with - # SessionBuilder.withSslEngineFactory or SessionBuilder#withSslContext. - advanced.ssl-engine-factory { - # The class of the factory. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.ssl. - # - # The driver provides a single implementation out of the box: DefaultSslEngineFactory, that uses - # the JDK's built-in SSL implementation. - # - # You can also specify a custom class that implements SslEngineFactory and has a public - # constructor with a DriverContext argument. 
- // class = DefaultSslEngineFactory - - # Sample configuration for the default SSL factory: - # The cipher suites to enable when creating an SSLEngine for a connection. - # This property is optional. If it is not present, the driver won't explicitly enable cipher - # suites on the engine, which according to the JDK documentations results in "a minimum quality - # of service". - // cipher-suites = [ "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" ] - - # Whether or not to require validation that the hostname of the server certificate's common - # name matches the hostname of the server being connected to. If not set, defaults to true. - // hostname-validation = true - - # Whether or not to allow a DNS reverse-lookup of provided server addresses for SAN addresses, - # if cluster endpoints are specified as literal IPs. - # This is left as true for compatibility, but in most environments a DNS reverse-lookup should - # not be necessary to get an address that matches the server certificate SANs. - // allow-dns-reverse-lookup-san = true - - # The locations and passwords used to access truststore and keystore contents. - # These properties are optional. If either truststore-path or keystore-path are specified, - # the driver builds an SSLContext from these files. If neither option is specified, the - # default SSLContext is used, which is based on system property configuration. - // truststore-path = /path/to/client.truststore - // truststore-password = password123 - // keystore-path = /path/to/client.keystore - // keystore-password = password123 - - # The duration between attempts to reload the keystore from the contents of the file specified - # by `keystore-path`. This is mainly relevant in environments where certificates have short - # lifetimes and applications are restarted infrequently, since an expired client certificate - # will prevent new connections from being established until the application is restarted. 
If - # not set, defaults to not reload the keystore. - // keystore-reload-interval = 30 minutes - } - - # The generator that assigns a microsecond timestamp to each request. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: yes. Note that the driver creates as few instances as possible: if a - # named profile inherits from the default profile, or if two sibling profiles have the exact - # same configuration, they will share a single generator instance at runtime. - advanced.timestamp-generator { - # The class of the generator. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.time. - # - # The driver provides the following implementations out of the box: - # - AtomicTimestampGenerator: timestamps are guaranteed to be unique across all client threads. - # - ThreadLocalTimestampGenerator: timestamps that are guaranteed to be unique within each - # thread only. - # - ServerSideTimestampGenerator: do not generate timestamps, let the server assign them. - # - # You can also specify a custom class that implements TimestampGenerator and has a public - # constructor with two arguments: the DriverContext and a String representing the profile name. - class = AtomicTimestampGenerator - - # To guarantee that queries are applied on the server in the same order as the client issued - # them, timestamps must be strictly increasing. But this means that, if the driver sends more - # than one query per microsecond, timestamps will drift in the future. While this could happen - # occasionally under high load, it should not be a regular occurrence. Therefore the built-in - # implementations log a warning to detect potential issues. - drift-warning { - # How far in the future timestamps are allowed to drift before the warning is logged. 
- # If it is undefined or set to 0, warnings are disabled. - threshold = 1 second - - # How often the warning will be logged if timestamps keep drifting above the threshold. - interval = 10 seconds - } - - # Whether to force the driver to use Java's millisecond-precision system clock. - # If this is false, the driver will try to access the microsecond-precision OS clock via native - # calls (and fallback to the Java one if the native calls fail). - # Unless you explicitly want to avoid native calls, there's no reason to change this. - force-java-clock = false - } - - # Request trackers are session-wide components that get notified of the outcome of requests. - advanced.request-tracker { - # The list of trackers to register. - # - # This must be a list of class names, either fully-qualified or non-qualified; if the latter, - # the driver assumes that the class resides in the package - # com.datastax.oss.driver.internal.core.tracker. - # - # All classes specified here must implement - # com.datastax.oss.driver.api.core.tracker.RequestTracker and have a public constructor with a - # DriverContext argument. - # - # The driver provides the following implementation out of the box: - # - RequestLogger: logs requests (see the parameters below). - # - # You can also pass instances of your trackers programmatically with - # CqlSession.builder().addRequestTracker(). - # - # Required: no - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - #classes = [RequestLogger,com.example.app.MyTracker] - - # Parameters for RequestLogger. All of them can be overridden in a profile, and changed at - # runtime (the new values will be taken into account for requests logged after the change). - logs { - # Whether to log successful requests. - // success.enabled = true - - slow { - # The threshold to classify a successful request as "slow". 
If this is unset, all successful - # requests will be considered as normal. - // threshold = 1 second - - # Whether to log slow requests. - // enabled = true - } - - # Whether to log failed requests. - // error.enabled = true - - # The maximum length of the query string in the log message. If it is longer than that, it - # will be truncated. - // max-query-length = 500 - - # Whether to log bound values in addition to the query string. - // show-values = true - - # The maximum length for bound values in the log message. If the formatted representation of a - # value is longer than that, it will be truncated. - // max-value-length = 50 - - # The maximum number of bound values to log. If a request has more values, the list of values - # will be truncated. - // max-values = 50 - - # Whether to log stack traces for failed queries. If this is disabled, the log will just - # include the exception's string representation (generally the class name and message). - // show-stack-traces = true - } - } - - advanced.request-id { - generator { - # The component that generates a unique identifier for each CQL request, and possibly write the id to the custom payload . - // class = W3CContextRequestIdGenerator - } - } - - # A session-wide component that controls the rate at which requests are executed. - # - # Implementations vary, but throttlers generally track a metric that represents the level of - # utilization of the session, and prevent new requests from starting when that metric exceeds a - # threshold. Pending requests may be enqueued and retried later. - # - # From the public API's point of view, this process is mostly transparent: any time that the - # request is throttled is included in the session.execute() or session.executeAsync() call. 
- # Similarly, the request timeout encompasses throttling: the timeout starts ticking before the - # throttler has started processing the request; a request may time out while it is still in the - # throttler's queue, before the driver has even tried to send it to a node. - # - # The only visible effect is that a request may fail with a RequestThrottlingException, if the - # throttler has determined that it can neither allow the request to proceed now, nor enqueue it; - # this indicates that your session is overloaded. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - advanced.throttler { - # The class of the throttler. If it is not qualified, the driver assumes that it resides in - # the package com.datastax.oss.driver.internal.core.session.throttling. - # - # The driver provides the following implementations out of the box: - # - # - PassThroughRequestThrottler: does not perform any kind of throttling, all requests are - # allowed to proceed immediately. Required options: none. - # - # - ConcurrencyLimitingRequestThrottler: limits the number of requests that can be executed in - # parallel. Required options: max-concurrent-requests, max-queue-size. - # - # - RateLimitingRequestThrottler: limits the request rate per second. Required options: - # max-requests-per-second, max-queue-size, drain-interval. - # - # You can also specify a custom class that implements RequestThrottler and has a public - # constructor with a DriverContext argument. - class = PassThroughRequestThrottler - - # The maximum number of requests that can be enqueued when the throttling threshold is exceeded. - # Beyond that size, requests will fail with a RequestThrottlingException. - // max-queue-size = 10000 - - # The maximum number of requests that are allowed to execute in parallel. 
- # Only used by ConcurrencyLimitingRequestThrottler. - // max-concurrent-requests = 10000 - - # The maximum allowed request rate. - # Only used by RateLimitingRequestThrottler. - // max-requests-per-second = 10000 - - # How often the throttler attempts to dequeue requests. This is the only way for rate-based - # throttling, because the completion of an active request does not necessarily free a "slot" for - # a queued one (the rate might still be too high). - # - # You want to set this high enough that each attempt will process multiple entries in the queue, - # but not delay requests too much. A few milliseconds is probably a happy medium. - # - # Only used by RateLimitingRequestThrottler. - // drain-interval = 10 milliseconds - } - - # The list of node state listeners to register. Node state listeners are session-wide - # components that listen for node state changes (e.g., when nodes go down or back up). - # - # This must be a list of fully-qualified class names; classes specified here must implement - # com.datastax.oss.driver.api.core.metadata.NodeStateListener and have a public - # constructor with a DriverContext argument. - # - # You can also pass instances of your listeners programmatically with - # CqlSession.builder().addNodeStateListener(). - # - # Required: no - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - #advanced.node-state-listener.classes = [com.example.app.MyListener1,com.example.app.MyListener2] - - # The list of schema change listeners to register. Schema change listeners are session-wide - # components that listen for schema changes (e.g., when tables are created or dropped). 
- # - # This must be a list of fully-qualified class names; classes specified here must implement - # com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener and have a public - # constructor with a DriverContext argument. - # - # You can also pass instances of your listeners programmatically with - # CqlSession.builder().addSchemaChangeListener(). - # - # Required: no - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - #advanced.schema-change-listener.classes = [com.example.app.MyListener1,com.example.app.MyListener2] - - # The address translator to use to convert the addresses sent by Cassandra nodes into ones that - # the driver uses to connect. - # This is only needed if the nodes are not directly reachable from the driver (for example, the - # driver is in a different network region and needs to use a public IP, or it connects through a - # proxy). - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - advanced.address-translator { - # The class of the translator. If it is not qualified, the driver assumes that it resides in - # the package com.datastax.oss.driver.internal.core.addresstranslation. - # - # The driver provides the following implementations out of the box: - # - PassThroughAddressTranslator: returns all addresses unchanged. - # - FixedHostNameAddressTranslator: translates all addresses to a specific hostname. - # - SubnetAddressTranslator: translates addresses to hostname based on the subnet match. - # - Ec2MultiRegionAddressTranslator: suitable for an Amazon multi-region EC2 deployment where - # clients are also deployed in EC2. It optimizes network costs by favoring private IPs over - # public ones whenever possible. - # - # You can also specify a custom class that implements AddressTranslator and has a public - # constructor with a DriverContext argument. 
- class = PassThroughAddressTranslator - # - # This property has to be set only in case you use FixedHostNameAddressTranslator. - # advertised-hostname = mycustomhostname - # - # These properties are only applicable in case you use SubnetAddressTranslator. - # subnet-addresses { - # "100.64.0.0/15" = "cassandra.datacenter1.com:9042" - # "100.66.0.0/15" = "cassandra.datacenter2.com:9042" - # # IPv6 example: - # # "::ffff:6440:0/111" = "cassandra.datacenter1.com:9042" - # # "::ffff:6442:0/111" = "cassandra.datacenter2.com:9042" - # } - # Optional. When configured, addresses not matching the configured subnets are translated to this address. - # default-address = "cassandra.datacenter1.com:9042" - # Whether to resolve the addresses once on initialization (if true) or on each node (re-)connection (if false). - # If not configured, defaults to false. - # resolve-addresses = false - } - - # Whether to resolve the addresses passed to `basic.contact-points`. - # - # If this is true, addresses are created with `InetSocketAddress(String, int)`: the host name will - # be resolved the first time, and the driver will use the resolved IP address for all subsequent - # connection attempts. - # - # If this is false, addresses are created with `InetSocketAddress.createUnresolved()`: the host - # name will be resolved again every time the driver opens a new connection. This is useful for - # containerized environments where DNS records are more likely to change over time (note that the - # JVM and OS have their own DNS caching mechanisms, so you might need additional configuration - # beyond the driver). - # - # This option only applies to the contact points specified in the configuration. It has no effect - # on: - # - programmatic contact points passed to SessionBuilder.addContactPoints: these addresses are - # built outside of the driver, so it is your responsibility to provide unresolved instances. 
- # - dynamically discovered peers: the driver relies on Cassandra system tables, which expose raw - # IP addresses. Use a custom address translator to convert them to unresolved addresses (if - # you're in a containerized environment, you probably already need address translation anyway). - # - # Required: no (defaults to true) - # Modifiable at runtime: no - # Overridable in a profile: no - advanced.resolve-contact-points = true - - advanced.protocol { - # The native protocol version to use. - # - # If this option is absent, the driver looks up the versions of the nodes at startup (by default - # in system.peers.release_version), and chooses the highest common protocol version. - # For example, if you have a mixed cluster with Apache Cassandra 2.1 nodes (protocol v3) and - # Apache Cassandra 3.0 nodes (protocol v3 and v4), then protocol v3 is chosen. If the nodes - # don't have a common protocol version, initialization fails. - # - # If this option is set, then the given version will be used for all connections, without any - # negotiation or downgrading. If any of the contact points doesn't support it, that contact - # point will be skipped. - # - # Once the protocol version is set, it can't change for the rest of the driver's lifetime; if - # an incompatible node joins the cluster later, connection will fail and the driver will force - # it down (i.e. never try to connect to it again). - # - # You can check the actual version at runtime with Session.getContext().getProtocolVersion(). - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // version = V4 - - # The name of the algorithm used to compress protocol frames. - # - # The possible values are: - # - lz4: requires at.yawk.lz4:lz4-java in the classpath. - # - snappy: requires org.xerial.snappy:snappy-java in the classpath. - # - the string "none" to indicate no compression (this is functionally equivalent to omitting - # the option). 
- # - # The driver depends on the compression libraries, but they are optional. Make sure you - # redeclare an explicit dependency in your project. Refer to the driver's POM or manual for the - # exact version. - # - # Required: no. If the option is absent, protocol frames are not compressed. - # Modifiable at runtime: no - # Overridable in a profile: no - // compression = lz4 - - # The maximum length of the frames supported by the driver. Beyond that limit, requests will - # fail with an exception - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - max-frame-length = 256 MiB - } - - advanced.request { - # Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active - # keyspace. - # Switching keyspace at runtime is highly discouraged, because it is inherently unsafe (other - # requests expecting the old keyspace might be running concurrently), and may cause statements - # prepared before the change to fail. - # It should only be done in very specific use cases where there is only a single client thread - # executing synchronous queries (such as a cqlsh-like interpreter). In other cases, clients - # should prefix table names in their queries instead. - # - # Note that CASSANDRA-10145 (scheduled for C* 4.0) will introduce a per-request keyspace option - # as a workaround to this issue. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for keyspace switches occurring after - # the change. - # Overridable in a profile: no - warn-if-set-keyspace = true - - # If tracing is enabled for a query, this controls how the trace is fetched. - trace { - # How many times the driver will attempt to fetch the query if it is not ready yet. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for traces fetched after the change. 
- # Overridable in a profile: yes - attempts = 5 - - # The interval between each attempt. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for traces fetched after the change. - # Overridable in a profile: yes - interval = 3 milliseconds - - # The consistency level to use for trace queries. - # Note that the default replication strategy for the system_traces keyspace is SimpleStrategy - # with RF=2, therefore LOCAL_ONE might not work if the local DC has no replicas for a given - # trace id. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for traces fetched after the change. - # Overridable in a profile: yes - consistency = ONE - } - - # Whether logging of server warnings generated during query execution should be disabled by the - # driver. All server generated warnings will be available programmatically via the ExecutionInfo - # object on the executed statement's ResultSet. If set to "false", this will prevent the driver - # from logging these warnings. - # - # NOTE: The log formatting for these warning messages will reuse the options defined for - # advanced.request-tracker. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for query warnings received after the change. - # Overridable in a profile: yes - log-warnings = true - } - - # Graph (DataStax Enterprise only) - advanced.graph { - # The sub-protocol the driver will use to communicate with DSE Graph, on top of the Cassandra - # native protocol. - # - # You should almost never have to change this: the driver sets it automatically, based on the - # information it has about the server. One exception is if you use the script API against a - # legacy DSE version (5.0.3 or older). In that case, you need to force the sub-protocol to - # "graphson-1.0". - # - # This can also be overridden programmatically with GraphStatement.setSubProtocol(). 
If both are - # specified, the programmatic value takes precedence, and this option is ignored. - # - # Possible values with built-in support in the driver are: - # [ "graphson-1.0", "graphson-2.0", "graph-binary-1.0"] - # - # IMPORTANT: The default value for the Graph sub-protocol is based only on the DSE - # version. If the version is DSE 6.7 and lower, "graphson-2.0" will be the default. For DSE 6.8 - # and higher, the default value is "graph-binary-1.0". - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // sub-protocol = "graphson-2.0" - - # - # Whether or not Graph paging should be enabled or disabled for all queries. - # - #

If AUTO is set, the driver will decide whether or not to enable Graph paging - # based on the protocol version in use and the DSE version of all hosts. For this reason it is - # usually not necessary to change this setting. - # - #

IMPORTANT: Paging for DSE Graph is only available in DSE 6.8 and higher, and - # requires protocol version DSE_V1 or higher and graphs created with the Native engine; enabling - # paging for clusters and graphs that do not meet this requirement may result in query failures. - # - # Supported values are: ENABLED, DISABLED, AUTO - paging-enabled = "AUTO" - - - paging-options { - - # The page size. - # - # The value specified here can be interpreted in number of rows. - # Interpretation in number of bytes is not supported for graph continuous paging queries. - # - # It controls how many rows will be retrieved simultaneously in a single - # network roundtrip (the goal being to avoid loading too many results in memory at the same - # time). If there are more results, additional requests will be used to retrieve them (either - # automatically if you iterate with the sync API, or explicitly with the async API's - # fetchNextPage method). - # - # The default is the same as the driver's normal request page size, - # i.e., 5000 (rows). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - page-size = ${datastax-java-driver.advanced.continuous-paging.page-size} - - # The maximum number of pages to return. - # - # The default is zero, which means retrieve all pages. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-pages = ${datastax-java-driver.advanced.continuous-paging.max-pages} - - # Returns the maximum number of pages per second. - # - # The default is zero, which means no limit. 
- # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-pages-per-second = ${datastax-java-driver.advanced.continuous-paging.max-pages-per-second} - - # The maximum number of pages that can be stored in the local queue. - # - # This value must be positive. The default is 4. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-enqueued-pages = ${datastax-java-driver.advanced.continuous-paging.max-enqueued-pages} - } - } - - # Continuous paging (DataStax Enterprise only) - advanced.continuous-paging { - - # The page size. - # - # The value specified here can be interpreted in number of rows - # or in number of bytes, depending on the unit defined with page-unit (see below). - # - # It controls how many rows (or how much data) will be retrieved simultaneously in a single - # network roundtrip (the goal being to avoid loading too many results in memory at the same - # time). If there are more results, additional requests will be used to retrieve them (either - # automatically if you iterate with the sync API, or explicitly with the async API's - # fetchNextPage method). - # - # The default is the same as the driver's normal request page size, - # i.e., 5000 (rows). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - page-size = ${datastax-java-driver.basic.request.page-size} - - # Whether the page-size option should be interpreted in number of rows or bytes. - # - # The default is false, i.e., the page size will be interpreted in number of rows. 
- # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - page-size-in-bytes = false - - # The maximum number of pages to return. - # - # The default is zero, which means retrieve all pages. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-pages = 0 - - # Returns the maximum number of pages per second. - # - # The default is zero, which means no limit. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-pages-per-second = 0 - - # The maximum number of pages that can be stored in the local queue. - # - # This value must be positive. The default is 4. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-enqueued-pages = 4 - - # Timeouts for continuous paging. - # - # Note that there is no global timeout for continuous paging as there is - # for regular queries, because continuous paging queries can take an arbitrarily - # long time to complete. - # - # Instead, timeouts are applied to each exchange between the driver and the coordinator. In - # other words, if the driver decides to retry, all timeouts are reset. - timeout { - - # How long to wait for the coordinator to send the first page. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - first-page = 2 seconds - - # How long to wait for the coordinator to send subsequent pages. 
- # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - other-pages = 1 second - - } - } - - # DataStax Insights - advanced.monitor-reporting { - # Whether to send monitoring events. - # - # The default is true. - # - # Required: no (defaults to true) - # Modifiable at runtime: no - # Overridable in a profile: no - enabled = true - } - - advanced.metrics { - # Metrics Factory configuration. - factory { - # The class for the metrics factory. - # - # The driver provides out-of-the-box support for three metrics libraries: Dropwizard, - # Micrometer and MicroProfile Metrics. - # - # Dropwizard is the default metrics library in the driver; to use Dropwizard, this value - # should be left to its default, "DefaultMetricsFactory", or set to - # "DropwizardMetricsFactory". The only difference between the two is that the former will work - # even if Dropwizard is not present on the classpath (in which case it will silently disable - # metrics), while the latter requires its presence. - # - # To select Micrometer, set the value to "MicrometerMetricsFactory", and to select - # MicroProfile Metrics, set the value to "MicroProfileMetricsFactory". For these libraries to - # be used, you will also need to add an additional dependency: - # - Micrometer: org.apache.cassandra:java-driver-metrics-micrometer - # - MicroProfile: org.apache.cassandra:java-driver-metrics-microprofile - # - # If you would like to use another metrics library, set this value to the fully-qualified name - # of a class that implements com.datastax.oss.driver.internal.core.metrics.MetricsFactory. - # - # It is also possible to use "NoopMetricsFactory", which forcibly disables metrics completely. - # In fact, "DefaultMetricsFactory" delegates to "DropwizardMetricsFactory" if Dropwizard is - # present on the classpath, or to "NoopMetricsFactory" if it isn't. 
- # - # Note: specifying a metrics factory is not enough to enable metrics; for the driver to - # actually start collecting metrics, you also need to specify which metrics to collect. See - # the following options for more information: - # - advanced.metrics.session.enabled - # - advanced.metrics.node.enabled - # - # See also the driver online manual for extensive instructions about how to configure metrics. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - class = DefaultMetricsFactory - } - - # This section configures how metric ids are generated. A metric id is a unique combination of - # a metric name and metric tags. - id-generator { - - # The class name of a component implementing - # com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator. If it is not qualified, the - # driver assumes that it resides in the package com.datastax.oss.driver.internal.core.metrics. - # - # The driver ships with two built-in implementations: - # - # - DefaultMetricIdGenerator: generates identifiers composed solely of (unique) metric names; - # it does not generate tags. It is mostly suitable for use with metrics libraries that do - # not support tags, like Dropwizard. - # - TaggingMetricIdGenerator: generates identifiers composed of name and tags. It is mostly - # suitable for use with metrics libraries that support tags, like Micrometer or MicroProfile - # Metrics. - # - # For example, here is how each one of them generates identifiers for the session metric - # "bytes-sent", assuming that the session is named "s0": - # - DefaultMetricIdGenerator: name "s0.bytes-sent", tags: {}. - # - TaggingMetricIdGenerator: name "session.bytes-sent", tags: {"session":"s0"} - # - # Here is how each one of them generates identifiers for the node metric "bytes-sent", - # assuming that the session is named "s0", and the node's broadcast address is 10.1.2.3:9042: - # - DefaultMetricIdGenerator: name "s0.nodes.10_1_2_3:9042.bytes-sent", tags: {}. 
- # - TaggingMetricIdGenerator: name "nodes.bytes-sent", tags: { "session" : "s0", - # "node" : "\10.1.2.3:9042" } - # - # As shown above, both built-in implementations generate names that are path-like structures - # separated by dots. This is indeed the most common expected format by reporting tools. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - class = DefaultMetricIdGenerator - - # An optional prefix to prepend to each generated metric name. - # - # The prefix should not start nor end with a dot or any other path separator; the following - # are two valid examples: "cassandra" or "myapp.prod.cassandra". - # - # For example, if this prefix is set to "cassandra", here is how the session metric - # "bytes-sent" would be named, assuming that the session is named "s0": - # - with DefaultMetricIdGenerator: "cassandra.s0.bytes-sent" - # - with TaggingMetricIdGenerator: "cassandra.session.bytes-sent" - # - # Here is how the node metric "bytes-sent" would be named, assuming that the session is named - # "s0", and the node's broadcast address is 10.1.2.3:9042: - # - with DefaultMetricIdGenerator: "cassandra.s0.nodes.10_1_2_3:9042.bytes-sent" - # - with TaggingMetricIdGenerator: "cassandra.nodes.bytes-sent" - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // prefix = "cassandra" - } - - histograms { - # Adds histogram buckets used to generate aggregable percentile approximations in monitoring - # systems that have query facilities to do so (e.g. Prometheus histogram_quantile, Atlas percentiles). - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - generate-aggregable = true - } - - # The session-level metrics (all disabled by default). 
- # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - session { - enabled = [ - # The number and rate of bytes sent for the entire session (exposed as a Meter if available, - # otherwise as a Counter). - // bytes-sent, - - # The number and rate of bytes received for the entire session (exposed as a Meter if - # available, otherwise as a Counter). - // bytes-received - - # The number of nodes to which the driver has at least one active connection (exposed as a - # Gauge). - // connected-nodes, - - # The throughput and latency percentiles of CQL requests (exposed as a Timer). - # - # This corresponds to the overall duration of the session.execute() call, including any - # retry. - // cql-requests, - - # The number of CQL requests that timed out -- that is, the session.execute() call failed - # with a DriverTimeoutException (exposed as a Counter). - // cql-client-timeouts, - - # The size of the driver-side cache of CQL prepared statements (exposed as a Gauge). - # - # The cache uses weak values eviction, so this represents the number of PreparedStatement - # instances that your application has created, and is still holding a reference to. Note - # that the returned value is approximate. - // cql-prepared-cache-size, - - # How long requests are being throttled (exposed as a Timer). - # - # This is the time between the start of the session.execute() call, and the moment when - # the throttler allows the request to proceed. - // throttling.delay, - - # The size of the throttling queue (exposed as a Gauge). - # - # This is the number of requests that the throttler is currently delaying in order to - # preserve its SLA. This metric only works with the built-in concurrency- and rate-based - # throttlers; in other cases, it will always be 0. 
- // throttling.queue-size, - - # The number of times a request was rejected with a RequestThrottlingException (exposed as - # a Counter) - // throttling.errors, - - # The throughput and latency percentiles of DSE continuous CQL requests (exposed as a - # Timer). - # - # This metric is a session-level metric and corresponds to the overall duration of the - # session.executeContinuously() call, including any retry. - # - # Note that this metric is analogous to the OSS driver's 'cql-requests' metrics, but for - # continuous paging requests only. Continuous paging requests do not update the - # 'cql-requests' metric, because they are usually much longer. Only the following metrics - # are updated during a continuous paging request: - # - # - At node level: all the usual metrics available for normal CQL requests, such as - # 'cql-messages' and error-related metrics (but these are only updated for the first - # page of results); - # - At session level: only 'continuous-cql-requests' is updated (this metric). - // continuous-cql-requests, - - # The throughput and latency percentiles of Graph requests (exposed as a Timer). - # - # This metric is a session-level metric and corresponds to the overall duration of the - # session.execute(GraphStatement) call, including any retry. - // graph-requests, - - # The number of graph requests that timed out -- that is, the - # session.execute(GraphStatement) call failed with a DriverTimeoutException (exposed as a - # Counter). - # - # Note that this metric is analogous to the OSS driver's 'cql-client-timeouts' metrics, but - # for Graph requests only. - // graph-client-timeouts - - ] - - # Extra configuration (for the metrics that need it) - - # Required: if the 'cql-requests' metric is enabled, and Dropwizard or Micrometer is used. - # Modifiable at runtime: no - # Overridable in a profile: no - cql-requests { - - # The largest latency that we expect to record. 
- # - # This should be slightly higher than request.timeout (in theory, readings can't be higher - # than the timeout, but there might be a small overhead due to internal scheduling). - # - # This is used to scale internal data structures. If a higher recording is encountered at - # runtime, it is discarded and a warning is logged. - # Valid for: Dropwizard, Micrometer. - highest-latency = 3 seconds - - # The shortest latency that we expect to record. This is used to scale internal data - # structures. - # Valid for: Micrometer. - lowest-latency = 1 millisecond - - # The number of significant decimal digits to which internal structures will maintain - # value resolution and separation (for example, 3 means that recordings up to 1 second - # will be recorded with a resolution of 1 millisecond or better). - # - # For Dropwizard, this must be between 0 and 5. If the value is out of range, it defaults to - # 3 and a warning is logged. - # Valid for: Dropwizard, Micrometer. - significant-digits = 3 - - # The interval at which percentile data is refreshed. - # - # The driver records latency data in a "live" histogram, and serves results from a cached - # snapshot. Each time the snapshot gets older than the interval, the two are switched. - # Note that this switch happens upon fetching the metrics, so if you never fetch the metrics, the - # recording interval might grow higher (that shouldn't be an issue in a production - # environment because you would typically have a metrics reporter that exports to a - # monitoring tool at a regular interval). - # - # In practice, this means that if you set this to 5 minutes, you're looking at data from a - # 5-minute interval in the past, that is at most 5 minutes old. If you fetch the metrics - # at a faster pace, you will observe the same data for 5 minutes until the interval - # expires. - # - # Note that this does not apply to the total count and rates (those are updated in real - # time). - # Valid for: Dropwizard. 
- refresh-interval = 5 minutes - - # An optional list of latencies to track as part of the application's service-level - # objectives (SLOs). - # - # If defined, the histogram is guaranteed to contain these boundaries alongside other - # buckets used to generate aggregable percentile approximations. - # Valid for: Micrometer. - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - - # An optional list of percentiles to be published by Micrometer. Produces an additional time series for each requested percentile. - # This percentile is computed locally, and so can't be aggregated with percentiles computed across other dimensions (e.g. in a different instance) - # Valid for: Micrometer. - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # Required: if the 'throttling.delay' metric is enabled, and Dropwizard or Micrometer is used. - # Modifiable at runtime: no - # Overridable in a profile: no - throttling.delay { - highest-latency = 3 seconds - lowest-latency = 1 millisecond - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # Required: if the 'continuous-cql-requests' metric is enabled, and Dropwizard or Micrometer - # is used. - # Modifiable at runtime: no - # Overridable in a profile: no - continuous-cql-requests { - highest-latency = 120 seconds - lowest-latency = 10 milliseconds - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # Required: if the 'graph-requests' metric is enabled, and Dropwizard or Micrometer is used. 
- # Modifiable at runtime: no - # Overridable in a profile: no - graph-requests { - highest-latency = 12 seconds - lowest-latency = 1 millisecond - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - } - # The node-level metrics (all disabled by default). - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - node { - enabled = [ - # The number of connections open to this node for regular requests (exposed as a - # Gauge). - # - # This includes the control connection (which uses at most one extra connection to a - # random node in the cluster). - // pool.open-connections, - - # The number of stream ids available on the connections to this node (exposed as a - # Gauge). - # - # Stream ids are used to multiplex requests on each connection, so this is an indication - # of how many more requests the node could handle concurrently before becoming saturated - # (note that this is a driver-side only consideration, there might be other limitations on - # the server that prevent reaching that theoretical limit). - // pool.available-streams, - - # The number of requests currently executing on the connections to this node (exposed as a - # Gauge). This includes orphaned streams. - // pool.in-flight, - - # The number of "orphaned" stream ids on the connections to this node (exposed as a - # Gauge). - # - # See the description of the connection.max-orphan-requests option for more details. - // pool.orphaned-streams, - - # The number and rate of bytes sent to this node (exposed as a Meter if available, otherwise - # as a Counter). - // bytes-sent, - - # The number and rate of bytes received from this node (exposed as a Meter if available, - # otherwise as a Counter). - // bytes-received, - - # The throughput and latency percentiles of individual CQL messages sent to this node as - # part of an overall request (exposed as a Timer). 
- # - # Note that this does not necessarily correspond to the overall duration of the - # session.execute() call, since the driver might query multiple nodes because of retries - # and speculative executions. Therefore a single "request" (as seen from a client of the - # driver) can be composed of more than one of the "messages" measured by this metric. - # - # Therefore this metric is intended as an insight into the performance of this particular - # node. For statistics on overall request completion, use the session-level cql-requests. - // cql-messages, - - # The number of times the driver failed to send a request to this node (exposed as a - # Counter). - # - # In those case we know the request didn't even reach the coordinator, so they are retried - # on the next node automatically (without going through the retry policy). - // errors.request.unsent, - - # The number of times a request was aborted before the driver even received a response - # from this node (exposed as a Counter). - # - # This can happen in two cases: if the connection was closed due to an external event - # (such as a network error or heartbeat failure); or if there was an unexpected error - # while decoding the response (this can only be a driver bug). - // errors.request.aborted, - - # The number of times this node replied with a WRITE_TIMEOUT error (exposed as a Counter). - # - # Whether this error is rethrown directly to the client, rethrown or ignored is determined - # by the RetryPolicy. - // errors.request.write-timeouts, - - # The number of times this node replied with a READ_TIMEOUT error (exposed as a Counter). - # - # Whether this error is rethrown directly to the client, rethrown or ignored is determined - # by the RetryPolicy. - // errors.request.read-timeouts, - - # The number of times this node replied with an UNAVAILABLE error (exposed as a Counter). - # - # Whether this error is rethrown directly to the client, rethrown or ignored is determined - # by the RetryPolicy. 
- // errors.request.unavailables, - - # The number of times this node replied with an error that doesn't fall under other - # 'errors.*' metrics (exposed as a Counter). - // errors.request.others, - - # The total number of errors on this node that caused the RetryPolicy to trigger a retry - # (exposed as a Counter). - # - # This is a sum of all the other retries.* metrics. - // retries.total, - - # The number of errors on this node that caused the RetryPolicy to trigger a retry, broken - # down by error type (exposed as Counters). - // retries.aborted, - // retries.read-timeout, - // retries.write-timeout, - // retries.unavailable, - // retries.other, - - # The total number of errors on this node that were ignored by the RetryPolicy (exposed as - # a Counter). - # - # This is a sum of all the other ignores.* metrics. - // ignores.total, - - # The number of errors on this node that were ignored by the RetryPolicy, broken down by - # error type (exposed as Counters). - // ignores.aborted, - // ignores.read-timeout, - // ignores.write-timeout, - // ignores.unavailable, - // ignores.other, - - # The number of speculative executions triggered by a slow response from this node - # (exposed as a Counter). - // speculative-executions, - - # The number of errors encountered while trying to establish a connection to this node - # (exposed as a Counter). - # - # Connection errors are not a fatal issue for the driver, failed connections will be - # retried periodically according to the reconnection policy. You can choose whether or not - # to log those errors at WARN level with the connection.warn-on-init-error option. - # - # Authentication errors are not included in this counter, they are tracked separately in - # errors.connection.auth. - // errors.connection.init, - - # The number of authentication errors encountered while trying to establish a connection - # to this node (exposed as a Counter). - # Authentication errors are also logged at WARN level. 
- // errors.connection.auth, - - # The throughput and latency percentiles of individual graph messages sent to this node as - # part of an overall request (exposed as a Timer). - # - # Note that this does not necessarily correspond to the overall duration of the - # session.execute() call, since the driver might query multiple nodes because of retries - # and speculative executions. Therefore a single "request" (as seen from a client of the - # driver) can be composed of more than one of the "messages" measured by this metric. - # - # Therefore this metric is intended as an insight into the performance of this particular - # node. For statistics on overall request completion, use the session-level graph-requests. - // graph-messages, - ] - - # See cql-requests in the `session` section - # - # Required: if the 'cql-messages' metric is enabled, and Dropwizard or Micrometer is used. - # Modifiable at runtime: no - # Overridable in a profile: no - cql-messages { - highest-latency = 3 seconds - lowest-latency = 1 millisecond - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # See graph-requests in the `session` section - # - # Required: if the 'graph-messages' metric is enabled, and Dropwizard or Micrometer is used. - # Modifiable at runtime: no - # Overridable in a profile: no - graph-messages { - highest-latency = 3 seconds - lowest-latency = 1 millisecond - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # The time after which the node level metrics will be evicted. - # - # This is used to unregister stale metrics if a node leaves the cluster or gets a new address. - # If the node does not come back up when this interval elapses, all its metrics are removed - # from the registry. - # - # The lowest allowed value is 5 minutes. 
If you try to set it lower, the driver will log a - # warning and use 5 minutes. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - expire-after = 1 hour - } - } - - advanced.socket { - # Whether or not to disable the Nagle algorithm. - # - # By default, this option is set to true (Nagle disabled), because the driver has its own - # internal message coalescing algorithm. - # - # See java.net.StandardSocketOptions.TCP_NODELAY. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - tcp-no-delay = true - - # All other socket options are unset by default. The actual value depends on the underlying - # Netty transport: - # - NIO uses the defaults from java.net.Socket (refer to the javadocs of - # java.net.StandardSocketOptions for each option). - # - Epoll delegates to the underlying file descriptor, which uses the O/S defaults. - - # Whether or not to enable TCP keep-alive probes. - # - # See java.net.StandardSocketOptions.SO_KEEPALIVE. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - //keep-alive = false - - # Whether or not to allow address reuse. - # - # See java.net.StandardSocketOptions.SO_REUSEADDR. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - //reuse-address = true - - # Sets the linger interval. - # - # If the value is zero or greater, then it represents a timeout value, in seconds; - # if the value is negative, it means that this option is disabled. - # - # See java.net.StandardSocketOptions.SO_LINGER. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. 
- # Overridable in a profile: no - //linger-interval = 0 - - # Sets a hint to the size of the underlying buffers for incoming network I/O. - # - # See java.net.StandardSocketOptions.SO_RCVBUF. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - //receive-buffer-size = 65535 - - # Sets a hint to the size of the underlying buffers for outgoing network I/O. - # - # See java.net.StandardSocketOptions.SO_SNDBUF. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - //send-buffer-size = 65535 - } - - advanced.heartbeat { - # The heartbeat interval. If a connection stays idle for that duration (no reads), the driver - # sends a dummy message on it to make sure it's still alive. If not, the connection is trashed - # and replaced. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - interval = 30 seconds - - # How long the driver waits for the response to a heartbeat. If this timeout fires, the - # heartbeat is considered failed. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - } - - advanced.metadata { - # Topology events are external signals that inform the driver of the state of Cassandra nodes - # (by default, they correspond to gossip events received on the control connection). - # The debouncer helps smoothen out oscillations if conflicting events are sent out in short - # bursts. - # Debouncing may be disabled by setting the window to 0 or max-events to 1 (this is not - # recommended). 
- topology-event-debouncer { - # How long the driver waits to propagate an event. If another event is received within that - # time, the window is reset and a batch of accumulated events will be delivered. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - window = 1 second - - # The maximum number of events that can accumulate. If this count is reached, the events are - # delivered immediately and the time window is reset. This avoids holding events indefinitely - # if the window keeps getting reset. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - max-events = 20 - } - - # Options relating to schema metadata (Session.getMetadata.getKeyspaces). - # This metadata is exposed by the driver for informational purposes, and is also necessary for - # token-aware routing. - schema { - # Whether schema metadata is enabled. - # If this is false, the schema will remain empty, or to the last known value. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the - # change. It can also be overridden programmatically via Session.setSchemaMetadataEnabled. - # Overridable in a profile: no - enabled = true - - # The keyspaces for which schema and token metadata should be maintained. - # - # Each element can be one of the following: - # 1. An exact name inclusion, for example "Ks1". If the name is case-sensitive, it must appear - # in its exact case. - # 2. An exact name exclusion, for example "!Ks1". - # 3. A regex inclusion, enclosed in slashes, for example "/^Ks.*/". The part between the - # slashes must follow the syntax rules of java.util.regex.Pattern. - # 4. A regex exclusion, for example "!/^Ks.*/". - # - # If the list is empty, or the option is unset, all keyspaces will match. Otherwise: - # - # If a keyspace matches an exact name inclusion, it is always included, regardless of what any - # other rule says. 
- # Otherwise, if it matches an exact name exclusion, it is always excluded, regardless of what - # any regex rule says. - # Otherwise, if there are regex rules: - # - if they're only inclusions, the keyspace must match at least one of them. - # - if they're only exclusions, the keyspace must match none of them. - # - if they're both, the keyspace must match at least one inclusion and none of the - # exclusions. - # - # If an element is malformed, or if its regex has a syntax error, a warning is logged and that - # single element is ignored. - # - # Try to use only exact name inclusions if possible. This allows the driver to filter on the - # server side with a WHERE IN clause. If you use any other rule, it has to fetch all system - # rows and filter on the client side. - # - # Required: no. The default value excludes all Cassandra and DSE system keyspaces. If the - # option is unset, this is interpreted as "include all keyspaces". - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the - # change. - # Overridable in a profile: no - refreshed-keyspaces = [ "!system", "!/^system_.*/", "!/^dse_.*/", "!solr_admin", "!OpsCenter" ] - - # The timeout for the requests to the schema tables. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the - # change. - # Overridable in a profile: no - request-timeout = ${datastax-java-driver.basic.request.timeout} - - # The page size for the requests to the schema tables. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the - # change. - # Overridable in a profile: no - request-page-size = ${datastax-java-driver.basic.request.page-size} - - # Protects against bursts of schema updates (for example when a client issues a sequence of - # DDL queries), by coalescing them into a single update. 
- # Debouncing may be disabled by setting the window to 0 or max-events to 1 (this is highly - # discouraged for schema refreshes). - debouncer { - # How long the driver waits to apply a refresh. If another refresh is requested within that - # time, the window is reset and a single refresh will be triggered when it ends. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - window = 1 second - - # The maximum number of refreshes that can accumulate. If this count is reached, a refresh - # is done immediately and the window is reset. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - max-events = 20 - } - } - - # Whether token metadata (Session.getMetadata.getTokenMap) is enabled. - # This metadata is exposed by the driver for informational purposes, and is also necessary for - # token-aware routing. - # If this is false, it will remain empty, or to the last known value. Note that its computation - # requires information about the schema; therefore if schema metadata is disabled or filtered to - # a subset of keyspaces, the token map will be incomplete, regardless of the value of this - # property. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the change. - # Overridable in a profile: no - token-map.enabled = true - } - - advanced.control-connection { - # How long the driver waits for responses to control queries (e.g. fetching the list of nodes, - # refreshing the schema). - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - - # Due to the distributed nature of Cassandra, schema changes made on one node might not be - # immediately visible to others. 
Under certain circumstances, the driver waits until all nodes - # agree on a common schema version (namely: before a schema refresh, before repreparing all - # queries on a newly up node, and before completing a successful schema-altering query). To do - # so, it queries system tables to find out the schema version of all nodes that are currently - # UP. If all the versions match, the check succeeds, otherwise it is retried periodically, until - # a given timeout. - # - # A schema agreement failure is not fatal, but it might produce unexpected results (for example, - # getting an "unconfigured table" error for a table that you created right before, just because - # the two queries went to different coordinators). - # - # Note that schema agreement never succeeds in a mixed-version cluster (it would be challenging - # because the way the schema version is computed varies across server versions); the assumption - # is that schema updates are unlikely to happen during a rolling upgrade anyway. - schema-agreement { - # The interval between each attempt. - # Required: yes - # Modifiable at runtime: yes, the new value will be used for checks issued after the change. - # Overridable in a profile: no - interval = 200 milliseconds - - # The timeout after which schema agreement fails. - # If this is set to 0, schema agreement is skipped and will always fail. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for checks issued after the change. - # Overridable in a profile: no - timeout = 10 seconds - - # Whether to log a warning if schema agreement fails. - # You might want to change this if you've set the timeout to 0. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for checks issued after the change. - # Overridable in a profile: no - warn-on-failure = true - } - } - - advanced.prepared-statements { - # Whether `Session.prepare` calls should be sent to all nodes in the cluster. 
- # - # A request to prepare is handled in two steps: - # 1) send to a single node first (to rule out simple errors like malformed queries). - # 2) if step 1 succeeds, re-send to all other active nodes (i.e. not ignored by the load - # balancing policy). - # This option controls whether step 2 is executed. - # - # The reason why you might want to disable it is to optimize network usage if you have a large - # number of clients preparing the same set of statements at startup. If your load balancing - # policy distributes queries randomly, each client will pick a different host to prepare its - # statements, and on the whole each host has a good chance of having been hit by at least one - # client for each statement. - # On the other hand, if that assumption turns out to be wrong and one host hasn't prepared a - # given statement, it needs to be re-prepared on the fly the first time it gets executed; this - # causes a performance penalty (one extra roundtrip to resend the query to prepare, and another - # to retry the execution). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for prepares issued after the change. - # Overridable in a profile: yes - prepare-on-all-nodes = true - - # How the driver replicates prepared statements on a node that just came back up or joined the - # cluster. - reprepare-on-up { - # Whether the driver tries to prepare on new nodes at all. - # - # The reason why you might want to disable it is to optimize reconnection time when you - # believe nodes often get marked down because of temporary network issues, rather than the - # node really crashing. In that case, the node still has prepared statements in its cache when - # the driver reconnects, so re-preparing is redundant. 
- # - # On the other hand, if that assumption turns out to be wrong and the node had really - # restarted, its prepared statement cache is empty (before CASSANDRA-8831), and statements - # need to be re-prepared on the fly the first time they get executed; this causes a - # performance penalty (one extra roundtrip to resend the query to prepare, and another to - # retry the execution). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - enabled = true - - # Whether to check `system.prepared_statements` on the target node before repreparing. - # - # This table exists since CASSANDRA-8831 (merged in 3.10). It stores the statements already - # prepared on the node, and preserves them across restarts. - # - # Checking the table first avoids repreparing unnecessarily, but the cost of the query is not - # always worth the improvement, especially if the number of statements is low. - # - # If the table does not exist, or the query fails for any other reason, the error is ignored - # and the driver proceeds to reprepare statements according to the other parameters. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - check-system-table = false - - # The maximum number of statements that should be reprepared. 0 or a negative value means no - # limit. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - max-statements = 0 - - # The maximum number of concurrent requests when repreparing. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - max-parallelism = 100 - - # The request timeout. 
This applies both to querying the system.prepared_statements table (if - # relevant), and the prepare requests themselves. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - } - - # How to build the cache of prepared statements. - prepared-cache { - # Whether to use weak references for the prepared statements cache values. - # - # If this option is absent, weak references will be used. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // weak-values = true - } - } - - # Options related to the Netty event loop groups used internally by the driver. - advanced.netty { - - # Whether the threads created by the driver should be daemon threads. - # This will apply to the threads in io-group, admin-group, and the timer thread. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - daemon = false - - # The event loop group used for I/O operations (reading and writing to Cassandra nodes). - # By default, threads in this group are named after the session name, "-io-" and an incrementing - # counter, for example "s0-io-0". - io-group { - # The number of threads. - # If this is set to 0, the driver will use `Runtime.getRuntime().availableProcessors() * 2`. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - size = 0 - - # The options to shut down the event loop group gracefully when the driver closes. If a task - # gets submitted during the quiet period, it is accepted and the quiet period starts over. - # The timeout limits the overall shutdown time. 
- # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - shutdown {quiet-period = 2, timeout = 15, unit = SECONDS} - } - # The event loop group used for admin tasks not related to request I/O (handle cluster events, - # refresh metadata, schedule reconnections, etc.) - # By default, threads in this group are named after the session name, "-admin-" and an - # incrementing counter, for example "s0-admin-0". - admin-group { - size = 2 - - shutdown {quiet-period = 2, timeout = 15, unit = SECONDS} - } - # The timer used for scheduling request timeouts and speculative executions - # By default, this thread is named after the session name and "-timer-0", for example - # "s0-timer-0". - timer { - # The timer tick duration. - # This is how frequent the timer should wake up to check for timed-out tasks or speculative - # executions. Lower resolution (i.e. longer durations) will leave more CPU cycles for running - # I/O operations at the cost of precision of exactly when a request timeout will expire or a - # speculative execution will run. Higher resolution (i.e. shorter durations) will result in - # more precise request timeouts and speculative execution scheduling, but at the cost of CPU - # cycles taken from I/O operations, which could lead to lower overall I/O throughput. - # - # The default value is 100 milliseconds, which is a comfortable value for most use cases. - # However if you are using more agressive timeouts or speculative execution delays, then you - # should lower the timer tick duration as well, so that its value is always equal to or lesser - # than the timeout duration and/or speculative execution delay you intend to use. - # - # Note for Windows users: avoid setting this to aggressive values, that is, anything under 100 - # milliseconds; doing so is known to cause extreme CPU usage. 
Also, the tick duration must be - # a multiple of 10 under Windows; if that is not the case, it will be automatically rounded - # down to the nearest multiple of 10 (e.g. 99 milliseconds will be rounded down to 90 - # milliseconds). - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - tick-duration = 100 milliseconds - - # Number of ticks in a Timer wheel. The underlying implementation uses Netty's - # HashedWheelTimer, which uses hashes to arrange the timeouts. This effectively controls the - # size of the timer wheel. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - ticks-per-wheel = 2048 - } - } - - # The component that coalesces writes on the connections. - # This is exposed mainly to facilitate tuning during development. You shouldn't have to adjust - # this. - advanced.coalescer { - # The reschedule interval. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - reschedule-interval = 10 microseconds - } - - profiles { - # This is where your custom profiles go, for example: - # olap { - # basic.request.timeout = 5 seconds - # } - - # An example configuration profile for graph requests. - // my-graph-profile-example { - // graph { - // read-consistency-level = LOCAL_QUORUM - // write-consistency-level = LOCAL_ONE - // } - // } - - # An example pre-defined configuration profile for OLAP graph queries. - // graph-olap { - // graph { - // traversal-source = "a" // traversal source needs to be set to "a" for OLAP queries. - // } - // } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/Assertions.java b/core/src/test/java/com/datastax/dse/driver/Assertions.java deleted file mode 100644 index 09f7b281f84..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/Assertions.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -public class Assertions extends org.assertj.core.api.Assertions { - public static TinkerpopBufferAssert assertThat(Buffer actual) { - return new TinkerpopBufferAssert(actual); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/DriverRunListener.java b/core/src/test/java/com/datastax/dse/driver/DriverRunListener.java deleted file mode 100644 index 65e58878dbc..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/DriverRunListener.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import static org.assertj.core.api.Assertions.fail; - -import org.junit.runner.Description; -import org.junit.runner.notification.RunListener; - -/** - * Common parent of all driver tests, to store common configuration and perform sanity checks. - * - * @see "maven-surefire-plugin configuration in pom.xml" - */ -public class DriverRunListener extends RunListener { - - @Override - public void testFinished(Description description) throws Exception { - // If a test interrupted the main thread silently, this can make later tests fail. Instead, we - // fail the test and clear the interrupt status. - // Note: Thread.interrupted() also clears the flag, which is what we want. - if (Thread.interrupted()) { - fail(description.getMethodName() + " interrupted the main thread"); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/DseTestDataProviders.java b/core/src/test/java/com/datastax/dse/driver/DseTestDataProviders.java deleted file mode 100644 index 7d9aecc28ed..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/DseTestDataProviders.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_1_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_2_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPH_BINARY_1_0; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.tngtech.java.junit.dataprovider.DataProvider; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; -import java.util.stream.Stream; - -public class DseTestDataProviders { - - private static final ScriptGraphStatement UNDEFINED_IDEMPOTENCE_STATEMENT = - ScriptGraphStatement.newInstance("undefined idempotence"); - private static final ScriptGraphStatement IDEMPOTENT_STATEMENT = - ScriptGraphStatement.builder("idempotent").setIdempotence(true).build(); - private static final ScriptGraphStatement NON_IDEMPOTENT_STATEMENT = - ScriptGraphStatement.builder("non idempotent").setIdempotence(false).build(); - - @DataProvider - public static Object[][] allDseProtocolVersions() { - return concat(DseProtocolVersion.values()); - } - - @DataProvider - public static Object[][] allOssProtocolVersions() { - return concat(DefaultProtocolVersion.values()); - } - - @DataProvider - public static Object[][] allDseAndOssProtocolVersions() { - return concat(DefaultProtocolVersion.values(), DseProtocolVersion.values()); - } - - @DataProvider - public static Object[][] supportedGraphProtocols() { - return new Object[][] {{GRAPHSON_1_0}, {GRAPHSON_2_0}, {GRAPH_BINARY_1_0}}; - } - - /** - * The combination of the default idempotence option and statement setting that produce an - * idempotent statement. 
- */ - @DataProvider - public static Object[][] idempotentGraphConfig() { - return new Object[][] { - new Object[] {true, UNDEFINED_IDEMPOTENCE_STATEMENT}, - new Object[] {false, IDEMPOTENT_STATEMENT}, - new Object[] {true, IDEMPOTENT_STATEMENT}, - }; - } - - /** - * The combination of the default idempotence option and statement setting that produce a non - * idempotent statement. - */ - @DataProvider - public static Object[][] nonIdempotentGraphConfig() { - return new Object[][] { - new Object[] {false, UNDEFINED_IDEMPOTENCE_STATEMENT}, - new Object[] {true, NON_IDEMPOTENT_STATEMENT}, - new Object[] {false, NON_IDEMPOTENT_STATEMENT}, - }; - } - - @DataProvider - public static Object[][] allDseProtocolVersionsAndSupportedGraphProtocols() { - return TestDataProviders.combine(allDseProtocolVersions(), supportedGraphProtocols()); - } - - @NonNull - private static Object[][] concat(Object[]... values) { - return Stream.of(values) - .flatMap(Arrays::stream) - .map(o -> new Object[] {o}) - .toArray(Object[][]::new); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/DseTestFixtures.java b/core/src/test/java/com/datastax/dse/driver/DseTestFixtures.java deleted file mode 100644 index 7992dde4fea..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/DseTestFixtures.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.UUID; - -public class DseTestFixtures { - - // Returns a single row, with a single "message" column with the value "hello, world" - public static Rows singleDseRow() { - DseRowsMetadata metadata = - new DseRowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "message", - 0, - 
RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null, - 1, - true); - Queue> data = new ArrayDeque<>(); - data.add(ImmutableList.of(Bytes.fromHexString("0x68656C6C6F2C20776F726C64"))); - return new DefaultRows(metadata, data); - } - - // Returns 10 rows, each with a single "message" column with the value "hello, world" - public static Rows tenDseRows(int page, boolean last) { - DseRowsMetadata metadata = - new DseRowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "message", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - last ? null : ByteBuffer.wrap(new byte[] {(byte) page}), - new int[] {}, - null, - page, - last); - Queue> data = new ArrayDeque<>(); - for (int i = 0; i < 10; i++) { - data.add(ImmutableList.of(Bytes.fromHexString("0x68656C6C6F2C20776F726C64"))); - } - return new DefaultRows(metadata, data); - } - - public static DefaultDriverContext mockNodesInMetadataWithVersions( - DefaultDriverContext mockContext, boolean treatNullAsMissing, Version... dseVersions) { - - // mock bits of the context - MetadataManager metadataManager = mock(MetadataManager.class); - Metadata metadata = mock(Metadata.class); - Map nodeMap = new HashMap<>((dseVersions != null) ? 
dseVersions.length : 1); - if (dseVersions == null) { - Node node = mock(Node.class); - Map nodeExtras = new HashMap<>(1); - if (!treatNullAsMissing) { - // put an explicit null in for DSE_VERSION - nodeExtras.put(DseNodeProperties.DSE_VERSION, null); - } - nodeMap.put(UUID.randomUUID(), node); - when(node.getExtras()).thenReturn(nodeExtras); - } else { - for (Version dseVersion : dseVersions) { - // create a node with DSE version in its extra data - Node node = mock(Node.class); - Map nodeExtras = new HashMap<>(1); - if (dseVersion != null || !treatNullAsMissing) { - nodeExtras.put(DseNodeProperties.DSE_VERSION, dseVersion); - } - nodeMap.put(UUID.randomUUID(), node); - when(node.getExtras()).thenReturn(nodeExtras); - } - } - // return mocked data when requested - when(metadata.getNodes()).thenReturn(nodeMap); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(mockContext.getMetadataManager()).thenReturn(metadataManager); - return mockContext; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/TinkerpopBufferAssert.java b/core/src/test/java/com/datastax/dse/driver/TinkerpopBufferAssert.java deleted file mode 100644 index 278e5a65070..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/TinkerpopBufferAssert.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.protocol.internal.util.Bytes; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.assertj.core.api.AbstractAssert; - -public class TinkerpopBufferAssert extends AbstractAssert { - public TinkerpopBufferAssert(Buffer actual) { - super(actual, TinkerpopBufferAssert.class); - } - - public TinkerpopBufferAssert containsExactly(String hexString) { - - byte[] expectedBytes = Bytes.fromHexString(hexString).array(); - byte[] actualBytes = new byte[expectedBytes.length]; - actual.readBytes(actualBytes); - assertThat(actualBytes).containsExactly(expectedBytes); - assertThat(actual.readableBytes()).isEqualTo(0); - return this; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecisionTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecisionTest.java deleted file mode 100644 index 4cf8d43b748..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecisionTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.time; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.time.ZonedDateTime; -import org.junit.Test; - -public class DateRangePrecisionTest { - - @Test - public void should_round_up() { - ZonedDateTime timestamp = ZonedDateTime.parse("2011-02-03T04:05:16.789Z"); - assertThat(DateRangePrecision.MILLISECOND.roundUp(timestamp)) - .isEqualTo("2011-02-03T04:05:16.789Z"); - assertThat(DateRangePrecision.SECOND.roundUp(timestamp)).isEqualTo("2011-02-03T04:05:16.999Z"); - assertThat(DateRangePrecision.MINUTE.roundUp(timestamp)).isEqualTo("2011-02-03T04:05:59.999Z"); - assertThat(DateRangePrecision.HOUR.roundUp(timestamp)).isEqualTo("2011-02-03T04:59:59.999Z"); - assertThat(DateRangePrecision.DAY.roundUp(timestamp)).isEqualTo("2011-02-03T23:59:59.999Z"); - assertThat(DateRangePrecision.MONTH.roundUp(timestamp)).isEqualTo("2011-02-28T23:59:59.999Z"); - assertThat(DateRangePrecision.YEAR.roundUp(timestamp)).isEqualTo("2011-12-31T23:59:59.999Z"); - } - - @Test - public void should_round_down() { - ZonedDateTime timestamp = ZonedDateTime.parse("2011-02-03T04:05:16.789Z"); - assertThat(DateRangePrecision.MILLISECOND.roundDown(timestamp)) - .isEqualTo("2011-02-03T04:05:16.789Z"); - assertThat(DateRangePrecision.SECOND.roundDown(timestamp)) - .isEqualTo("2011-02-03T04:05:16.000Z"); - assertThat(DateRangePrecision.MINUTE.roundDown(timestamp)) - .isEqualTo("2011-02-03T04:05:00.000Z"); - assertThat(DateRangePrecision.HOUR.roundDown(timestamp)).isEqualTo("2011-02-03T04:00:00.000Z"); - 
assertThat(DateRangePrecision.DAY.roundDown(timestamp)).isEqualTo("2011-02-03T00:00:00.000Z"); - assertThat(DateRangePrecision.MONTH.roundDown(timestamp)).isEqualTo("2011-02-01T00:00:00.000Z"); - assertThat(DateRangePrecision.YEAR.roundDown(timestamp)).isEqualTo("2011-01-01T00:00:00.000Z"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeTest.java deleted file mode 100644 index b067c12cad0..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeTest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.data.time; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.internal.SerializationHelper; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.text.ParseException; -import java.time.temporal.ChronoField; -import java.util.function.Predicate; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DateRangeTest { - - @Test - @UseDataProvider("rangeStrings") - public void should_parse_and_format(String source) throws Exception { - DateRange parsed = DateRange.parse(source); - assertThat(parsed.toString()).isEqualTo(source); - } - - @DataProvider - public static Object[][] rangeStrings() { - return new Object[][] { - {"[2011-01 TO 2015]"}, - {"[2010-01-02 TO 2015-05-05T13]"}, - {"[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]"}, - // leap year - {"[2010-01-01T15 TO 2016-02]"}, - // pre-epoch - {"[1500 TO 1501]"}, - {"[0001 TO 0001-01-02]"}, - {"[0000 TO 0000-01-02]"}, - {"[-0001 TO -0001-01-02]"}, - // unbounded - {"[* TO 2014-12-01]"}, - {"[1999 TO *]"}, - {"[* TO *]"}, - // single bound ranges - // AD/BC era boundary - {"0001-01-01"}, - {"-0001-01-01"}, - {"-0009"}, - {"2000-11"}, - {"*"} - }; - } - - @Test - public void should_use_proleptic_parser() throws Exception { - DateRange parsed = DateRange.parse("[0000 TO 0000-01-02]"); - assertThat(parsed.getLowerBound().getTimestamp().get(ChronoField.YEAR)).isEqualTo(0); - } - - @Test - public void should_fail_to_parse_invalid_strings() { - assertThatThrownBy(() -> DateRange.parse("foo")).matches(hasOffset(0)); - assertThatThrownBy(() -> DateRange.parse("[foo TO *]")).matches(hasOffset(1)); - assertThatThrownBy(() -> DateRange.parse("[* TO 
foo]")).matches(hasOffset(6)); - } - - private static Predicate hasOffset(int offset) { - return e -> ((ParseException) e).getErrorOffset() == offset; - } - - @Test - public void should_fail_to_parse_inverted_range() { - assertThatThrownBy(() -> DateRange.parse("[2001-01 TO 2000]")) - .hasMessage( - "Lower bound of a date range should be before upper bound, got: [2001-01 TO 2000]"); - } - - @Test - public void should_not_equate_single_date_open_to_both_open_range() throws Exception { - assertThat(DateRange.parse("*")).isNotEqualTo(DateRange.parse("[* TO *]")); - } - - @Test - public void should_not_equate_same_ranges_with_different_precisions() throws ParseException { - assertThat(DateRange.parse("[2001 TO 2002]")) - .isNotEqualTo(DateRange.parse("[2001-01 TO 2002-12]")); - } - - @Test - public void should_give_same_hashcode_to_equal_objects() throws ParseException { - assertThat(DateRange.parse("[2001 TO 2002]").hashCode()) - .isEqualTo(DateRange.parse("[2001 TO 2002]").hashCode()); - } - - @Test - public void should_serialize_and_deserialize() throws Exception { - DateRange initial = DateRange.parse("[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]"); - DateRange deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollectionTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollectionTest.java deleted file mode 100644 index a890720a3ef..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollectionTest.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.junit.Test; - -public class CqlCollectionTest { - - @Test - public void should_evaluate_contains() { - P> contains = CqlCollection.contains("foo"); - assertThat(contains.test(new HashSet<>())).isFalse(); - assertThat(contains.test(new ArrayList<>())).isFalse(); - assertThat(contains.test(Sets.newHashSet("foo"))).isTrue(); - assertThat(contains.test(Lists.newArrayList("foo"))).isTrue(); - assertThat(contains.test(Sets.newHashSet("bar"))).isFalse(); - assertThat(contains.test(Lists.newArrayList("bar"))).isFalse(); - assertThatThrownBy(() -> contains.test(null)).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.contains(null).test(Sets.newHashSet("foo"))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public 
void should_evaluate_containsKey() { - P> containsKey = CqlCollection.containsKey("foo"); - assertThat(containsKey.test(new HashMap<>())).isFalse(); - assertThat(containsKey.test(new LinkedHashMap<>())).isFalse(); - assertThat(containsKey.test(ImmutableMap.of("foo", "bar"))).isTrue(); - assertThat(containsKey.test(ImmutableMap.of("bar", "foo"))).isFalse(); - assertThatThrownBy(() -> containsKey.test(null)).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.containsKey(null).test(ImmutableMap.of("foo", "bar"))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_evaluate_containsValue() { - P> containsValue = CqlCollection.containsValue("foo"); - assertThat(containsValue.test(new HashMap<>())).isFalse(); - assertThat(containsValue.test(new LinkedHashMap<>())).isFalse(); - assertThat(containsValue.test(ImmutableMap.of("bar", "foo"))).isTrue(); - assertThat(containsValue.test(ImmutableMap.of("foo", "bar"))).isFalse(); - assertThatThrownBy(() -> containsValue.test(null)).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.containsValue(null).test(ImmutableMap.of("foo", "bar"))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_evaluate_entryEq() { - P> entryEq = CqlCollection.entryEq("foo", "bar"); - assertThat(entryEq.test(new HashMap<>())).isFalse(); - assertThat(entryEq.test(new LinkedHashMap<>())).isFalse(); - assertThat(entryEq.test(ImmutableMap.of("foo", "bar"))).isTrue(); - assertThat(entryEq.test(ImmutableMap.of("bar", "foo"))).isFalse(); - assertThatThrownBy(() -> entryEq.test(null)).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.entryEq(null, "foo").test(ImmutableMap.of("foo", "bar"))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.entryEq("foo", null).test(ImmutableMap.of("foo", "bar"))) - .isInstanceOf(IllegalArgumentException.class); - } -} 
diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/GeoTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/GeoTest.java deleted file mode 100644 index 143aec97b78..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/GeoTest.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.junit.Test; - -public class GeoTest { - - @Test - public void should_convert_units_to_degrees() { - assertThat(Geo.Unit.DEGREES.toDegrees(100.0)).isEqualTo(100.0); - assertThat(Geo.Unit.MILES.toDegrees(68.9722)).isEqualTo(0.9982455747535043); - assertThat(Geo.Unit.KILOMETERS.toDegrees(111.0)).isEqualTo(0.9982456082154465); - assertThat(Geo.Unit.METERS.toDegrees(111000.0)).isEqualTo(0.9982456082154464); - } - - @Test - public void should_test_if_point_is_inside_circle_with_cartesian_coordinates() { - P inside = Geo.inside(Point.fromCoordinates(30, 30), 14.142135623730951); - assertThat(inside.test(Point.fromCoordinates(40, 40))).isTrue(); - assertThat(inside.test(Point.fromCoordinates(40.1, 40))).isFalse(); - } - - @Test - public void should_test_if_point_is_inside_circle_with_geo_coordinates() { - P inside = - Geo.inside(Point.fromCoordinates(30, 30), 12.908258700131379, Geo.Unit.DEGREES); - assertThat(inside.test(Point.fromCoordinates(40, 40))).isTrue(); - assertThat(inside.test(Point.fromCoordinates(40.1, 40))).isFalse(); - } - - @Test - public void should_test_if_point_is_inside_polygon() { - P inside = - Geo.inside( - Polygon.builder() - .addRing( - Point.fromCoordinates(30, 30), - Point.fromCoordinates(40, 40), - Point.fromCoordinates(40, 30)) - .build()); - assertThat(inside.test(Point.fromCoordinates(35, 32))).isTrue(); - assertThat(inside.test(Point.fromCoordinates(33, 37))).isFalse(); - } - - @Test - public void should_build_line_string_from_coordinates() { - LineString lineString = Geo.lineString(1, 2, 3, 4, 5, 6); - assertThat(lineString.getPoints()) - .hasSize(3) - 
.contains(Point.fromCoordinates(1, 2)) - .contains(Point.fromCoordinates(3, 4)) - .contains(Point.fromCoordinates(5, 6)); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_line_string_if_not_enough_coordinates() { - Geo.lineString(1, 2); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_line_string_if_uneven_number_of_coordinates() { - Geo.lineString(1, 2, 3, 4, 5); - } - - @Test - public void should_build_polygon_from_coordinates() { - Polygon polygon = Geo.polygon(1, 2, 3, 4, 5, 6, 7, 8); - assertThat(polygon.getExteriorRing()) - .hasSize(4) - .contains(Point.fromCoordinates(1, 2)) - .contains(Point.fromCoordinates(3, 4)) - .contains(Point.fromCoordinates(5, 6)) - .contains(Point.fromCoordinates(7, 8)); - assertThat(polygon.getInteriorRings()).isEmpty(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_polygon_if_not_enough_coordinates() { - Geo.polygon(1, 2, 3, 4); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_polygon_if_uneven_number_of_coordinates() { - Geo.polygon(1, 2, 3, 4, 5, 6, 7); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/SearchTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/SearchTest.java deleted file mode 100644 index 591269e31ad..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/SearchTest.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.junit.Test; - -public class SearchTest { - - @Test - public void testToken() { - P p = Search.token("needle"); - assertThat(p.test("needle")).isTrue(); - assertThat(p.test("This is a needle in a haystack")).isTrue(); - assertThat(p.test("This is just the haystack")).isFalse(); - } - - @Test - public void testPrefix() { - P p = Search.prefix("abcd"); - assertThat(p.test("abcd")).isTrue(); - assertThat(p.test("abcdefg hijkl")).isTrue(); - assertThat(p.test("zabcd")).isFalse(); - } - - @Test - public void testTokenPrefix() { - P p = Search.tokenPrefix("abcd"); - assertThat(p.test("abcd")).isTrue(); - assertThat(p.test("abcdefg hijkl")).isTrue(); - assertThat(p.test("z abcd")).isTrue(); - assertThat(p.test("ab cd")).isFalse(); - } - - @Test - public void testRegex() { - P p = Search.regex("(foo|bar)"); - assertThat(p.test("foo")).isTrue(); - assertThat(p.test("bar")).isTrue(); - assertThat(p.test("foo bar")).isFalse(); - } - - @Test - public void testTokenRegex() { - P p = Search.tokenRegex("(foo|bar)"); - assertThat(p.test("foo")).isTrue(); - assertThat(p.test("bar")).isTrue(); - assertThat(p.test("foo bar")).isTrue(); - assertThat(p.test("foo bar qix")).isTrue(); - assertThat(p.test("qix")).isFalse(); - } - - @Test - public void testPhrase() { - P p = Search.phrase("Hello world", 2); - assertThat(p.test("Hello World")).isTrue(); - assertThat(p.test("Hello Big 
World")).isTrue(); - assertThat(p.test("Hello Big Wild World")).isTrue(); - assertThat(p.test("Hello The Big Wild World")).isFalse(); - assertThat(p.test("Goodbye world")).isFalse(); - } - - @Test - public void testPhraseFragment() { - // Tests JAVA-1744 - P p = Search.phrase("a b", 0); - assertThat(p.test("a b")).isTrue(); - assertThat(p.test("a")).isFalse(); - assertThat(p.test("b")).isFalse(); - } - - @Test - public void testFuzzy() { - P p = Search.fuzzy("abc", 1); - assertThat(p.test("abcd")).isTrue(); - assertThat(p.test("ab")).isTrue(); - assertThat(p.test("abce")).isTrue(); - assertThat(p.test("abdc")).isTrue(); - assertThat(p.test("badc")).isFalse(); - - // Make sure we do NOT calculate the Damerau–Levenshtein distance (2), but the optimal string - // alignment distance (3): - assertThat(Search.tokenFuzzy("ca", 2).test("abc")).isFalse(); - } - - @Test - public void testTokenFuzzy() { - P p = Search.tokenFuzzy("abc", 1); - assertThat(p.test("foo abcd")).isTrue(); - assertThat(p.test("foo ab")).isTrue(); - assertThat(p.test("foo abce")).isTrue(); - assertThat(p.test("foo abdc")).isTrue(); - assertThat(p.test("foo badc")).isFalse(); - - // Make sure we do NOT calculate the Damerau–Levenshtein distance (2), but the optimal string - // alignment distance (3): - assertThat(Search.tokenFuzzy("ca", 2).test("abc 123")).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTest.java b/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTest.java deleted file mode 100644 index d001f791e82..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTest.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal; - -import java.nio.file.Path; -import java.nio.file.Paths; - -public class DependencyCheckTest extends DependencyCheckTestBase { - - @Override - protected Path getDepsTxtPath() { - return Paths.get( - getBaseResourcePathString(), - "target", - "classes", - "com", - "datastax", - "dse", - "driver", - "internal", - "deps.txt"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTestBase.java b/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTestBase.java deleted file mode 100644 index f2ce5513d65..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTestBase.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Path; -import java.util.Properties; -import org.junit.Test; - -public abstract class DependencyCheckTestBase { - - private String baseResourcePath; - - protected DependencyCheckTestBase() { - Properties projectProperties = new Properties(); - try (InputStream is = this.getClass().getResourceAsStream("/project.properties")) { - projectProperties.load(is); - baseResourcePath = projectProperties.getProperty("project.basedir"); - } catch (IOException ioe) { - throw new AssertionError( - "Error retrieving \"project.basedir\" value from \"/project.properties\". 
Please check test resources in this project.", - ioe); - } - assert baseResourcePath != null; - } - - @Test - public void should_generate_deps_txt() { - assertThat(getDepsTxtPath()).exists(); - } - - protected final String getBaseResourcePathString() { - return baseResourcePath; - } - - protected abstract Path getDepsTxtPath(); -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/context/DseStartupOptionsBuilderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/context/DseStartupOptionsBuilderTest.java deleted file mode 100644 index 9e4556e528d..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/context/DseStartupOptionsBuilderTest.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.context; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.Mockito.when; -import static org.mockito.MockitoAnnotations.initMocks; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.StartupOptionsBuilder; -import com.datastax.oss.protocol.internal.request.Startup; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; - -@RunWith(DataProviderRunner.class) -public class DseStartupOptionsBuilderTest { - - private DefaultDriverContext driverContext; - - // Mocks for instantiating the DSE driver context - @Mock private DriverConfigLoader configLoader; - @Mock private DriverConfig driverConfig; - @Mock private DriverExecutionProfile defaultProfile; - - @Before - public void before() { - initMocks(this); - when(configLoader.getInitialConfig()).thenReturn(driverConfig); - when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.isDefined(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE)).thenReturn(true); - } - - private void 
buildContext(UUID clientId, String applicationName, String applicationVersion) { - this.driverContext = - new DefaultDriverContext( - configLoader, - ProgrammaticArguments.builder() - .withStartupClientId(clientId) - .withStartupApplicationName(applicationName) - .withStartupApplicationVersion(applicationVersion) - .build()); - } - - private void assertDefaultStartupOptions(Startup startup) { - assertThat(startup.options).containsEntry(Startup.CQL_VERSION_KEY, "3.0.0"); - assertThat(startup.options) - .containsEntry( - StartupOptionsBuilder.DRIVER_NAME_KEY, Session.OSS_DRIVER_COORDINATES.getName()); - assertThat(startup.options).containsKey(StartupOptionsBuilder.DRIVER_VERSION_KEY); - Version version = Version.parse(startup.options.get(StartupOptionsBuilder.DRIVER_VERSION_KEY)); - assertThat(version).isEqualTo(Session.OSS_DRIVER_COORDINATES.getVersion()); - assertThat(startup.options).containsKey(StartupOptionsBuilder.CLIENT_ID_KEY); - } - - @Test - public void should_build_startup_options_with_no_compression_if_undefined() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - buildContext(null, null, null); - Startup startup = new Startup(driverContext.getStartupOptions()); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - @DataProvider({"lz4", "snappy"}) - public void should_build_startup_options_with_compression(String compression) { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn(compression); - buildContext(null, null, null); - Startup startup = new Startup(driverContext.getStartupOptions()); - // assert the compression option is present - assertThat(startup.options).containsEntry(Startup.COMPRESSION_KEY, compression); - assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_NAME_KEY); - 
assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_VERSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_fail_to_build_startup_options_with_invalid_compression() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("foobar"); - buildContext(null, null, null); - assertThatIllegalArgumentException() - .isThrownBy(() -> new Startup(driverContext.getStartupOptions())); - } - - @Test - public void should_build_startup_options_with_client_id() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - UUID customClientId = Uuids.random(); - buildContext(customClientId, null, null); - Startup startup = new Startup(driverContext.getStartupOptions()); - // assert the client id is present - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.CLIENT_ID_KEY, customClientId.toString()); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_NAME_KEY); - assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_VERSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_build_startup_options_with_application_version_and_name() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - buildContext(null, "Custom_App_Name", "Custom_App_Version"); - Startup startup = new Startup(driverContext.getStartupOptions()); - // assert the app name and version are present - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Custom_App_Name"); - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Custom_App_Version"); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertDefaultStartupOptions(startup); - } - - 
@Test - public void should_build_startup_options_with_all_options() { - // mock config to specify "snappy" compression - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("snappy"); - - UUID customClientId = Uuids.random(); - - buildContext(customClientId, "Custom_App_Name", "Custom_App_Version"); - Startup startup = new Startup(driverContext.getStartupOptions()); - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.CLIENT_ID_KEY, customClientId.toString()) - .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Custom_App_Name") - .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Custom_App_Version"); - assertThat(startup.options).containsEntry(Startup.COMPRESSION_KEY, "snappy"); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_use_configuration_when_no_programmatic_values_provided() { - when(defaultProfile.getString(DseDriverOption.APPLICATION_NAME, null)) - .thenReturn("Config_App_Name"); - when(defaultProfile.getString(DseDriverOption.APPLICATION_VERSION, null)) - .thenReturn("Config_App_Version"); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - - buildContext(null, null, null); - Startup startup = new Startup(driverContext.getStartupOptions()); - - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Config_App_Name") - .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Config_App_Version"); - } - - @Test - public void should_ignore_configuration_when_programmatic_values_provided() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - - buildContext(null, "Custom_App_Name", "Custom_App_Version"); - Startup startup = new Startup(driverContext.getStartupOptions()); - - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Custom_App_Name") - 
.containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Custom_App_Version"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerNodeTargetingTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerNodeTargetingTest.java deleted file mode 100644 index 1edb7c183bf..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerNodeTargetingTest.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.never; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import org.junit.Test; -import org.mockito.InOrder; -import org.mockito.Mockito; - -public class ContinuousCqlRequestHandlerNodeTargetingTest - extends ContinuousCqlRequestHandlerTestBase { - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_fail_if_targeted_node_not_available(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withEmptyPool(node3) - .withProtocolVersion(version) - .build()) { - - LoadBalancingPolicyWrapper loadBalancingPolicy = - 
harness.getContext().getLoadBalancingPolicyWrapper(); - InOrder invocations = Mockito.inOrder(loadBalancingPolicy); - - // target node3, which should be unavailable - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT.setNode(node3), - harness.getSession(), - harness.getContext(), - "target node 3, unavailable") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> errors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(errors).hasSize(1); - List nodeErrors = errors.values().iterator().next(); - assertThat(nodeErrors).singleElement().isInstanceOf(NodeUnavailableException.class); - invocations - .verify(loadBalancingPolicy, never()) - .newQueryPlan(any(Request.class), anyString(), any(Session.class)); - }); - - resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "no node targeting, should use node 1") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node1); - invocations - .verify(loadBalancingPolicy) - .newQueryPlan( - UNDEFINED_IDEMPOTENCE_STATEMENT, - DriverExecutionProfile.DEFAULT_NAME, - harness.getSession()); - }); - - resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "no node targeting, should use node 2") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node2); - invocations - .verify(loadBalancingPolicy) - .newQueryPlan( - UNDEFINED_IDEMPOTENCE_STATEMENT, - DriverExecutionProfile.DEFAULT_NAME, - harness.getSession()); - }); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = 
DseTestDataProviders.class) - public void should_target_node(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withResponse(node3, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withProtocolVersion(version) - .build()) { - - LoadBalancingPolicyWrapper loadBalancingPolicy = - harness.getContext().getLoadBalancingPolicyWrapper(); - InOrder invocations = Mockito.inOrder(loadBalancingPolicy); - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT.setNode(node3), - harness.getSession(), - harness.getContext(), - "target node 3") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node3); - invocations - .verify(loadBalancingPolicy, never()) - .newQueryPlan(any(Request.class), anyString(), any(Session.class)); - }); - - resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "no node targeting") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node1); - invocations - .verify(loadBalancingPolicy) - .newQueryPlan( - UNDEFINED_IDEMPOTENCE_STATEMENT, - DriverExecutionProfile.DEFAULT_NAME, - harness.getSession()); - }); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerReprepareTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerReprepareTest.java deleted file mode 100644 index fd8d0ea1f98..00000000000 --- 
a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerReprepareTest.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static com.datastax.oss.protocol.internal.Frame.NO_PAYLOAD; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.servererrors.SyntaxError; -import 
com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.ProtocolConstants.ErrorCode; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.error.Unprepared; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.netty.util.concurrent.Future; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import org.junit.Test; -import org.mockito.Mock; - -public class ContinuousCqlRequestHandlerReprepareTest extends ContinuousCqlRequestHandlerTestBase { - - private final byte[] preparedId = {1, 2, 3}; - private final ByteBuffer preparedIdBuf = ByteBuffer.wrap(preparedId); - - private final RepreparePayload repreparePayload = - new RepreparePayload(preparedIdBuf, "irrelevant", CqlIdentifier.fromCql("ks"), NO_PAYLOAD); - - private final ConcurrentMap repreparePayloads = - new ConcurrentHashMap<>(ImmutableMap.of(preparedIdBuf, repreparePayload)); - - private final Unprepared unprepared = new Unprepared("test", preparedId); - private final Prepared prepared = new Prepared(preparedId, null, null, null); - private final Error unrecoverable = - new Error(ProtocolConstants.ErrorCode.SYNTAX_ERROR, "bad query"); - private final Error recoverable = new Error(ErrorCode.SERVER_ERROR, "sorry"); - 
- @Mock private Future future; - - @Override - public void setup() { - super.setup(); - when(future.isSuccess()).thenReturn(true); - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_prepare_and_retry_on_same_node(DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(unprepared)) - .withProtocolVersion(version) - .build()) { - - when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads); - when(harness.getChannel(node1).write(any(Prepare.class), anyBoolean(), anyMap(), any())) - .then( - invocation -> { - AdminRequestHandler admin = invocation.getArgument(3); - admin.onResponse(defaultFrameOf(prepared)); - return future; - }); - - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test") - .handle(); - - verify(harness.getChannel(node1)).write(any(Prepare.class), anyBoolean(), anyMap(), any()); - // should have attempted to execute the query twice on the same node - verify(harness.getChannel(node1), times(2)) - .write(any(Query.class), anyBoolean(), anyMap(), any()); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_abort_when_prepare_fails_with_unrecoverable_error(DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(unprepared)) - .withProtocolVersion(version) - .build()) { - - when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads); - when(harness.getChannel(node1).write(any(Prepare.class), anyBoolean(), anyMap(), any())) - .then( - invocation -> { - AdminRequestHandler admin = invocation.getArgument(3); - admin.onResponse(defaultFrameOf(unrecoverable)); - return future; - }); - - ContinuousCqlRequestHandler handler = - new 
ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - verify(harness.getChannel(node1)).write(any(Query.class), anyBoolean(), anyMap(), any()); - verify(harness.getChannel(node1)).write(any(Prepare.class), anyBoolean(), anyMap(), any()); - - assertThat(handler.getState()).isEqualTo(-2); - assertThat(page1Future).isCompletedExceptionally(); - Throwable t = catchThrowable(() -> page1Future.toCompletableFuture().get()); - assertThat(t).hasRootCauseInstanceOf(SyntaxError.class).hasMessageContaining("bad query"); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_try_next_node_when_prepare_fails_with_recoverable_error( - DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(unprepared)) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withProtocolVersion(version) - .build()) { - - when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads); - when(harness.getChannel(node1).write(any(Prepare.class), anyBoolean(), anyMap(), any())) - .then( - invocation -> { - AdminRequestHandler admin = invocation.getArgument(3); - admin.onResponse(defaultFrameOf(recoverable)); - return future; - }); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - verify(harness.getChannel(node1)).write(any(Query.class), anyBoolean(), anyMap(), any()); - verify(harness.getChannel(node1)).write(any(Prepare.class), anyBoolean(), anyMap(), any()); - // should have tried the next host - verify(harness.getChannel(node2)).write(any(Query.class), anyBoolean(), anyMap(), any()); - - 
assertThat(handler.getState()).isEqualTo(-1); - assertThatStage(page1Future) - .isSuccess( - rs -> { - assertThat(rs.currentPage()).hasSize(1); - assertThat(rs.hasMorePages()).isFalse(); - assertThat(rs.getExecutionInfo().getCoordinator()).isEqualTo(node2); - assertThat(rs.getExecutionInfo().getErrors()) - .hasSize(1) - .allSatisfy( - entry -> { - assertThat(entry.getKey()).isEqualTo(node1); - assertThat(entry.getValue()) - .isInstanceOf(UnexpectedResponseException.class) - .hasMessageContaining(recoverable.toString()); - }); - }); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerRetryTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerRetryTest.java deleted file mode 100644 index 97fe82985de..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerRetryTest.java +++ /dev/null @@ -1,600 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.dse.driver.DseTestDataProviders.allDseProtocolVersions; -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static com.datastax.oss.driver.TestDataProviders.combine; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atMost; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import 
com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.error.ReadTimeout; -import com.datastax.oss.protocol.internal.response.error.Unavailable; -import com.datastax.oss.protocol.internal.response.error.WriteTimeout; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.Mockito; - -public class ContinuousCqlRequestHandlerRetryTest extends ContinuousCqlRequestHandlerTestBase { - - @Test - @UseDataProvider("allIdempotenceConfigs") - public void should_always_try_next_node_if_bootstrapping( - boolean defaultIdempotence, Statement statement, DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence) - .withResponse( - node1, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-1); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - 
assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - assertThat(executionInfo.getErrors().get(0).getValue()) - .isInstanceOf(BootstrappingException.class); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - Mockito.verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - }); - } - } - - @Test - @UseDataProvider("allIdempotenceConfigs") - public void should_always_rethrow_query_validation_error( - boolean defaultIdempotence, Statement statement, DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence) - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.INVALID, "mock message"))) - .build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-2); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error) - .isInstanceOf(InvalidQueryException.class) - .hasMessage("mock message"); - Mockito.verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(DefaultNodeMetric.OTHER_ERRORS), anyString()); - Mockito.verify(nodeMetricUpdater1) - .updateTimer( - 
eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_try_next_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_NEXT); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-1); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(DefaultNodeMetric.RETRIES), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.retryMetric), anyString()); - Mockito.verify(nodeMetricUpdater1, 
atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_try_same_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_SAME); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-1); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(DefaultNodeMetric.RETRIES), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.retryMetric), anyString()); - 
Mockito.verify(nodeMetricUpdater1, atMost(2)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_ignore_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.IGNORE); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-1); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isFalse(); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).hasSize(0); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(DefaultNodeMetric.IGNORES), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.ignoreMetric), anyString()); - Mockito.verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - 
Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_rethrow_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-2); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndNotIdempotent") - public void should_rethrow_error_if_not_idempotent_and_error_unsafe_or_policy_rethrows( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - // For two of the possible exceptions, the retry policy is called even if the statement is not - // idempotent - boolean shouldCallRetryPolicy = - (failureScenario.expectedExceptionClass.equals(UnavailableException.class) - || 
failureScenario.expectedExceptionClass.equals(ReadTimeoutException.class)); - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - if (shouldCallRetryPolicy) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); - } - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-2); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass); - // When non idempotent, the policy is bypassed completely: - if (!shouldCallRetryPolicy) { - Mockito.verifyNoMoreInteractions( - harness.getContext().getRetryPolicy(anyString())); - } - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - /** - * Sets up the mocks to simulate an error from a node, and make the retry policy return a given - * decision for that error. 
- */ - private abstract static class FailureScenario { - private final Class expectedExceptionClass; - final DefaultNodeMetric errorMetric; - final DefaultNodeMetric retryMetric; - final DefaultNodeMetric ignoreMetric; - - FailureScenario( - Class expectedExceptionClass, - DefaultNodeMetric errorMetric, - DefaultNodeMetric retryMetric, - DefaultNodeMetric ignoreMetric) { - this.expectedExceptionClass = expectedExceptionClass; - this.errorMetric = errorMetric; - this.retryMetric = retryMetric; - this.ignoreMetric = ignoreMetric; - } - - abstract void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node); - - abstract void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict); - } - - @DataProvider - public static Object[][] failure() { - return TestDataProviders.fromList( - new FailureScenario( - ReadTimeoutException.class, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new ReadTimeout( - "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 1, 2, true))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onReadTimeoutVerdict( - any(SimpleStatement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(2), - eq(1), - eq(true), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - WriteTimeoutException.class, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new WriteTimeout( - "mock message", - ProtocolConstants.ConsistencyLevel.LOCAL_ONE, - 1, - 2, - ProtocolConstants.WriteType.SIMPLE))); - } - - 
@Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onWriteTimeoutVerdict( - any(SimpleStatement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(DefaultWriteType.SIMPLE), - eq(2), - eq(1), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - UnavailableException.class, - DefaultNodeMetric.UNAVAILABLES, - DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new Unavailable( - "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 2, 1))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onUnavailableVerdict( - any(SimpleStatement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(2), - eq(1), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - ServerError.class, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.SERVER_ERROR, "mock server error"))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onErrorResponseVerdict( - any(SimpleStatement.class), any(ServerError.class), eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - HeartbeatException.class, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponseFailure(node, Mockito.mock(HeartbeatException.class)); - } - - @Override - public void 
mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onRequestAbortedVerdict( - any(SimpleStatement.class), any(HeartbeatException.class), eq(0))) - .thenReturn(verdict); - } - }); - } - - @DataProvider - public static Object[][] failureAndIdempotent() { - return combine(failure(), excludeBatchStatements(idempotentConfig()), allDseProtocolVersions()); - } - - @DataProvider - public static Object[][] failureAndNotIdempotent() { - return combine( - failure(), excludeBatchStatements(nonIdempotentConfig()), allDseProtocolVersions()); - } - - @DataProvider - public static Object[][] allIdempotenceConfigs() { - return combine( - excludeBatchStatements(ContinuousCqlRequestHandlerTestBase.allIdempotenceConfigs()), - allDseProtocolVersions()); - } - - private static Object[][] excludeBatchStatements(Object[][] configs) { - List result = new ArrayList<>(); - for (Object[] config : configs) { - if (!(config[1] instanceof BatchStatement)) { - result.add(config); - } - } - return result.toArray(new Object[][] {}); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTest.java deleted file mode 100644 index a816183e9ee..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTest.java +++ /dev/null @@ -1,529 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V2; -import static com.datastax.dse.protocol.internal.DseProtocolConstants.RevisionType.CANCEL_CONTINUOUS_PAGING; -import static com.datastax.dse.protocol.internal.DseProtocolConstants.RevisionType.MORE_CONTINUOUS_PAGES; -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.argThat; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.matches; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.protocol.internal.request.Revise; -import 
com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.ProtocolFeature; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Iterator; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import java.util.regex.Pattern; -import org.junit.Test; -import org.mockito.Mockito; - -public class ContinuousCqlRequestHandlerTest extends ContinuousCqlRequestHandlerTestBase { - - private static final Pattern LOG_PREFIX_PER_REQUEST = Pattern.compile("test\\|\\d*\\|\\d"); - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_single_page_result(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())) - .build()) { - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - 
harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_multi_page_result(DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - - assertThatStage(page1Future) - .isSuccess( - page1 -> { - assertThat(page1.hasMorePages()).isTrue(); - assertThat(page1.pageNumber()).isEqualTo(1); - Iterator rows = page1.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows).toIterable().hasSize(10); - ExecutionInfo executionInfo = page1.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - 
assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNotNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - - ContinuousAsyncResultSet page1 = CompletableFutures.getCompleted(page1Future); - assertThat(handler.getPendingResult()).isNull(); - CompletionStage page2Future = page1.fetchNextPage(); - assertThat(handler.getPendingResult()).isNotNull(); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, true))); - - assertThatStage(page2Future) - .isSuccess( - page2 -> { - assertThat(page2.hasMorePages()).isFalse(); - assertThat(page2.pageNumber()).isEqualTo(2); - Iterator rows = page2.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows).toIterable().hasSize(10); - ExecutionInfo executionInfo = page2.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_fail_if_no_node_available(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - // Mock no responses => this will produce an empty query plan - .build()) { - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - 
assertThatStage(resultSetFuture) - .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class)); - } - } - - @Test - @UseDataProvider(value = "allOssProtocolVersions", location = DseTestDataProviders.class) - public void should_throw_if_protocol_version_does_not_support_continuous_paging( - ProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder().withProtocolVersion(version).build()) { - Mockito.when( - harness - .getContext() - .getProtocolVersionRegistry() - .supports(any(DefaultProtocolVersion.class), any(ProtocolFeature.class))) - .thenReturn(false); - assertThatThrownBy( - () -> - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle()) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Cannot execute continuous paging requests with protocol version " + version); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_time_out_if_first_page_takes_too_long(DseProtocolVersion version) - throws Exception { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // mark the initial request as successful, which should schedule a timeout for the first page - node1Behavior.setWriteSuccess(); - CapturedTimeout page1Timeout = harness.nextScheduledTimeout(); - assertThat(page1Timeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(TIMEOUT_FIRST_PAGE.toNanos()); - - page1Timeout.task().run(page1Timeout); - - assertThatStage(resultSetFuture) - .isFailed( - t -> - assertThat(t) - 
.isInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Timed out waiting for page 1")); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_time_out_if_other_page_takes_too_long(DseProtocolVersion version) - throws Exception { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - - try (RequestHandlerTestHarness harness = builder.build()) { - - CompletionStage page1Future = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // mark the initial request as successful, which should schedule a timeout for the first page - node1Behavior.setWriteSuccess(); - CapturedTimeout page1Timeout = harness.nextScheduledTimeout(); - assertThat(page1Timeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(TIMEOUT_FIRST_PAGE.toNanos()); - - // the server replies with page 1, the corresponding timeout should be cancelled - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - assertThat(page1Timeout.isCancelled()).isTrue(); - - // request page 2, the queue is empty so this should request more pages and schedule another - // timeout - ContinuousAsyncResultSet page1 = CompletableFutures.getUninterruptibly(page1Future); - CompletionStage page2Future = page1.fetchNextPage(); - CapturedTimeout page2Timeout = harness.nextScheduledTimeout(); - assertThat(page2Timeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(TIMEOUT_OTHER_PAGES.toNanos()); - - page2Timeout.task().run(page2Timeout); - - assertThatStage(page2Future) - .isFailed( - t -> - assertThat(t) - .isInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Timed out waiting for page 2")); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - 
public void should_cancel_future_if_session_cancelled(DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - // will be discarded - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, false))); - - ContinuousAsyncResultSet page1 = CompletableFutures.getUninterruptibly(page1Future); - page1.cancel(); - - assertThat(handler.getState()).isEqualTo(-2); - assertThat(page1.fetchNextPage()).isCancelled(); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_cancel_session_if_future_cancelled(DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - page1Future.toCompletableFuture().cancel(true); - // this should be ignored - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - assertThat(handler.getState()).isEqualTo(-2); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_not_cancel_session_if_future_cancelled_but_already_done( - 
DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - // this will complete page 1 future - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, true))); - - // to late - page1Future.toCompletableFuture().cancel(true); - assertThat(handler.getState()).isEqualTo(-1); - } - } - - @Test - public void should_send_cancel_request_if_dse_v2() { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(DSE_V2); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - page1Future.toCompletableFuture().cancel(true); - assertThat(handler.getState()).isEqualTo(-2); - verify(node1Behavior.getChannel()) - .write(argThat(this::isCancelRequest), anyBoolean(), anyMap(), any()); - } - } - - @Test - public void should_toggle_channel_autoread_if_dse_v1() { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(DSE_V1); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - CompletionStage page1Future = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // simulate the arrival of 5 pages, the first one will complete page1 
future above, - // the following 4 will be enqueued and should trigger autoread off - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(3, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(4, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(5, false))); - - verify(node1Behavior.getChannel().config()).setAutoRead(false); - - // simulate the retrieval of 2 pages, this should dequeue page 2 - // and trigger autoread on - ContinuousAsyncResultSet page1 = CompletableFutures.getCompleted(page1Future); - CompletableFutures.getCompleted(page1.fetchNextPage()); - - verify(node1Behavior.getChannel().config()).setAutoRead(true); - - // in DSE_V1, the backpressure request should not have been sent - verify(node1Behavior.getChannel(), never()) - .write(any(Revise.class), anyBoolean(), anyMap(), any()); - } - } - - @Test - public void should_send_backpressure_request_if_dse_v2() { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(DSE_V2); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - CompletionStage page1Future = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // simulate the arrival of 4 pages, the first one will complete page1 future above, - // the following 3 will be enqueued - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(3, false))); - 
node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(4, false))); - - // simulate the retrieval of 2 pages, this should dequeue page 2 - // and trigger a backpressure request as the queue is now half empty (2/4) - ContinuousAsyncResultSet page1 = CompletableFutures.getCompleted(page1Future); - CompletableFutures.getCompleted(page1.fetchNextPage()); - - verify(node1Behavior.getChannel()) - .write(argThat(this::isBackpressureRequest), anyBoolean(), anyMap(), any()); - // should not mess with autoread in dse v2 - verify(node1Behavior.getChannel().config(), never()).setAutoRead(anyBoolean()); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_invoke_request_tracker(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withResponse( - node1, - defaultFrameOf( - new com.datastax.oss.protocol.internal.response.Error( - ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .build()) { - - RequestTracker requestTracker = mock(RequestTracker.class); - when(harness.getContext().getRequestTracker()).thenReturn(requestTracker); - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).isNotEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - 
assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - verify(requestTracker) - .onNodeError( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - any(BootstrappingException.class), - anyLong(), - any(DriverExecutionProfile.class), - eq(node1), - matches(LOG_PREFIX_PER_REQUEST)); - verify(requestTracker) - .onNodeSuccess( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - anyLong(), - any(DriverExecutionProfile.class), - eq(node2), - matches(LOG_PREFIX_PER_REQUEST)); - verify(requestTracker) - .onSuccess( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - anyLong(), - any(DriverExecutionProfile.class), - eq(node2), - matches(LOG_PREFIX_PER_REQUEST)); - verifyNoMoreInteractions(requestTracker); - }); - } - } - - private boolean isBackpressureRequest(Message argument) { - return argument instanceof Revise && ((Revise) argument).revisionType == MORE_CONTINUOUS_PAGES; - } - - private boolean isCancelRequest(Message argument) { - return argument instanceof Revise - && ((Revise) argument).revisionType == CANCEL_CONTINUOUS_PAGING; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTestBase.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTestBase.java deleted file mode 100644 index 04195f5faf0..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTestBase.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.dse.driver.api.core.config.DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES; -import static com.datastax.dse.driver.api.core.config.DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE; -import static com.datastax.dse.driver.api.core.config.DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerTestBase; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import java.time.Duration; - -public abstract class ContinuousCqlRequestHandlerTestBase extends CqlRequestHandlerTestBase { - - static final Duration TIMEOUT_FIRST_PAGE = Duration.ofSeconds(2); - static final Duration TIMEOUT_OTHER_PAGES = Duration.ofSeconds(1); - - protected RequestHandlerTestHarness.Builder continuousHarnessBuilder() { - return new RequestHandlerTestHarness.Builder() { - @Override - public RequestHandlerTestHarness build() { - RequestHandlerTestHarness harness = super.build(); - DriverExecutionProfile config = harness.getContext().getConfig().getDefaultProfile(); - when(config.getDuration(CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE)) - .thenReturn(TIMEOUT_FIRST_PAGE); - 
when(config.getDuration(CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES)) - .thenReturn(TIMEOUT_OTHER_PAGES); - when(config.getInt(CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES)).thenReturn(4); - return harness; - } - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSetTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSetTest.java deleted file mode 100644 index 1e59559013f..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSetTest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.verify; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.assertj.core.api.ThrowableAssert.ThrowingCallable; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DefaultContinuousAsyncResultSetTest { - - @Mock private ColumnDefinitions columnDefinitions; - @Mock private ExecutionInfo executionInfo; - @Mock private ContinuousCqlRequestHandler handler; - @Mock private CountingIterator rows; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - public void should_fail_to_fetch_next_page_if_last() { - // Given - given(executionInfo.getPagingState()).willReturn(null); - DefaultContinuousAsyncResultSet resultSet = - new DefaultContinuousAsyncResultSet( - rows, columnDefinitions, 1, false, executionInfo, handler); - - // When - boolean hasMorePages = resultSet.hasMorePages(); - ThrowingCallable nextPage = resultSet::fetchNextPage; - - // Then - assertThat(hasMorePages).isFalse(); - assertThatThrownBy(nextPage) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Can't call fetchNextPage() on the last page"); - } - - @Test - public void should_invoke_handler_to_fetch_next_page() { - // Given - CompletableFuture mockResultFuture = new CompletableFuture<>(); - 
given(handler.fetchNextPage()).willReturn(mockResultFuture); - DefaultContinuousAsyncResultSet resultSet = - new DefaultContinuousAsyncResultSet( - rows, columnDefinitions, 1, true, executionInfo, handler); - - // When - boolean hasMorePages = resultSet.hasMorePages(); - CompletionStage nextPageFuture = resultSet.fetchNextPage(); - - // Then - assertThat(hasMorePages).isTrue(); - verify(handler).fetchNextPage(); - assertThat(nextPageFuture).isEqualTo(mockResultFuture); - } - - @Test - public void should_invoke_handler_to_cancel() { - // Given - DefaultContinuousAsyncResultSet resultSet = - new DefaultContinuousAsyncResultSet( - rows, columnDefinitions, 1, true, executionInfo, handler); - // When - resultSet.cancel(); - - // Then - verify(handler).cancel(); - } - - @Test - public void should_report_remaining_rows() { - // Given - given(rows.remaining()).willReturn(42); - DefaultContinuousAsyncResultSet resultSet = - new DefaultContinuousAsyncResultSet( - rows, columnDefinitions, 1, true, executionInfo, handler); - - // When - int remaining = resultSet.remaining(); - Iterable currentPage = resultSet.currentPage(); - - // Then - assertThat(remaining).isEqualTo(42); - assertThat(currentPage.iterator()).isSameAs(rows); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSetTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSetTest.java deleted file mode 100644 index 2bfb4768e49..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSetTest.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import java.util.Arrays; -import java.util.Iterator; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Test; -import org.mockito.Mockito; - -public class DefaultContinuousResultSetTest { - - @Test - public void should_create_result_set_from_single_page() { - // Given - ContinuousAsyncResultSet page1 = mockPage(false, 0, 1, 2); - - // When - ResultSet resultSet = new DefaultContinuousResultSet(page1); - - // Then - assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); - assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - 
assertThat(iterator.hasNext()).isFalse(); - } - - @Test - public void should_create_result_set_from_multiple_pages() { - // Given - ContinuousAsyncResultSet page1 = mockPage(true, 0, 1, 2); - ContinuousAsyncResultSet page2 = mockPage(true, 3, 4, 5); - ContinuousAsyncResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - ResultSet resultSet = new DefaultContinuousResultSet(page1); - - // Then - assertThat(resultSet.iterator().hasNext()).isTrue(); - - assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); - assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page2 - assertThat(resultSet.getExecutionInfo()).isEqualTo(page2.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()) - .containsExactly(page1.getExecutionInfo(), page2.getExecutionInfo()); - - assertNextRow(iterator, 3); - assertNextRow(iterator, 4); - assertNextRow(iterator, 5); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page3 - assertThat(resultSet.getExecutionInfo()).isEqualTo(page3.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()) - .containsExactly( - page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo()); - - assertNextRow(iterator, 6); - assertNextRow(iterator, 7); - assertNextRow(iterator, 8); - } - - private static ContinuousAsyncResultSet mockPage(boolean nextPage, Integer... 
data) { - ContinuousAsyncResultSet page = Mockito.mock(ContinuousAsyncResultSet.class); - - ColumnDefinitions columnDefinitions = Mockito.mock(ColumnDefinitions.class); - Mockito.when(page.getColumnDefinitions()).thenReturn(columnDefinitions); - - ExecutionInfo executionInfo = Mockito.mock(ExecutionInfo.class); - Mockito.when(page.getExecutionInfo()).thenReturn(executionInfo); - - if (nextPage) { - Mockito.when(page.hasMorePages()).thenReturn(true); - Mockito.when(page.fetchNextPage()).thenReturn(Mockito.spy(new CompletableFuture<>())); - } else { - Mockito.when(page.hasMorePages()).thenReturn(false); - Mockito.when(page.fetchNextPage()).thenThrow(new IllegalStateException()); - } - - Iterator rows = Arrays.asList(data).iterator(); - CountingIterator iterator = - new CountingIterator(data.length) { - @Override - protected Row computeNext() { - return rows.hasNext() ? mockRow(rows.next()) : endOfData(); - } - }; - Mockito.when(page.currentPage()).thenReturn(() -> iterator); - Mockito.when(page.remaining()).thenAnswer(invocation -> iterator.remaining()); - - return page; - } - - private static Row mockRow(int index) { - Row row = Mockito.mock(Row.class); - Mockito.when(row.getInt(0)).thenReturn(index); - return row; - } - - private static void complete( - CompletionStage stage, ContinuousAsyncResultSet result) { - stage.toCompletableFuture().complete(result); - } - - private static void assertNextRow(Iterator iterator, int expectedValue) { - assertThat(iterator.hasNext()).isTrue(); - Row row0 = iterator.next(); - assertThat(row0.getInt(0)).isEqualTo(expectedValue); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessorTest.java deleted file mode 100644 index 0bfb00695d3..00000000000 --- 
a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessorTest.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous.reactive; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestHandlerTestBase; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import 
com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.reactivex.Flowable; -import java.util.List; -import org.junit.Test; - -public class ContinuousCqlRequestReactiveProcessorTest extends ContinuousCqlRequestHandlerTestBase { - - @Test - public void should_be_able_to_process_reactive_result_set() { - ContinuousCqlRequestReactiveProcessor processor = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()); - assertThat( - processor.canProcess( - UNDEFINED_IDEMPOTENCE_STATEMENT, - ContinuousCqlRequestReactiveProcessor.CONTINUOUS_REACTIVE_RESULT_SET)) - .isTrue(); - } - - @Test - public void should_create_request_handler() { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(DSE_V1); - try (RequestHandlerTestHarness harness = builder.build()) { - ContinuousCqlRequestReactiveProcessor processor = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()); - assertThat( - processor.process( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test")) - .isInstanceOf(DefaultContinuousReactiveResultSet.class); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_single_page_result(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())) - .build()) { - - DefaultSession session = harness.getSession(); - InternalDriverContext context = harness.getContext(); - - ContinuousReactiveResultSet publisher = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()) - .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); - - 
List rows = Flowable.fromPublisher(publisher).toList().blockingGet(); - - assertThat(rows).hasSize(1); - ReactiveRow row = rows.get(0); - assertThat(row.getString("message")).isEqualTo("hello, world"); - ExecutionInfo executionInfo = row.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()).containsExactly(executionInfo); - - Flowable colDefsFlowable = - Flowable.fromPublisher(publisher.getColumnDefinitions()); - assertThat(colDefsFlowable.toList().blockingGet()) - .containsExactly(row.getColumnDefinitions()); - - Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); - assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(row.wasApplied()); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_multi_page_result(DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - DefaultSession session = harness.getSession(); - InternalDriverContext context = harness.getContext(); - - ContinuousReactiveResultSet publisher = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()) - .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); - - Flowable rowsPublisher = 
Flowable.fromPublisher(publisher).cache(); - rowsPublisher.subscribe(); - - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, true))); - - List rows = rowsPublisher.toList().blockingGet(); - assertThat(rows).hasSize(20); - - ReactiveRow first = rows.get(0); - ExecutionInfo firstExecutionInfo = first.getExecutionInfo(); - assertThat(firstExecutionInfo.getCoordinator()).isEqualTo(node1); - assertThat(firstExecutionInfo.getErrors()).isEmpty(); - assertThat(firstExecutionInfo.getIncomingPayload()).isEmpty(); - assertThat(firstExecutionInfo.getPagingState()).isNotNull(); - assertThat(firstExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(firstExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(firstExecutionInfo.getWarnings()).isEmpty(); - - ReactiveRow inSecondPage = rows.get(10); - ExecutionInfo secondExecutionInfo = inSecondPage.getExecutionInfo(); - assertThat(secondExecutionInfo.getCoordinator()).isEqualTo(node1); - assertThat(secondExecutionInfo.getErrors()).isEmpty(); - assertThat(secondExecutionInfo.getIncomingPayload()).isEmpty(); - assertThat(secondExecutionInfo.getPagingState()).isNull(); - assertThat(secondExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(secondExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(secondExecutionInfo.getWarnings()).isEmpty(); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()) - .containsExactly(firstExecutionInfo, secondExecutionInfo); - - Flowable colDefsFlowable = - Flowable.fromPublisher(publisher.getColumnDefinitions()); - assertThat(colDefsFlowable.toList().blockingGet()) - .containsExactly(first.getColumnDefinitions()); - - Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); - 
assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(first.wasApplied()); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessorTest.java deleted file mode 100644 index a7a6bced9e8..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessorTest.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static com.datastax.dse.driver.DseTestFixtures.singleDseRow; -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerTestBase; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.reactivex.Flowable; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import org.junit.Test; - -public class CqlRequestReactiveProcessorTest extends CqlRequestHandlerTestBase { - - @Test - public void should_be_able_to_process_reactive_result_set() { - CqlRequestReactiveProcessor processor = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()); - assertThat( - processor.canProcess( - UNDEFINED_IDEMPOTENCE_STATEMENT, 
CqlRequestReactiveProcessor.REACTIVE_RESULT_SET)) - .isTrue(); - } - - @Test - public void should_create_request_handler() { - RequestHandlerTestHarness.Builder builder = - RequestHandlerTestHarness.builder().withProtocolVersion(DSE_V1); - try (RequestHandlerTestHarness harness = builder.build()) { - CqlRequestReactiveProcessor processor = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()); - assertThat( - processor.process( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test")) - .isInstanceOf(DefaultReactiveResultSet.class); - } - } - - @Test - @UseDataProvider(value = "allDseAndOssProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_single_page_result(ProtocolVersion version) { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withProtocolVersion(version) - .withResponse(node1, defaultFrameOf(singleDseRow())) - .build()) { - - DefaultSession session = harness.getSession(); - InternalDriverContext context = harness.getContext(); - - ReactiveResultSet publisher = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()) - .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); - - List rows = Flowable.fromPublisher(publisher).toList().blockingGet(); - - assertThat(rows).hasSize(1); - ReactiveRow row = rows.get(0); - assertThat(row.getString("message")).isEqualTo("hello, world"); - ExecutionInfo executionInfo = row.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - Flowable execInfosFlowable = - 
Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()).containsExactly(executionInfo); - - Flowable colDefsFlowable = - Flowable.fromPublisher(publisher.getColumnDefinitions()); - assertThat(colDefsFlowable.toList().blockingGet()) - .containsExactly(row.getColumnDefinitions()); - - Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); - assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(row.wasApplied()); - } - } - - @Test - @UseDataProvider(value = "allDseAndOssProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_multi_page_result(ProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - RequestHandlerTestHarness.builder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - DefaultSession session = harness.getSession(); - InternalDriverContext context = harness.getContext(); - - // The 2nd page is obtained by an "external" call to session.executeAsync(), - // so we need to mock that. 
- CompletableFuture page2Future = new CompletableFuture<>(); - when(session.executeAsync(any(Statement.class))).thenAnswer(invocation -> page2Future); - ExecutionInfo mockInfo = mock(ExecutionInfo.class); - - ReactiveResultSet publisher = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()) - .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); - - Flowable rowsPublisher = Flowable.fromPublisher(publisher).cache(); - rowsPublisher.subscribe(); - - // emulate arrival of page 1 - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - - // emulate arrival of page 2 following the call to session.executeAsync() - page2Future.complete( - Conversions.toResultSet( - DseTestFixtures.tenDseRows(2, true), - mockInfo, - harness.getSession(), - harness.getContext())); - - List rows = rowsPublisher.toList().blockingGet(); - assertThat(rows).hasSize(20); - - ReactiveRow first = rows.get(0); - ExecutionInfo firstExecutionInfo = first.getExecutionInfo(); - assertThat(firstExecutionInfo.getCoordinator()).isEqualTo(node1); - assertThat(firstExecutionInfo.getErrors()).isEmpty(); - assertThat(firstExecutionInfo.getIncomingPayload()).isEmpty(); - assertThat(firstExecutionInfo.getPagingState()).isNotNull(); - assertThat(firstExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(firstExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(firstExecutionInfo.getWarnings()).isEmpty(); - - ReactiveRow inSecondPage = rows.get(10); - ExecutionInfo secondExecutionInfo = inSecondPage.getExecutionInfo(); - assertThat(secondExecutionInfo).isSameAs(mockInfo); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()) - .containsExactly(firstExecutionInfo, secondExecutionInfo); - - Flowable colDefsFlowable = - Flowable.fromPublisher(publisher.getColumnDefinitions()); - 
assertThat(colDefsFlowable.toList().blockingGet()) - .containsExactly(first.getColumnDefinitions()); - - Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); - assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(first.wasApplied()); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSetTckTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSetTckTest.java deleted file mode 100644 index a9ff5222460..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSetTckTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import io.reactivex.Flowable; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import org.reactivestreams.Publisher; -import org.reactivestreams.tck.PublisherVerification; -import org.reactivestreams.tck.TestEnvironment; - -public class DefaultReactiveResultSetTckTest extends PublisherVerification { - - public DefaultReactiveResultSetTckTest() { - super(new TestEnvironment()); - } - - @Override - public Publisher createPublisher(long elements) { - // The TCK usually requests between 0 and 20 items, or Long.MAX_VALUE. - // Past 3 elements it never checks how many elements have been effectively produced, - // so we can safely cap at, say, 20. - int effective = (int) Math.min(elements, 20L); - return new DefaultReactiveResultSet(() -> createResults(effective)); - } - - @Override - public Publisher createFailedPublisher() { - DefaultReactiveResultSet publisher = new DefaultReactiveResultSet(() -> createResults(1)); - // Since our publisher does not support multiple - // subscriptions, we use that to create a failed publisher. 
- publisher.subscribe(new TestSubscriber<>()); - return publisher; - } - - private static CompletableFuture createResults(int elements) { - CompletableFuture previous = null; - if (elements > 0) { - // create pages of 5 elements each to exercise pagination - List pages = - Flowable.range(0, elements).buffer(5).map(List::size).toList().blockingGet(); - Collections.reverse(pages); - for (Integer size : pages) { - CompletableFuture future = new CompletableFuture<>(); - future.complete(new MockAsyncResultSet(size, previous)); - previous = future; - } - } else { - previous = new CompletableFuture<>(); - previous.complete(new MockAsyncResultSet(0, null)); - } - return previous; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockAsyncResultSet.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockAsyncResultSet.java deleted file mode 100644 index 3783a2c6922..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockAsyncResultSet.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.CompletionStage; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.assertj.core.util.Lists; - -public class MockAsyncResultSet implements AsyncResultSet { - - private final List rows; - private final Iterator iterator; - private final CompletionStage nextPage; - private final ExecutionInfo executionInfo = mock(ExecutionInfo.class); - private final ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class); - private int remaining; - - public MockAsyncResultSet(int size, CompletionStage nextPage) { - this(IntStream.range(0, size).boxed().map(MockRow::new).collect(Collectors.toList()), nextPage); - } - - public MockAsyncResultSet(List rows, CompletionStage nextPage) { - this.rows = rows; - iterator = rows.iterator(); - remaining = rows.size(); - this.nextPage = nextPage; - } - - @Override - public Row one() { - Row next = iterator.next(); - remaining--; - return next; - } - - @Override - public int remaining() { - return remaining; - } - - @NonNull - @Override - public List currentPage() { - return Lists.newArrayList(rows); - } - - @Override - public boolean hasMorePages() { - return nextPage != null; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - return nextPage; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return columnDefinitions; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @Override - public boolean 
wasApplied() { - return true; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockRow.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockRow.java deleted file mode 100644 index 792bfb432f6..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockRow.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; - -class MockRow implements Row { - - private int index; - - MockRow(int index) { - this.index = index; - } - - @Override - public int size() { - return 0; - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return mock(CodecRegistry.class); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return DefaultProtocolVersion.V4; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return EmptyColumnDefinitions.INSTANCE; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return Collections.singletonList(0); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return 0; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return Collections.singletonList(0); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return 0; - } - - @NonNull - @Override - public DataType getType(int i) { - return DataTypes.INT; - } - - @NonNull - @Override - public DataType getType(@NonNull String name) { - return DataTypes.INT; - } - - @NonNull - @Override - public 
DataType getType(@NonNull CqlIdentifier id) { - return DataTypes.INT; - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return null; - } - - @Override - public boolean isDetached() { - return false; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) {} - - // equals and hashCode required for TCK tests that check that two subscribers - // receive the exact same set of items. - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof MockRow)) { - return false; - } - MockRow mockRow = (MockRow) o; - return index == mockRow.index; - } - - @Override - public int hashCode() { - return index; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscriptionTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscriptionTest.java deleted file mode 100644 index 6a1a5d644e3..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscriptionTest.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import org.junit.Test; - -public class ReactiveResultSetSubscriptionTest { - - @Test - public void should_retrieve_entire_result_set() { - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); - CompletableFuture future3 = new CompletableFuture<>(); - MockAsyncResultSet page1 = new MockAsyncResultSet(3, future2); - MockAsyncResultSet page2 = new MockAsyncResultSet(3, future3); - MockAsyncResultSet page3 = new MockAsyncResultSet(3, null); - TestSubscriber mainSubscriber = new TestSubscriber<>(); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start(() -> future1); - future1.complete(page1); - future2.complete(page2); - future3.complete(page3); - mainSubscriber.awaitTermination(); - List expected = new ArrayList<>(page1.currentPage()); - expected.addAll(page2.currentPage()); - expected.addAll(page3.currentPage()); - assertThat(mainSubscriber.getElements()).extracting("row").isEqualTo(expected); - assertThat(colDefsSubscriber.getElements()) - .hasSize(1) 
- .containsExactly(page1.getColumnDefinitions()); - assertThat(execInfosSubscriber.getElements()) - .hasSize(3) - .containsExactly( - page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo()); - assertThat(wasAppliedSubscriber.getElements()).hasSize(1).containsExactly(true); - } - - @Test - public void should_report_error_on_first_page() { - CompletableFuture future1 = new CompletableFuture<>(); - TestSubscriber mainSubscriber = new TestSubscriber<>(); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start(() -> future1); - future1.completeExceptionally(new UnavailableException(null, null, 0, 0)); - mainSubscriber.awaitTermination(); - assertThat(mainSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - assertThat(colDefsSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - assertThat(execInfosSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - assertThat(wasAppliedSubscriber.getError()) - .isNotNull() - .isInstanceOf(UnavailableException.class); - } - - @Test - public void should_report_synchronous_failure_on_first_page() { - TestSubscriber mainSubscriber = new TestSubscriber<>(); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start( - () -> { - throw 
new IllegalStateException(); - }); - mainSubscriber.awaitTermination(); - assertThat(mainSubscriber.getError()).isNotNull().isInstanceOf(IllegalStateException.class); - assertThat(colDefsSubscriber.getError()).isNotNull().isInstanceOf(IllegalStateException.class); - assertThat(execInfosSubscriber.getError()) - .isNotNull() - .isInstanceOf(IllegalStateException.class); - assertThat(wasAppliedSubscriber.getError()) - .isNotNull() - .isInstanceOf(IllegalStateException.class); - } - - @Test - public void should_report_error_on_intermediary_page() { - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); - MockAsyncResultSet page1 = new MockAsyncResultSet(3, future2); - TestSubscriber mainSubscriber = new TestSubscriber<>(); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start(() -> future1); - future1.complete(page1); - future2.completeExceptionally(new UnavailableException(null, null, 0, 0)); - mainSubscriber.awaitTermination(); - assertThat(mainSubscriber.getElements()).extracting("row").isEqualTo(page1.currentPage()); - assertThat(mainSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - // colDefsSubscriber completed normally when page1 arrived - assertThat(colDefsSubscriber.getError()).isNull(); - assertThat(colDefsSubscriber.getElements()) - .hasSize(1) - .containsExactly(page1.getColumnDefinitions()); - // execInfosSubscriber completed with error, but should have emitted 1 item for page1 - assertThat(execInfosSubscriber.getElements()) - .hasSize(1) - .containsExactly(page1.getExecutionInfo()); - 
assertThat(execInfosSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - // colDefsSubscriber completed normally when page1 arrived - assertThat(wasAppliedSubscriber.getElements()).hasSize(1).containsExactly(true); - assertThat(wasAppliedSubscriber.getError()).isNull(); - } - - @Test - public void should_handle_empty_non_final_pages() { - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); - CompletableFuture future3 = new CompletableFuture<>(); - MockAsyncResultSet page1 = new MockAsyncResultSet(10, future2); - MockAsyncResultSet page2 = new MockAsyncResultSet(0, future3); - MockAsyncResultSet page3 = new MockAsyncResultSet(10, null); - TestSubscriber mainSubscriber = new TestSubscriber<>(1); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start(() -> future1); - future1.complete(page1); - future2.complete(page2); - // emulate backpressure - subscription.request(1); - future3.complete(page3); - subscription.request(Long.MAX_VALUE); - mainSubscriber.awaitTermination(); - assertThat(mainSubscriber.getError()).isNull(); - List expected = new ArrayList<>(page1.currentPage()); - expected.addAll(page3.currentPage()); - assertThat(mainSubscriber.getElements()).hasSize(20).extracting("row").isEqualTo(expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTckTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTckTest.java deleted file mode 100644 index 3bdd138beef..00000000000 --- 
a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTckTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import org.reactivestreams.Publisher; -import org.reactivestreams.tck.PublisherVerification; -import org.reactivestreams.tck.TestEnvironment; - -public class SimpleUnicastProcessorTckTest extends PublisherVerification { - - public SimpleUnicastProcessorTckTest() { - super(new TestEnvironment()); - } - - @Override - public Publisher createPublisher(long elements) { - // The TCK usually requests between 0 and 20 items, or Long.MAX_VALUE. - // Past 3 elements it never checks how many elements have been effectively produced, - // so we can safely cap at, say, 20. 
- int effective = (int) Math.min(elements, 20L); - SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); - for (int i = 0; i < effective; i++) { - processor.onNext(i); - } - processor.onComplete(); - return processor; - } - - @Override - public Publisher createFailedPublisher() { - SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); - // Since our publisher does not support multiple - // subscriptions, we use that to create a failed publisher. - processor.subscribe(new TestSubscriber<>()); - return processor; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTest.java deleted file mode 100644 index 3ad2173946b..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTest.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.junit.Test; - -public class SimpleUnicastProcessorTest { - - /** Test for JAVA-2387. */ - @Test - public void should_propagate_upstream_signals_when_downstream_already_subscribed() { - // given - SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); - TestSubscriber subscriber = new TestSubscriber<>(); - // when - processor.subscribe(subscriber); // subscription happens before signals arrive - processor.onNext(1); - processor.onComplete(); - subscriber.awaitTermination(); - // then - assertThat(subscriber.getElements()).hasSize(1).containsExactly(1); - assertThat(subscriber.getError()).isNull(); - } - - @Test - public void should_delay_upstream_signals_until_downstream_is_subscribed() { - // given - SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); - TestSubscriber subscriber = new TestSubscriber<>(); - // when - processor.onNext(1); - processor.onComplete(); - processor.subscribe(subscriber); // subscription happens after signals arrive - subscriber.awaitTermination(); - // then - assertThat(subscriber.getElements()).hasSize(1).containsExactly(1); - assertThat(subscriber.getError()).isNull(); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/TestSubscriber.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/TestSubscriber.java deleted file mode 100644 index 652155e5309..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/TestSubscriber.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.assertj.core.api.Fail.fail; - -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -public class TestSubscriber implements Subscriber { - - private final List elements = new ArrayList<>(); - private final CountDownLatch latch = new CountDownLatch(1); - private final long demand; - private Subscription subscription; - private Throwable error; - - public TestSubscriber() { - this.demand = Long.MAX_VALUE; - } - - public TestSubscriber(long demand) { - this.demand = demand; - } - - @Override - public void onSubscribe(Subscription s) { - if (subscription != null) { - fail("already subscribed"); - } - subscription = s; - subscription.request(demand); - } - - @Override - public void onNext(T t) { - elements.add(t); - } - - @Override - public void onError(Throwable t) { - error = t; - latch.countDown(); - } - - @Override - public void onComplete() { - latch.countDown(); - } - - @Nullable - public Throwable getError() { - return error; - } - - @NonNull - public List 
getElements() { - return elements; - } - - public void awaitTermination() { - if (!Uninterruptibles.awaitUninterruptibly(latch, 1, TimeUnit.MINUTES)) { - fail("subscriber not terminated"); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineStringTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineStringTest.java deleted file mode 100644 index 38dc84549c4..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineStringTest.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.esri.core.geometry.ogc.OGCLineString; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import org.junit.Test; - -public class DefaultLineStringTest { - private final LineString lineString = - LineString.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)); - - private final String wkt = "LINESTRING (30 10, 10 30, 40 40)"; - - private final String json = - "{\"type\":\"LineString\",\"coordinates\":[[30.0,10.0],[10.0,30.0],[40.0,40.0]]}"; - - @Test - public void should_parse_valid_well_known_text() { - assertThat(LineString.fromWellKnownText(wkt)).isEqualTo(lineString); - } - - @Test - public void should_fail_to_parse_invalid_well_known_text() { - assertInvalidWkt("linestring()"); - assertInvalidWkt("linestring(30 10 20, 10 30 20)"); // 3d - assertInvalidWkt("linestring(0 0, 1 1, 0 1, 1 0)"); // crossing itself - assertInvalidWkt("superlinestring(30 10, 10 30, 40 40)"); - } - - @Test - public void should_convert_to_well_known_text() { - assertThat(lineString.toString()).isEqualTo(wkt); - } - - @Test - public void should_convert_to_well_known_binary() { - ByteBuffer actual = lineString.asWellKnownBinary(); - - ByteBuffer expected = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - expected.position(0); - expected.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 
1 : 0)); // endianness - expected.putInt(2); // type - expected.putInt(3); // num lineStrings - expected.putDouble(30); // x1 - expected.putDouble(10); // y1 - expected.putDouble(10); // x2 - expected.putDouble(30); // y2 - expected.putDouble(40); // x3 - expected.putDouble(40); // y3 - expected.flip(); - - assertThat(actual).isEqualTo(expected); - } - - @Test - public void should_load_from_well_known_binary() { - ByteBuffer bb = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - bb.position(0); - bb.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness - bb.putInt(2); // type - bb.putInt(3); // num lineStrings - bb.putDouble(30); // x1 - bb.putDouble(10); // y1 - bb.putDouble(10); // x2 - bb.putDouble(30); // y2 - bb.putDouble(40); // x3 - bb.putDouble(40); // y3 - bb.flip(); - - assertThat(LineString.fromWellKnownBinary(bb)).isEqualTo(lineString); - } - - @Test - public void should_parse_valid_geo_json() { - assertThat(LineString.fromGeoJson(json)).isEqualTo(lineString); - } - - @Test - public void should_convert_to_geo_json() { - assertThat(lineString.asGeoJson()).isEqualTo(json); - } - - @Test - public void should_convert_to_ogc_line_string() { - assertThat(((DefaultLineString) lineString).getOgcGeometry()).isInstanceOf(OGCLineString.class); - } - - @Test - public void should_produce_same_hashCode_for_equal_objects() { - LineString line1 = - LineString.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)); - LineString line2 = LineString.fromWellKnownText(wkt); - assertThat(line1).isEqualTo(line2); - assertThat(line1.hashCode()).isEqualTo(line2.hashCode()); - } - - @Test - public void should_expose_points() { - assertThat(lineString.getPoints()) - .containsOnly( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)); - assertThat(LineString.fromWellKnownText(wkt).getPoints()) - .containsOnly( - Point.fromCoordinates(30, 
10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)); - } - - @Test - public void should_encode_and_decode() throws Exception { - assertThat(SerializationUtils.serializeAndDeserialize(lineString)).isEqualTo(lineString); - } - - @Test - public void should_contain_self() { - assertThat(lineString.contains(lineString)).isTrue(); - } - - @Test - public void should_contain_all_intersected_points_except_start_and_end() { - LineString s = - LineString.fromPoints( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 30), - Point.fromCoordinates(30, 30)); - assertThat(s.contains(Point.fromCoordinates(0, 0))).isFalse(); - assertThat(s.contains(Point.fromCoordinates(0, 15))).isTrue(); - assertThat(s.contains(Point.fromCoordinates(0, 30))).isTrue(); - assertThat(s.contains(Point.fromCoordinates(15, 30))).isTrue(); - assertThat(s.contains(Point.fromCoordinates(30, 30))).isFalse(); - } - - @Test - public void should_contain_substring() { - assertThat( - lineString.contains( - LineString.fromPoints( - Point.fromCoordinates(30, 10), Point.fromCoordinates(10, 30)))) - .isTrue(); - } - - @Test - public void should_not_contain_unrelated_string() { - assertThat( - lineString.contains( - LineString.fromPoints( - Point.fromCoordinates(10, 10), Point.fromCoordinates(30, 30)))) - .isFalse(); - } - - @Test - public void should_not_contain_polygon() { - LineString s = - LineString.fromPoints( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 30), - Point.fromCoordinates(30, 30), - Point.fromCoordinates(30, 0)); - LineString p = - LineString.fromPoints( - Point.fromCoordinates(10, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 20), - Point.fromCoordinates(20, 10)); - assertThat(s.contains(p)).isFalse(); - } - - @Test - public void should_accept_empty_shape() throws Exception { - DefaultLineString s = ((DefaultLineString) LineString.fromWellKnownText("LINESTRING EMPTY")); - assertThat(s.getOgcGeometry().isEmpty()).isTrue(); - } - - private 
void assertInvalidWkt(String s) { - try { - LineString.fromWellKnownText(s); - fail("Should have thrown InvalidTypeException"); - } catch (IllegalArgumentException e) { - // expected - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPointTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPointTest.java deleted file mode 100644 index 1e3a7366741..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPointTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.esri.core.geometry.ogc.OGCPoint; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import org.junit.Test; - -public class DefaultPointTest { - - private DefaultPoint point = new DefaultPoint(1.1, 2.2); - - private final String wkt = "POINT (1.1 2.2)"; - - private final String json = "{\"type\":\"Point\",\"coordinates\":[1.1,2.2]}"; - - @Test - public void should_parse_valid_well_known_text() { - assertThat(Point.fromWellKnownText(wkt)).isEqualTo(point); - } - - @Test - public void should_fail_to_parse_invalid_well_known_text() { - assertInvalidWkt("superpoint(1.1 2.2 3.3)"); - } - - @Test - public void should_convert_to_well_known_text() { - assertThat(point.toString()).isEqualTo(wkt); - } - - @Test - public void should_convert_to_well_knowm_binary() { - ByteBuffer actual = point.asWellKnownBinary(); - - ByteBuffer expected = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - expected.position(0); - expected.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness - expected.putInt(1); // type - expected.putDouble(1.1); // x - expected.putDouble(2.2); // y - expected.flip(); - - assertThat(actual).isEqualTo(expected); - } - - @Test - public void should_load_from_well_known_binary() { - ByteBuffer bb = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - bb.position(0); - bb.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 
1 : 0)); // endianness - bb.putInt(1); // type - bb.putDouble(1.1); // x - bb.putDouble(2.2); // y - bb.flip(); - - assertThat(Point.fromWellKnownBinary(bb)).isEqualTo(point); - } - - @Test - public void should_parse_valid_geo_json() { - assertThat(Point.fromGeoJson(json)).isEqualTo(point); - } - - @Test - public void should_convert_to_geo_json() { - assertThat(point.asGeoJson()).isEqualTo(json); - } - - @Test - public void should_convert_to_ogc_point() { - assertThat(point.getOgcGeometry()).isInstanceOf(OGCPoint.class); - } - - @Test - public void should_produce_same_hashCode_for_equal_objects() { - Point point1 = new DefaultPoint(10, 20); - Point point2 = Point.fromWellKnownText("POINT (10 20)"); - assertThat(point1).isEqualTo(point2); - assertThat(point1.hashCode()).isEqualTo(point2.hashCode()); - } - - @Test - public void should_encode_and_decode() throws Exception { - assertThat(SerializationUtils.serializeAndDeserialize(point)).isEqualTo(point); - } - - @Test - public void should_contain_self() { - assertThat(point.contains(point)).isTrue(); - } - - @Test - public void should_not_contain_any_other_shape_than_self() { - DefaultPoint point2 = new DefaultPoint(1, 2); - DefaultPoint point3 = new DefaultPoint(1, 3); - assertThat(point.contains(point2)).isFalse(); - assertThat(point.contains(new DefaultLineString(point, point2))).isFalse(); - assertThat(point.contains(new DefaultPolygon(point, point2, point3))).isFalse(); - } - - @Test - public void should_accept_empty_shape() throws Exception { - DefaultPoint point = ((DefaultPoint) Point.fromWellKnownText("POINT EMPTY")); - assertThat(point.getOgcGeometry().isEmpty()).isTrue(); - } - - private void assertInvalidWkt(String s) { - try { - Point.fromWellKnownText(s); - fail("Should have thrown InvalidTypeException"); - } catch (IllegalArgumentException e) { - // expected - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygonTest.java 
b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygonTest.java deleted file mode 100644 index d86e9cdc269..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygonTest.java +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.esri.core.geometry.ogc.OGCPolygon; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import org.junit.Test; - -public class DefaultPolygonTest { - - private Polygon polygon = - Polygon.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 40), - Point.fromCoordinates(40, 40)); - - private String wkt = "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))"; - - private String json = - "{\"type\":\"Polygon\",\"coordinates\":[[[30.0,10.0],[10.0,20.0],[20.0,40.0],[40.0,40.0],[30.0,10.0]]]}"; - - @Test - public void should_parse_valid_well_known_text() { - assertThat(Polygon.fromWellKnownText(wkt)).isEqualTo(polygon); - } - - @Test - public void should_fail_to_parse_invalid_well_known_text() { - assertInvalidWkt("polygon(())"); // malformed - assertInvalidWkt("polygon((30 10 1, 40 40 1, 20 40 1, 10 20 1, 30 10 1))"); // 3d - assertInvalidWkt("polygon((0 0, 1 1, 0 1, 1 0, 0 0))"); // crosses itself - assertInvalidWkt("polygon123((30 10, 40 40, 20 40, 10 20, 30 10))"); // malformed - } - - @Test - public void should_convert_to_well_known_binary() { - ByteBuffer actual = polygon.asWellKnownBinary(); - - ByteBuffer expected = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - expected.position(0); - expected.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 
1 : 0)); // endianness - expected.putInt(3); // type - expected.putInt(1); // num rings - expected.putInt(5); // num polygons (ring 1/1) - expected.putDouble(30); // x1 - expected.putDouble(10); // y1 - expected.putDouble(40); // x2 - expected.putDouble(40); // y2 - expected.putDouble(20); // x3 - expected.putDouble(40); // y3 - expected.putDouble(10); // x4 - expected.putDouble(20); // y4 - expected.putDouble(30); // x5 - expected.putDouble(10); // y5 - expected.flip(); - - assertThat(actual).isEqualTo(expected); - } - - @Test - public void should_load_from_well_known_binary() { - ByteBuffer bb = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - bb.position(0); - bb.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness - bb.putInt(3); // type - bb.putInt(1); // num rings - bb.putInt(5); // num polygons (ring 1/1) - bb.putDouble(30); // x1 - bb.putDouble(10); // y1 - bb.putDouble(40); // x2 - bb.putDouble(40); // y2 - bb.putDouble(20); // x3 - bb.putDouble(40); // y3 - bb.putDouble(10); // x4 - bb.putDouble(20); // y4 - bb.putDouble(30); // x5 - bb.putDouble(10); // y5 - bb.flip(); - - assertThat(Polygon.fromWellKnownBinary(bb)).isEqualTo(polygon); - } - - @Test - public void should_parse_valid_geo_json() { - assertThat(Polygon.fromGeoJson(json)).isEqualTo(polygon); - } - - @Test - public void should_convert_to_geo_json() { - assertThat(polygon.asGeoJson()).isEqualTo(json); - } - - @Test - public void should_convert_to_ogc_polygon() { - assertThat(((DefaultPolygon) polygon).getOgcGeometry()).isInstanceOf(OGCPolygon.class); - } - - @Test - public void should_produce_same_hashCode_for_equal_objects() { - Polygon polygon1 = - Polygon.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 40), - Point.fromCoordinates(40, 40)); - Polygon polygon2 = Polygon.fromWellKnownText(wkt); - assertThat(polygon1).isEqualTo(polygon2); - 
assertThat(polygon1.hashCode()).isEqualTo(polygon2.hashCode()); - } - - @Test - public void should_build_with_constructor_without_checking_orientation() { - // By default, OGC requires outer rings to be clockwise and inner rings to be counterclockwise. - // We disable that in our constructors. - // This polygon has a single outer ring that is counterclockwise. - Polygon polygon = - Polygon.fromPoints( - Point.fromCoordinates(5, 0), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(0, 0)); - assertThat(polygon.asWellKnownText()).isEqualTo("POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0))"); - } - - @Test - public void should_build_complex_polygon_with_builder() { - Polygon polygon = - Polygon.builder() - .addRing( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)) - .addRing( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)) - .addRing( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)) - .build(); - assertThat(polygon.asWellKnownText()) - .isEqualTo( - "POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0), (1 1, 1 2, 2 2, 2 1, 1 1), (3 1, 3 2, 4 2, 4 1, 3 1))"); - } - - @Test - public void should_expose_rings() { - assertThat(polygon.getExteriorRing()) - .containsOnly( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 40), - Point.fromCoordinates(40, 40)); - assertThat(polygon.getInteriorRings().isEmpty()).isTrue(); - - Polygon fromWkt = Polygon.fromWellKnownText(wkt); - assertThat(fromWkt.getExteriorRing()) - .containsOnly( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 40), - Point.fromCoordinates(40, 40)); - assertThat(fromWkt.getInteriorRings().isEmpty()).isTrue(); - - Polygon complex = - Polygon.builder() - .addRing( - Point.fromCoordinates(0, 0), - 
Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)) - .addRing( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)) - .addRing( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)) - .build(); - assertThat(complex.getExteriorRing()) - .containsOnly( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)); - assertThat(complex.getInteriorRings()).hasSize(2); - assertThat(complex.getInteriorRings().get(0)) - .containsOnly( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)); - assertThat(complex.getInteriorRings().get(1)) - .containsOnly( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)); - - Polygon complexFromWkt = - Polygon.fromWellKnownText( - "POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0), (1 1, 1 2, 2 2, 2 1, 1 1), (3 1, 3 2, 4 2, 4 1, 3 1))"); - assertThat(complexFromWkt.getExteriorRing()) - .containsOnly( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)); - assertThat(complexFromWkt.getInteriorRings()).hasSize(2); - assertThat(complexFromWkt.getInteriorRings().get(0)) - .containsOnly( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)); - assertThat(complexFromWkt.getInteriorRings().get(1)) - .containsOnly( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)); - } - - @Test - public void should_encode_and_decode() throws Exception { - assertThat(SerializationUtils.serializeAndDeserialize(polygon)).isEqualTo(polygon); - } - - @Test - public void should_contain_self() { 
- assertThat(polygon.contains(polygon)).isTrue(); - } - - @Test - public void should_not_contain_point_or_linestring_on_exterior_ring() { - assertThat(polygon.contains(Point.fromCoordinates(30, 10))).isFalse(); - assertThat(polygon.contains(Point.fromCoordinates(30, 40))).isFalse(); - assertThat( - polygon.contains( - LineString.fromPoints( - Point.fromCoordinates(35, 40), Point.fromCoordinates(25, 40)))) - .isFalse(); - } - - @Test - public void should_contain_interior_shape() { - assertThat(polygon.contains(Point.fromCoordinates(20, 20))).isTrue(); - assertThat( - polygon.contains( - LineString.fromPoints( - Point.fromCoordinates(20, 20), Point.fromCoordinates(30, 20)))) - .isTrue(); - assertThat( - polygon.contains( - Polygon.fromPoints( - Point.fromCoordinates(20, 20), - Point.fromCoordinates(30, 20), - Point.fromCoordinates(20, 30)))) - .isTrue(); - } - - @Test - public void should_not_contain_exterior_shape() { - assertThat(polygon.contains(Point.fromCoordinates(10, 10))).isFalse(); - assertThat( - polygon.contains( - LineString.fromPoints( - Point.fromCoordinates(10, 10), Point.fromCoordinates(20, 20)))) - .isFalse(); - assertThat( - polygon.contains( - Polygon.fromPoints( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 10), - Point.fromCoordinates(10, 10)))) - .isFalse(); - } - - @Test - public void should_not_contain_shapes_in_interior_hole() { - Polygon complex = - Polygon.builder() - .addRing( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(30, 0), - Point.fromCoordinates(30, 30), - Point.fromCoordinates(0, 30)) - .addRing( - Point.fromCoordinates(10, 10), - Point.fromCoordinates(20, 10), - Point.fromCoordinates(20, 20), - Point.fromCoordinates(10, 20)) - .build(); - assertThat(complex.contains(Point.fromCoordinates(15, 15))).isFalse(); - } - - @Test - public void should_accept_empty_shape() throws Exception { - Polygon polygon = Polygon.fromWellKnownText("POLYGON EMPTY"); - assertThat(polygon.getExteriorRing()).isEmpty(); - 
assertThat(((DefaultPolygon) polygon).getOgcGeometry().isEmpty()).isTrue(); - } - - private void assertInvalidWkt(String s) { - try { - Polygon.fromWellKnownText(s); - fail("Should have thrown InvalidTypeException"); - } catch (IllegalArgumentException e) { - // expected - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceTest.java deleted file mode 100644 index ba158288891..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceTest.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import org.junit.Test; - -public class DistanceTest { - - private final Point point = Point.fromCoordinates(1.1, 2.2); - private final Distance distance = new Distance(point, 7.0); - private final String wkt = "DISTANCE((1.1 2.2) 7.0)"; - - @Test - public void should_parse_valid_well_known_text() { - Distance fromWkt = Distance.fromWellKnownText(wkt); - assertThat(fromWkt.getRadius()).isEqualTo(7.0); - assertThat(fromWkt.getCenter()).isEqualTo(point); - assertThat(Distance.fromWellKnownText(wkt)).isEqualTo(distance); - // whitespace doesn't matter between distance and spec. - assertThat(Distance.fromWellKnownText("DISTANCE ((1.1 2.2) 7.0)")).isEqualTo(distance); - // case doesn't matter. 
- assertThat(Distance.fromWellKnownText("distance((1.1 2.2) 7.0)")).isEqualTo(distance); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_well_known_text() { - Distance.fromWellKnownText("dist((1.1 2.2) 3.3)"); - } - - @Test - public void should_convert_to_well_known_text() { - assertThat(distance.asWellKnownText()).isEqualTo(wkt); - } - - @Test - public void should_contain_point() { - assertThat(distance.contains(Point.fromCoordinates(2.0, 3.0))).isTrue(); - } - - @Test - public void should_not_contain_point() { - // y axis falls outside of distance - assertThat(distance.contains(Point.fromCoordinates(2.0, 9.3))).isFalse(); - } - - @Test - public void should_contain_linestring() { - assertThat( - distance.contains( - LineString.fromPoints( - Point.fromCoordinates(2.0, 3.0), - Point.fromCoordinates(3.1, 6.2), - Point.fromCoordinates(-1.0, -2.0)))) - .isTrue(); - } - - @Test - public void should_not_contain_linestring() { - // second point falls outside of distance at y axis. - assertThat( - distance.contains( - LineString.fromPoints( - Point.fromCoordinates(2.0, 3.0), - Point.fromCoordinates(3.1, 9.2), - Point.fromCoordinates(-1.0, -2.0)))) - .isFalse(); - } - - @Test - public void should_contain_polygon() { - Polygon polygon = - Polygon.fromPoints( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 4), - Point.fromCoordinates(4, 4)); - assertThat(distance.contains(polygon)).isTrue(); - } - - @Test - public void should_not_contain_polygon() { - Polygon polygon = - Polygon.fromPoints( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 4), - Point.fromCoordinates(10, 4)); - // final point falls outside of distance at x axis. 
- assertThat(distance.contains(polygon)).isFalse(); - } - - @Test(expected = UnsupportedOperationException.class) - public void should_fail_to_convert_to_ogc() { - distance.getOgcGeometry(); - } - - @Test(expected = UnsupportedOperationException.class) - public void should_fail_to_convert_to_wkb() { - distance.asWellKnownBinary(); - } - - @Test(expected = UnsupportedOperationException.class) - public void should_fail_to_convert_to_geo_json() { - distance.asGeoJson(); - } - - @Test - public void should_serialize_and_deserialize() throws Exception { - assertThat(SerializationUtils.serializeAndDeserialize(distance)).isEqualTo(distance); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/SerializationUtils.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/SerializationUtils.java deleted file mode 100644 index 84bd1dab343..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/SerializationUtils.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; - -public class SerializationUtils { - - public static Object serializeAndDeserialize(Geometry geometry) - throws IOException, ClassNotFoundException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ObjectOutputStream out = new ObjectOutputStream(baos); - - out.writeObject(geometry); - - byte[] bytes = baos.toByteArray(); - if (!(geometry instanceof Distance)) { - byte[] wkb = Bytes.getArray(geometry.asWellKnownBinary()); - assertThat(bytes).containsSequence(wkb); - } - ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes)); - return in.readObject(); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerSpeculativeExecutionTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerSpeculativeExecutionTest.java deleted file mode 100644 index c67be162181..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerSpeculativeExecutionTest.java +++ /dev/null @@ -1,530 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.singleGraphRow; -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import 
com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -/** - * These tests are almost exact copies of {@link - * com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerSpeculativeExecutionTest}. 
- */ -@RunWith(DataProviderRunner.class) -public class ContinuousGraphRequestHandlerSpeculativeExecutionTest { - - @Mock DefaultNode node1; - @Mock DefaultNode node2; - @Mock DefaultNode node3; - - @Mock NodeMetricUpdater nodeMetricUpdater1; - @Mock NodeMetricUpdater nodeMetricUpdater2; - @Mock NodeMetricUpdater nodeMetricUpdater3; - - @Mock GraphSupportChecker graphSupportChecker; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(node1.getMetricUpdater()).thenReturn(nodeMetricUpdater1); - when(node2.getMetricUpdater()).thenReturn(nodeMetricUpdater2); - when(node3.getMetricUpdater()).thenReturn(nodeMetricUpdater3); - when(nodeMetricUpdater1.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(nodeMetricUpdater2.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(nodeMetricUpdater3.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())) - .thenReturn(GraphProtocol.GRAPH_BINARY_1_0); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "nonIdempotentGraphConfig") - public void should_not_schedule_speculative_executions_if_not_idempotent( - boolean defaultIdempotence, GraphStatement statement) { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - - node1Behavior.verifyWrite(); - 
node1Behavior.setWriteSuccess(); - - // should not schedule any timeout - assertThat(harness.nextScheduledTimeout()).isNull(); - - verifyNoMoreInteractions(speculativeExecutionPolicy); - verifyNoMoreInteractions(nodeMetricUpdater1); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_schedule_speculative_executions( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - long secondExecutionDelay = 200L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(2))) - .thenReturn(secondExecutionDelay); - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(3))) - .thenReturn(-1L); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - 
verifyNoMoreInteractions(nodeMetricUpdater1); - speculativeExecution1.task().run(speculativeExecution1); - verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME); - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - CapturedTimeout speculativeExecution2 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution2.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(secondExecutionDelay); - verifyNoMoreInteractions(nodeMetricUpdater2); - speculativeExecution2.task().run(speculativeExecution2); - verify(nodeMetricUpdater2) - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME); - node3Behavior.verifyWrite(); - node3Behavior.setWriteSuccess(); - - // No more scheduled tasks since the policy returns 0 on the third call. - assertThat(harness.nextScheduledTimeout()).isNull(); - - // Note that we don't need to complete any response, the test is just about checking that - // executions are started. 
- } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_not_start_execution_if_result_complete( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder() - .withGraphTimeout(Duration.ofSeconds(10)) - .withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - ContinuousGraphRequestHandler requestHandler = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker); - CompletionStage resultSetFuture = requestHandler.handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - // The first timeout scheduled should be the global timeout - CapturedTimeout globalTimeout = harness.nextScheduledTimeout(); - assertThat(globalTimeout.getDelay(TimeUnit.SECONDS)).isEqualTo(10); - - // Check that the first execution was scheduled but don't run it yet - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - - // Complete the request from the initial execution - node1Behavior.setResponseSuccess( - defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module))); - 
assertThatStage(resultSetFuture).isSuccess(); - - // Pending speculative executions should have been cancelled. However we don't check - // firstExecutionTask directly because the request handler's onResponse can sometimes be - // invoked before operationComplete (this is very unlikely in practice, but happens in our - // Travis CI build). When that happens, the speculative execution is not recorded yet when - // cancelScheduledTasks runs. - - // The fact that we missed the speculative execution is not a problem; even if it starts, it - // will eventually find out that the result is already complete and cancel itself: - speculativeExecution1.task().run(speculativeExecution1); - node2Behavior.verifyNoWrite(); - - verify(nodeMetricUpdater1) - .updateTimer( - eq(DseNodeMetric.GRAPH_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_fail_if_no_nodes(boolean defaultIdempotence, GraphStatement statement) { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - // No configured behaviors => will yield an empty query plan - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage resultSetFuture = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - 
.handle(); - - assertThatStage(resultSetFuture) - .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class)); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_fail_if_no_more_nodes_and_initial_execution_is_last( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - harnessBuilder.withResponse( - node2, - defaultDseFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage resultSetFuture = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1 yet - - // Run the next scheduled task to start the speculative execution. node2 will reply with a - // BOOTSTRAPPING error, causing a RETRY_NEXT; but the query plan is now empty so the - // speculative execution stops. - // next scheduled timeout should be the first speculative execution. Get it and run it. 
- CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - // node1 now replies with the same response, that triggers a RETRY_NEXT - node1Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // But again the query plan is empty so that should fail the request - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> nodeErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); - }); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_fail_if_no_more_nodes_and_speculative_execution_is_last( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage 
resultSetFuture = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1 yet - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - // node1 now replies with a BOOTSTRAPPING error that triggers a RETRY_NEXT - // but the query plan is empty so the initial execution stops - node1Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // Same thing with node2, so the speculative execution should reach the end of the query plan - // and fail the request - node2Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> nodeErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); - }); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_retry_in_speculative_executions( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = 
harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage resultSetFuture = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1. The request will stay hanging for the rest of this - // test - - // next scheduled timeout should be the first speculative execution. Get it and run it. 
- CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - // node2 replies with a response that triggers a RETRY_NEXT - node2Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - node3Behavior.setResponseSuccess( - defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module))); - - // The second execution should move to node3 and complete the request - assertThatStage(resultSetFuture).isSuccess(); - - // The request to node1 was still in flight, it should have been cancelled - node1Behavior.verifyCancellation(); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_stop_retrying_other_executions_if_result_complete( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage resultSetFuture = - new 
ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - // Complete the request from the initial execution - node1Behavior.setResponseSuccess( - defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module))); - assertThatStage(resultSetFuture).isSuccess(); - - // node2 replies with a response that would trigger a RETRY_NEXT if the request was still - // running - node2Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // The speculative execution should not move to node3 because it is stopped - node3Behavior.verifyNoWrite(); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerTest.java deleted file mode 100644 index b374539f12e..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerTest.java +++ /dev/null @@ -1,260 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.tenGraphRows; -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import 
com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.IOException; -import java.time.Duration; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class ContinuousGraphRequestHandlerTest { - - @Mock DefaultDriverContext mockContext; - @Mock DefaultNode node; - @Mock NodeMetricUpdater nodeMetricUpdater1; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(node.getMetricUpdater()).thenReturn(nodeMetricUpdater1); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_return_paged_results(GraphProtocol graphProtocol) throws IOException { - String profileName = "test-graph"; - when(nodeMetricUpdater1.isEnabled(DseNodeMetric.GRAPH_MESSAGES, profileName)).thenReturn(true); - - GraphBinaryModule module = createGraphBinaryModule(mockContext); - - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withGraphProtocolForTestConfig(graphProtocol); - PoolBehavior node1Behavior = builder.customBehavior(node); - - try (RequestHandlerTestHarness harness = 
builder.build()) { - - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setExecutionProfileName(profileName); - - ContinuousGraphRequestHandler handler = - new ContinuousGraphRequestHandler( - graphStatement, - harness.getSession(), - harness.getContext(), - "test", - module, - new GraphSupportChecker()); - - // send the initial request - CompletionStage page1Future = handler.handle(); - - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, module, 1, false))); - - assertThatStage(page1Future) - .isSuccess( - page1 -> { - assertThat(page1.hasMorePages()).isTrue(); - assertThat(page1.currentPage()).hasSize(10).allMatch(GraphNode::isVertex); - ExecutionInfo executionInfo = page1.getRequestExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - - AsyncGraphResultSet page1 = CompletableFutures.getCompleted(page1Future); - CompletionStage page2Future = page1.fetchNextPage(); - - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, module, 2, true))); - - assertThatStage(page2Future) - .isSuccess( - page2 -> { - assertThat(page2.hasMorePages()).isFalse(); - assertThat(page2.currentPage()).hasSize(10).allMatch(GraphNode::isVertex); - ExecutionInfo executionInfo = page2.getRequestExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - 
assertThat(executionInfo.getWarnings()).isEmpty(); - }); - - validateMetrics(profileName, harness); - } - } - - @Test - public void should_honor_default_timeout() throws Exception { - // given - GraphBinaryModule binaryModule = createGraphBinaryModule(mockContext); - Duration defaultTimeout = Duration.ofSeconds(1); - - RequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withGraphTimeout(defaultTimeout); - PoolBehavior node1Behavior = builder.customBehavior(node); - - try (RequestHandlerTestHarness harness = builder.build()) { - - DriverExecutionProfile profile = harness.getContext().getConfig().getDefaultProfile(); - when(profile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(true); - when(profile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL)) - .thenReturn(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode()); - - GraphStatement graphStatement = ScriptGraphStatement.newInstance("mockQuery"); - - // when - ContinuousGraphRequestHandler handler = - new ContinuousGraphRequestHandler( - graphStatement, - harness.getSession(), - harness.getContext(), - "test", - binaryModule, - new GraphSupportChecker()); - - // send the initial request - CompletionStage page1Future = handler.handle(); - - // acknowledge the write, will set the global timeout - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - CapturedTimeout globalTimeout = harness.nextScheduledTimeout(); - assertThat(globalTimeout.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(defaultTimeout.toNanos()); - - // will trigger the global timeout and complete it exceptionally - globalTimeout.task().run(globalTimeout); - assertThat(page1Future.toCompletableFuture()).isCompletedExceptionally(); - - assertThatThrownBy(() -> page1Future.toCompletableFuture().get()) - .hasRootCauseExactlyInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Query timed out after " + defaultTimeout); - } - } - - @Test - public void should_honor_statement_timeout() throws 
Exception { - // given - GraphBinaryModule binaryModule = createGraphBinaryModule(mockContext); - Duration defaultTimeout = Duration.ofSeconds(1); - Duration statementTimeout = Duration.ofSeconds(2); - - RequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withGraphTimeout(defaultTimeout); - PoolBehavior node1Behavior = builder.customBehavior(node); - - try (RequestHandlerTestHarness harness = builder.build()) { - - DriverExecutionProfile profile = harness.getContext().getConfig().getDefaultProfile(); - when(profile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(true); - when(profile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL)) - .thenReturn(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode()); - - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setTimeout(statementTimeout); - - // when - ContinuousGraphRequestHandler handler = - new ContinuousGraphRequestHandler( - graphStatement, - harness.getSession(), - harness.getContext(), - "test", - binaryModule, - new GraphSupportChecker()); - - // send the initial request - CompletionStage page1Future = handler.handle(); - - // acknowledge the write, will set the global timeout - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - CapturedTimeout globalTimeout = harness.nextScheduledTimeout(); - assertThat(globalTimeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(statementTimeout.toNanos()); - - // will trigger the global timeout and complete it exceptionally - globalTimeout.task().run(globalTimeout); - assertThat(page1Future.toCompletableFuture()).isCompletedExceptionally(); - - assertThatThrownBy(() -> page1Future.toCompletableFuture().get()) - .hasRootCauseExactlyInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Query timed out after " + statementTimeout); - } - } - - private void validateMetrics(String profileName, RequestHandlerTestHarness harness) { - // GRAPH_MESSAGES metrics update is invoked only for the 
first page - verify(nodeMetricUpdater1, times(1)) - .updateTimer( - eq(DseNodeMetric.GRAPH_MESSAGES), eq(profileName), anyLong(), eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - - verify(harness.getSession().getMetricUpdater()) - .updateTimer( - eq(DseSessionMetric.GRAPH_REQUESTS), eq(null), anyLong(), eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(harness.getSession().getMetricUpdater()); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverterTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverterTest.java deleted file mode 100644 index 1814b12aa4e..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverterTest.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.AbstractMap.SimpleEntry; -import java.util.Collections; -import java.util.List; -import java.util.Map.Entry; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Strict.class) -@SuppressWarnings("deprecation") -public class GraphExecutionInfoConverterTest { - - @Mock GraphStatement request; - @Mock Node node; - - private List> errors; - private List warnings; - private ImmutableMap payload; - - @Before - public void setUp() { - errors = - Collections.singletonList( - new SimpleEntry<>(node, new ServerError(node, "this is a server error"))); - warnings = Collections.singletonList("this is a warning"); - payload = ImmutableMap.of("key", Bytes.fromHexString("0xcafebabe")); - } - - @Test - public void should_convert_to_graph_execution_info() { - - // given - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getRequest()).thenReturn(request); - when(executionInfo.getCoordinator()).thenReturn(node); - when(executionInfo.getSpeculativeExecutionCount()).thenReturn(42); - when(executionInfo.getSuccessfulExecutionIndex()).thenReturn(10); - when(executionInfo.getErrors()).thenReturn(errors); - 
when(executionInfo.getWarnings()).thenReturn(warnings); - when(executionInfo.getIncomingPayload()).thenReturn(payload); - - // when - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo graphExecutionInfo = - GraphExecutionInfoConverter.convert(executionInfo); - - // then - assertThat(graphExecutionInfo.getStatement()).isSameAs(request); - assertThat(graphExecutionInfo.getCoordinator()).isSameAs(node); - assertThat(graphExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(42); - assertThat(graphExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(10); - assertThat(graphExecutionInfo.getErrors()).isEqualTo(errors); - assertThat(graphExecutionInfo.getWarnings()).isEqualTo(warnings); - assertThat(graphExecutionInfo.getIncomingPayload()).isEqualTo(payload); - } - - @Test - public void should_convert_from_graph_execution_info() { - - // given - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo graphExecutionInfo = - mock(com.datastax.dse.driver.api.core.graph.GraphExecutionInfo.class); - when(graphExecutionInfo.getStatement()).thenAnswer(args -> request); - when(graphExecutionInfo.getCoordinator()).thenReturn(node); - when(graphExecutionInfo.getSpeculativeExecutionCount()).thenReturn(42); - when(graphExecutionInfo.getSuccessfulExecutionIndex()).thenReturn(10); - when(graphExecutionInfo.getErrors()).thenReturn(errors); - when(graphExecutionInfo.getWarnings()).thenReturn(warnings); - when(graphExecutionInfo.getIncomingPayload()).thenReturn(payload); - - // when - ExecutionInfo executionInfo = GraphExecutionInfoConverter.convert(graphExecutionInfo); - - // then - assertThat(executionInfo.getRequest()).isSameAs(request); - assertThatThrownBy(executionInfo::getStatement).isInstanceOf(ClassCastException.class); - assertThat(executionInfo.getCoordinator()).isSameAs(node); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(42); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(10); - 
assertThat(executionInfo.getErrors()).isEqualTo(errors); - assertThat(executionInfo.getWarnings()).isEqualTo(warnings); - assertThat(executionInfo.getIncomingPayload()).isEqualTo(payload); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.isSchemaInAgreement()).isTrue(); - assertThat(executionInfo.getQueryTraceAsync()).isCompletedExceptionally(); - assertThatThrownBy(executionInfo::getQueryTrace) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Tracing was disabled for this request"); - assertThat(executionInfo.getResponseSizeInBytes()).isEqualTo(-1L); - assertThat(executionInfo.getCompressedResponseSizeInBytes()).isEqualTo(-1L); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphNodeTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphNodeTest.java deleted file mode 100644 index d7ded441e70..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphNodeTest.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_1_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_2_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPH_BINARY_1_0; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.remote.traversal.DefaultRemoteTraverser; -import org.apache.tinkerpop.gremlin.process.traversal.Traverser; -import org.apache.tinkerpop.gremlin.process.traversal.step.util.EmptyPath; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; -import 
org.apache.tinkerpop.gremlin.structure.util.detached.DetachedEdge; -import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedProperty; -import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertex; -import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertexProperty; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class GraphNodeTest { - - private GraphBinaryModule graphBinaryModule; - - @Before - public void setup() { - DefaultDriverContext dseDriverContext = mock(DefaultDriverContext.class); - when(dseDriverContext.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(dseDriverContext.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); - - TypeSerializerRegistry registry = - GraphBinaryModule.createDseTypeSerializerRegistry(dseDriverContext); - graphBinaryModule = - new GraphBinaryModule(new GraphBinaryReader(registry), new GraphBinaryWriter(registry)); - } - - @Test - public void should_not_support_set_for_graphson_2_0() throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableSet.of("value"), GRAPHSON_2_0); - - // then - assertThat(graphNode.isSet()).isFalse(); - } - - @Test - public void should_throw_for_set_for_graphson_1_0() throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableSet.of("value"), GRAPHSON_1_0); - - // then - assertThat(graphNode.isSet()).isFalse(); - assertThatThrownBy(graphNode::asSet).isExactlyInstanceOf(UnsupportedOperationException.class); - } - - @Test - @UseDataProvider(value = "allGraphProtocols") - public void should_create_graph_node_for_list(GraphProtocol graphVersion) throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableList.of("value"), graphVersion); - - // then - assertThat(graphNode.isList()).isTrue(); - List result = graphNode.asList(); - 
assertThat(result).isEqualTo(ImmutableList.of("value")); - } - - @Test - @UseDataProvider("allGraphProtocols") - public void should_create_graph_node_for_map(GraphProtocol graphProtocol) throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableMap.of("value", 1234), graphProtocol); - - // then - assertThat(graphNode.isMap()).isTrue(); - Map result = graphNode.asMap(); - assertThat(result).isEqualTo(ImmutableMap.of("value", 1234)); - } - - @Test - @UseDataProvider("graphson1_0and2_0") - public void should_create_graph_node_for_map_for_non_string_key(GraphProtocol graphProtocol) - throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - - // then - assertThat(graphNode.isMap()).isTrue(); - Map result = graphNode.asMap(); - assertThat(result).isEqualTo(ImmutableMap.of("12", 1234)); - } - - @Test - @UseDataProvider(value = "allGraphProtocols") - public void should_calculate_size_of_collection_types(GraphProtocol graphProtocol) - throws IOException { - // when - GraphNode mapNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - GraphNode setNode = serdeAndCreateGraphNode(ImmutableSet.of(12, 1234), graphProtocol); - GraphNode listNode = serdeAndCreateGraphNode(ImmutableList.of(12, 1234, 99999), graphProtocol); - - // then - assertThat(mapNode.size()).isEqualTo(1); - assertThat(setNode.size()).isEqualTo(2); - assertThat(listNode.size()).isEqualTo(3); - } - - @Test - @UseDataProvider(value = "allGraphProtocols") - public void should_return_is_value_only_for_scalar_value(GraphProtocol graphProtocol) - throws IOException { - // when - GraphNode mapNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - GraphNode setNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - GraphNode listNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - GraphNode vertexNode = - serdeAndCreateGraphNode(new 
DetachedVertex("a", "l", null), graphProtocol); - GraphNode edgeNode = - serdeAndCreateGraphNode( - new DetachedEdge("a", "l", Collections.emptyMap(), "v1", "l1", "v2", "l2"), - graphProtocol); - GraphNode pathNode = serdeAndCreateGraphNode(EmptyPath.instance(), graphProtocol); - GraphNode propertyNode = serdeAndCreateGraphNode(new DetachedProperty<>("a", 1), graphProtocol); - GraphNode vertexPropertyNode = - serdeAndCreateGraphNode( - new DetachedVertexProperty<>("id", "l", "v", null, new DetachedVertex("a", "l", null)), - graphProtocol); - GraphNode scalarValueNode = serdeAndCreateGraphNode(true, graphProtocol); - - // then - assertThat(mapNode.isValue()).isFalse(); - assertThat(setNode.isValue()).isFalse(); - assertThat(listNode.isValue()).isFalse(); - assertThat(vertexNode.isValue()).isFalse(); - assertThat(edgeNode.isValue()).isFalse(); - assertThat(pathNode.isValue()).isFalse(); - assertThat(propertyNode.isValue()).isFalse(); - assertThat(vertexPropertyNode.isValue()).isFalse(); - assertThat(scalarValueNode.isValue()).isTrue(); - } - - @Test - @UseDataProvider("objectGraphNodeProtocols") - public void should_check_if_node_is_property_not_map(GraphProtocol graphProtocol) - throws IOException { - // when - GraphNode propertyNode = serdeAndCreateGraphNode(new DetachedProperty<>("a", 1), graphProtocol); - - // then - assertThat(propertyNode.isProperty()).isTrue(); - assertThat(propertyNode.isMap()).isFalse(); - assertThat(propertyNode.asProperty()).isNotNull(); - } - - @Test - public void should_check_if_node_is_property_or_map_for_1_0() throws IOException { - // when - GraphNode propertyNode = serdeAndCreateGraphNode(new DetachedProperty<>("a", 1), GRAPHSON_1_0); - - // then - assertThat(propertyNode.isProperty()).isTrue(); - assertThat(propertyNode.isMap()).isTrue(); - assertThat(propertyNode.asProperty()).isNotNull(); - } - - @Test - @UseDataProvider("allGraphProtocols") - public void should_check_if_node_is_vertex_property(GraphProtocol graphProtocol) - 
throws IOException { - // when - GraphNode vertexPropertyNode = - serdeAndCreateGraphNode( - new DetachedVertexProperty<>("id", "l", "v", null, new DetachedVertex("a", "l", null)), - graphProtocol); - - // then - assertThat(vertexPropertyNode.isVertexProperty()).isTrue(); - assertThat(vertexPropertyNode.isVertexProperty()).isNotNull(); - } - - @Test - public void should_check_if_node_is_path_for_graphson_1_0() throws IOException { - // when - GraphNode pathNode = serdeAndCreateGraphNode(EmptyPath.instance(), GRAPHSON_1_0); - - // then - assertThat(pathNode.isPath()).isFalse(); - assertThatThrownBy(pathNode::asPath).isExactlyInstanceOf(UnsupportedOperationException.class); - } - - @Test - @UseDataProvider("objectGraphNodeProtocols") - public void should_check_if_node_is_path(GraphProtocol graphProtocol) throws IOException { - // when - GraphNode pathNode = serdeAndCreateGraphNode(EmptyPath.instance(), graphProtocol); - - // then - assertThat(pathNode.isPath()).isTrue(); - assertThat(pathNode.asPath()).isNotNull(); - } - - @Test - @UseDataProvider("allGraphProtocols") - public void should_check_if_node_is_vertex(GraphProtocol graphProtocol) throws IOException { - // when - GraphNode vertexNode = - serdeAndCreateGraphNode(new DetachedVertex("a", "l", null), graphProtocol); - - // then - assertThat(vertexNode.isVertex()).isTrue(); - assertThat(vertexNode.asVertex()).isNotNull(); - } - - @Test - @UseDataProvider("allGraphProtocols") - public void should_check_if_node_is_edge(GraphProtocol graphProtocol) throws IOException { - // when - GraphNode edgeNode = - serdeAndCreateGraphNode( - new DetachedEdge("a", "l", Collections.emptyMap(), "v1", "l1", "v2", "l2"), - graphProtocol); - - // then - assertThat(edgeNode.isEdge()).isTrue(); - assertThat(edgeNode.asEdge()).isNotNull(); - } - - private GraphNode serdeAndCreateGraphNode(Object inputValue, GraphProtocol graphProtocol) - throws IOException { - if (graphProtocol.isGraphBinary()) { - Buffer tinkerBuf = 
graphBinaryModule.serialize(new DefaultRemoteTraverser<>(inputValue, 0L)); - ByteBuffer nioBuffer = tinkerBuf.nioBuffer(); - tinkerBuf.release(); - return new ObjectGraphNode( - GraphConversions.createGraphBinaryGraphNode( - ImmutableList.of(nioBuffer), graphBinaryModule) - .as(Traverser.class) - .get()); - } else { - return GraphSONUtils.createGraphNode( - ImmutableList.of(GraphSONUtils.serializeToByteBuffer(inputValue, graphProtocol)), - graphProtocol); - } - } - - @DataProvider - public static Object[][] allGraphProtocols() { - return new Object[][] {{GRAPHSON_1_0}, {GRAPHSON_2_0}, {GRAPH_BINARY_1_0}}; - } - - @DataProvider - public static Object[][] graphson1_0and2_0() { - return new Object[][] {{GRAPHSON_1_0}, {GRAPHSON_2_0}}; - } - - @DataProvider - public static Object[][] objectGraphNodeProtocols() { - return new Object[][] {{GRAPHSON_2_0}, {GRAPH_BINARY_1_0}}; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTest.java deleted file mode 100644 index 9f325003610..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTest.java +++ /dev/null @@ -1,589 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_1_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_2_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPH_BINARY_1_0; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.serialize; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.singleGraphRow; -import static com.datastax.oss.driver.api.core.type.codec.TypeCodecs.BIGINT; -import static com.datastax.oss.driver.api.core.type.codec.TypeCodecs.TEXT; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.matches; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import 
com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.dse.protocol.internal.request.RawBytesQuery; -import com.datastax.dse.protocol.internal.request.query.DseQueryOptions; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Query; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.IOException; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.time.LocalDateTime; -import java.time.ZoneOffset; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.regex.Pattern; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import 
org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class GraphRequestHandlerTest { - - private static final Pattern LOG_PREFIX_PER_REQUEST = Pattern.compile("test-graph\\|\\d+"); - - @Mock DefaultNode node; - - @Mock protected NodeMetricUpdater nodeMetricUpdater1; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(node.getMetricUpdater()).thenReturn(nodeMetricUpdater1); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_query_message_from_script_statement(GraphProtocol graphProtocol) - throws IOException { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - ScriptGraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery") - .setQueryParam("p1", 1L) - .setQueryParam("p2", Uuids.random()); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - // checks - assertThat(m).isInstanceOf(Query.class); - Query q = ((Query) m); - assertThat(q.query).isEqualTo("mockQuery"); - assertThat(q.options.positionalValues) - .containsExactly(serialize(graphStatement.getQueryParams(), graphProtocol, module)); - assertThat(q.options.namedValues).isEmpty(); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_query_message_from_fluent_statement(GraphProtocol graphProtocol) - throws IOException { - // initialization - GraphRequestHandlerTestHarness 
harness = GraphRequestHandlerTestHarness.builder().build(); - GraphTraversal traversalTest = - DseGraph.g.V().has("person", "name", "marko").has("p1", 1L).has("p2", Uuids.random()); - GraphStatement graphStatement = FluentGraphStatement.newInstance(traversalTest); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - Map createdCustomPayload = - GraphConversions.createCustomPayload( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - // checks - assertThat(m).isInstanceOf(RawBytesQuery.class); - testQueryRequestAndPayloadContents( - ((RawBytesQuery) m), - createdCustomPayload, - GraphConversions.bytecodeToSerialize(graphStatement), - graphProtocol, - module); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_query_message_from_batch_statement(GraphProtocol graphProtocol) - throws IOException { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - @SuppressWarnings("rawtypes") - List traversalsTest = - ImmutableList.of( - // randomly testing some complex data types. 
Complete suite of data types test is in - // GraphDataTypesTest - DseGraph.g - .addV("person") - .property("p1", 2.3f) - .property("p2", LocalDateTime.now(ZoneOffset.UTC)), - DseGraph.g - .addV("software") - .property("p3", new BigInteger("123456789123456789123456789123456789")) - .property("p4", ImmutableList.of(Point.fromCoordinates(30.4, 25.63746284)))); - GraphStatement graphStatement = - BatchGraphStatement.builder().addTraversals(traversalsTest).build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - Map createdCustomPayload = - GraphConversions.createCustomPayload( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - // checks - assertThat(m).isInstanceOf(RawBytesQuery.class); - testQueryRequestAndPayloadContents( - ((RawBytesQuery) m), - createdCustomPayload, - GraphConversions.bytecodeToSerialize(graphStatement), - graphProtocol, - module); - } - - private void testQueryRequestAndPayloadContents( - RawBytesQuery q, - Map customPayload, - Object traversalTest, - GraphProtocol graphProtocol, - GraphBinaryModule module) - throws IOException { - if (graphProtocol.isGraphBinary()) { - assertThat(q.query).isEqualTo(GraphConversions.EMPTY_STRING_QUERY); - assertThat(customPayload).containsKey(GraphConversions.GRAPH_BINARY_QUERY_OPTION_KEY); - ByteBuffer encodedQuery = customPayload.get(GraphConversions.GRAPH_BINARY_QUERY_OPTION_KEY); - assertThat(encodedQuery).isNotNull(); - assertThat(encodedQuery).isEqualTo(serialize(traversalTest, graphProtocol, module)); - } else { - assertThat(q.query).isEqualTo(serialize(traversalTest, graphProtocol, module).array()); - 
assertThat(customPayload).doesNotContainKey(GraphConversions.GRAPH_BINARY_QUERY_OPTION_KEY); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_set_correct_query_options_from_graph_statement(GraphProtocol subProtocol) - throws IOException { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setQueryParam("name", "value"); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, subProtocol, executionProfile, harness.getContext(), module); - - // checks - Query query = ((Query) m); - DseQueryOptions options = ((DseQueryOptions) query.options); - assertThat(options.consistency) - .isEqualTo( - DefaultConsistencyLevel.valueOf( - executionProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .getProtocolCode()); - // set by the mock timestamp generator - assertThat(options.defaultTimestamp).isEqualTo(-9223372036854775808L); - assertThat(options.positionalValues) - .isEqualTo( - ImmutableList.of(serialize(ImmutableMap.of("name", "value"), subProtocol, module))); - - m = - GraphConversions.createMessageFromGraphStatement( - graphStatement.setTimestamp(2L), - subProtocol, - executionProfile, - harness.getContext(), - module); - query = ((Query) m); - options = ((DseQueryOptions) query.options); - assertThat(options.defaultTimestamp).isEqualTo(2L); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_payload_from_config_options(GraphProtocol subProtocol) { - // initialization - GraphRequestHandlerTestHarness harness = 
GraphRequestHandlerTestHarness.builder().build(); - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setExecutionProfileName("test-graph"); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Map requestPayload = - GraphConversions.createCustomPayload( - graphStatement, subProtocol, executionProfile, harness.getContext(), module); - - // checks - Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null); - Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_NAME, null); - Mockito.verify(executionProfile).getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false); - Mockito.verify(executionProfile).getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO); - Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null); - Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null); - - assertThat(requestPayload.get(GraphConversions.GRAPH_SOURCE_OPTION_KEY)) - .isEqualTo(TEXT.encode("a", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_RESULTS_OPTION_KEY)) - .isEqualTo( - TEXT.encode(subProtocol.toInternalCode(), harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_NAME_OPTION_KEY)) - .isEqualTo(TEXT.encode("mockGraph", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_LANG_OPTION_KEY)) - .isEqualTo(TEXT.encode("gremlin-groovy", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_TIMEOUT_OPTION_KEY)) - .isEqualTo(BIGINT.encode(2L, harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY)) - 
.isEqualTo(TEXT.encode("LOCAL_TWO", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY)) - .isEqualTo(TEXT.encode("LOCAL_THREE", harness.getContext().getProtocolVersion())); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_payload_from_statement_options(GraphProtocol subProtocol) { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - GraphStatement graphStatement = - ScriptGraphStatement.builder("mockQuery") - .setGraphName("mockGraph") - .setTraversalSource("a") - .setTimeout(Duration.ofMillis(2)) - .setReadConsistencyLevel(DefaultConsistencyLevel.TWO) - .setWriteConsistencyLevel(DefaultConsistencyLevel.THREE) - .setSystemQuery(false) - .build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Map requestPayload = - GraphConversions.createCustomPayload( - graphStatement, subProtocol, executionProfile, harness.getContext(), module); - - // checks - Mockito.verify(executionProfile, never()) - .getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null); - Mockito.verify(executionProfile, never()).getString(DseDriverOption.GRAPH_NAME, null); - Mockito.verify(executionProfile, never()) - .getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false); - Mockito.verify(executionProfile, never()) - .getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO); - Mockito.verify(executionProfile, never()) - .getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null); - Mockito.verify(executionProfile, never()) - .getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null); - - assertThat(requestPayload.get(GraphConversions.GRAPH_SOURCE_OPTION_KEY)) - .isEqualTo(TEXT.encode("a", 
harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_RESULTS_OPTION_KEY)) - .isEqualTo( - TEXT.encode(subProtocol.toInternalCode(), harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_NAME_OPTION_KEY)) - .isEqualTo(TEXT.encode("mockGraph", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_LANG_OPTION_KEY)) - .isEqualTo(TEXT.encode("gremlin-groovy", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_TIMEOUT_OPTION_KEY)) - .isEqualTo(BIGINT.encode(2L, harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY)) - .isEqualTo(TEXT.encode("TWO", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY)) - .isEqualTo(TEXT.encode("THREE", harness.getContext().getProtocolVersion())); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_not_set_graph_name_on_system_queries(GraphProtocol subProtocol) { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setSystemQuery(true); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Map requestPayload = - GraphConversions.createCustomPayload( - graphStatement, subProtocol, executionProfile, harness.getContext(), module); - - // checks - assertThat(requestPayload.get(GraphConversions.GRAPH_NAME_OPTION_KEY)).isNull(); - assertThat(requestPayload.get(GraphConversions.GRAPH_SOURCE_OPTION_KEY)).isNull(); - } - - @Test - 
@UseDataProvider("supportedGraphProtocolsWithDseVersions") - public void should_return_results_for_statements(GraphProtocol graphProtocol, Version dseVersion) - throws IOException { - - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder() - .withGraphProtocolForTestConfig(graphProtocol) - .withDseVersionInMetadata(dseVersion); - PoolBehavior node1Behavior = builder.customBehavior(node); - GraphRequestHandlerTestHarness harness = builder.build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // ideally we would be able to provide a function here to - // produce results instead of a static predefined response. - // Function to which we would pass the harness instance or a (mocked)DriverContext. - // Since that's not possible in the RequestHandlerTestHarness API at the moment, we - // have to use another DseDriverContext and GraphBinaryModule here, - // instead of reusing the one in the harness' DriverContext - node1Behavior.setResponseSuccess(defaultDseFrameOf(singleGraphRow(graphProtocol, module))); - - GraphSupportChecker graphSupportChecker = mock(GraphSupportChecker.class); - when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(false); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol); - - GraphRequestAsyncProcessor p = - Mockito.spy(new GraphRequestAsyncProcessor(harness.getContext(), graphSupportChecker)); - when(p.getGraphBinaryModule()).thenReturn(module); - - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setExecutionProfileName("test-graph"); - GraphResultSet grs = - new GraphRequestSyncProcessor(p) - .process(graphStatement, harness.getSession(), harness.getContext(), "test-graph"); - - List nodes = grs.all(); - assertThat(nodes.size()).isEqualTo(1); - - GraphNode graphNode = nodes.get(0); - assertThat(graphNode.isVertex()).isTrue(); - - Vertex vRead = graphNode.asVertex(); - 
assertThat(vRead.label()).isEqualTo("person"); - assertThat(vRead.id()).isEqualTo(1); - if (!graphProtocol.isGraphBinary()) { - // GraphBinary does not encode properties regardless of whether they are present in the - // parent element or not :/ - assertThat(vRead.property("name").id()).isEqualTo(11); - assertThat(vRead.property("name").value()).isEqualTo("marko"); - } - } - - @DataProvider - public static Object[][] supportedGraphProtocolsWithDseVersions() { - return new Object[][] { - {GRAPHSON_1_0, Version.parse("6.7.0")}, - {GRAPHSON_1_0, Version.parse("6.8.0")}, - {GRAPHSON_2_0, Version.parse("6.7.0")}, - {GRAPHSON_2_0, Version.parse("6.8.0")}, - {GRAPH_BINARY_1_0, Version.parse("6.7.0")}, - {GRAPH_BINARY_1_0, Version.parse("6.8.0")}, - }; - } - - @Test - @UseDataProvider("dseVersionsWithDefaultGraphProtocol") - public void should_invoke_request_tracker_and_update_metrics( - GraphProtocol graphProtocol, Version dseVersion) throws IOException { - when(nodeMetricUpdater1.isEnabled( - DseNodeMetric.GRAPH_MESSAGES, DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn(true); - - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder() - .withGraphProtocolForTestConfig(graphProtocol) - .withDseVersionInMetadata(dseVersion); - PoolBehavior node1Behavior = builder.customBehavior(node); - GraphRequestHandlerTestHarness harness = builder.build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - GraphSupportChecker graphSupportChecker = mock(GraphSupportChecker.class); - when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(false); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol); - - GraphRequestAsyncProcessor p = - Mockito.spy(new GraphRequestAsyncProcessor(harness.getContext(), graphSupportChecker)); - when(p.getGraphBinaryModule()).thenReturn(module); - - RequestTracker requestTracker = mock(RequestTracker.class); - 
when(harness.getContext().getRequestTracker()).thenReturn(requestTracker); - - GraphStatement graphStatement = ScriptGraphStatement.newInstance("mockQuery"); - - node1Behavior.setResponseSuccess(defaultDseFrameOf(singleGraphRow(graphProtocol, module))); - - GraphResultSet grs = - new GraphRequestSyncProcessor( - new GraphRequestAsyncProcessor(harness.getContext(), graphSupportChecker)) - .process(graphStatement, harness.getSession(), harness.getContext(), "test-graph"); - - List nodes = grs.all(); - assertThat(nodes.size()).isEqualTo(1); - - GraphNode graphNode = nodes.get(0); - assertThat(graphNode.isVertex()).isTrue(); - - Vertex actual = graphNode.asVertex(); - assertThat(actual.label()).isEqualTo("person"); - assertThat(actual.id()).isEqualTo(1); - if (!graphProtocol.isGraphBinary()) { - // GraphBinary does not encode properties regardless of whether they are present in the - // parent element or not :/ - assertThat(actual.property("name").id()).isEqualTo(11); - assertThat(actual.property("name").value()).isEqualTo("marko"); - } - - verify(requestTracker) - .onSuccess( - eq(graphStatement), - anyLong(), - any(DriverExecutionProfile.class), - eq(node), - matches(LOG_PREFIX_PER_REQUEST)); - verify(requestTracker) - .onNodeSuccess( - eq(graphStatement), - anyLong(), - any(DriverExecutionProfile.class), - eq(node), - matches(LOG_PREFIX_PER_REQUEST)); - verifyNoMoreInteractions(requestTracker); - - verify(nodeMetricUpdater1) - .isEnabled(DseNodeMetric.GRAPH_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .updateTimer( - eq(DseNodeMetric.GRAPH_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - - verify(harness.getSession().getMetricUpdater()) - .isEnabled(DseSessionMetric.GRAPH_REQUESTS, DriverExecutionProfile.DEFAULT_NAME); - verify(harness.getSession().getMetricUpdater()) - .updateTimer( - eq(DseSessionMetric.GRAPH_REQUESTS), - 
eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(harness.getSession().getMetricUpdater()); - } - - @Test - public void should_honor_statement_consistency_level() { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - ScriptGraphStatement graphStatement = - ScriptGraphStatement.builder("mockScript") - .setConsistencyLevel(DefaultConsistencyLevel.THREE) - .build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, GRAPH_BINARY_1_0, executionProfile, harness.getContext(), module); - - // checks - assertThat(m).isInstanceOf(Query.class); - Query q = ((Query) m); - assertThat(q.options.consistency).isEqualTo(DefaultConsistencyLevel.THREE.getProtocolCode()); - } - - @DataProvider - public static Object[][] dseVersionsWithDefaultGraphProtocol() { - // Default GraphSON sub protocol version differs based on DSE version, so test with a version - // less than DSE 6.8 as well as DSE 6.8. - return new Object[][] { - {GRAPHSON_2_0, Version.parse("6.7.0")}, - {GRAPH_BINARY_1_0, Version.parse("6.8.0")}, - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTestHarness.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTestHarness.java deleted file mode 100644 index 7e46b09bd59..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTestHarness.java +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.servererrors.DefaultWriteTypeRegistry; -import com.datastax.oss.driver.internal.core.session.throttling.PassThroughRequestThrottler; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.protocol.internal.Frame; -import io.netty.channel.EventLoop; 
-import java.time.Duration; -import javax.annotation.Nullable; -import org.mockito.ArgumentMatchers; -import org.mockito.Mock; - -/** - * Provides the environment to test a request handler, where a query plan can be defined, and the - * behavior of each successive node simulated. - */ -public class GraphRequestHandlerTestHarness extends RequestHandlerTestHarness { - - @Mock DriverExecutionProfile testProfile; - - @Mock DriverExecutionProfile systemQueryExecutionProfile; - - @Mock DefaultDriverContext dseDriverContext; - - @Mock EventLoop eventLoop; - - protected GraphRequestHandlerTestHarness( - Builder builder, - @Nullable GraphProtocol graphProtocolForTestConfig, - Duration graphTimeout, - @Nullable Version dseVersionForTestMetadata) { - super(builder); - - // not mocked by RequestHandlerTestHarness, will be used when DseDriverOptions.GRAPH_TIMEOUT - // is not zero in the config - when(eventLoopGroup.next()).thenReturn(eventLoop); - - // default graph options as in the reference.conf file - when(defaultProfile.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("g"); - when(defaultProfile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(Boolean.FALSE); - when(defaultProfile.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false)).thenReturn(false); - when(defaultProfile.getString(DseDriverOption.GRAPH_NAME, null)).thenReturn("mockGraph"); - when(defaultProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) - .thenReturn(graphTimeout); - - when(testProfile.getName()).thenReturn("test-graph"); - when(testProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) - .thenReturn(Duration.ofMillis(2L)); - when(testProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name()); - when(testProfile.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)).thenReturn(5000); - when(testProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - 
.thenReturn(DefaultConsistencyLevel.SERIAL.name()); - when(testProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE)).thenReturn(false); - when(testProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(true); - when(testProfile.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("a"); - when(testProfile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)) - .thenReturn(graphProtocolForTestConfig != null); - // only mock the config if graphProtocolForTestConfig is not null - if (graphProtocolForTestConfig != null) { - when(testProfile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL)) - .thenReturn(graphProtocolForTestConfig.toInternalCode()); - } - when(testProfile.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false)).thenReturn(false); - when(testProfile.getString(DseDriverOption.GRAPH_NAME, null)).thenReturn("mockGraph"); - when(testProfile.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null)) - .thenReturn("LOCAL_TWO"); - when(testProfile.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null)) - .thenReturn("LOCAL_THREE"); - when(config.getProfile("test-graph")).thenReturn(testProfile); - - when(systemQueryExecutionProfile.getName()).thenReturn("graph-system-query"); - when(systemQueryExecutionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) - .thenReturn(Duration.ZERO); - when(systemQueryExecutionProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name()); - when(systemQueryExecutionProfile.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)) - .thenReturn(5000); - when(systemQueryExecutionProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.SERIAL.name()); - when(systemQueryExecutionProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE)) - .thenReturn(false); - when(systemQueryExecutionProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)) - .thenReturn(true); - 
when(systemQueryExecutionProfile.getName()).thenReturn("graph-system-query"); - when(systemQueryExecutionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) - .thenReturn(Duration.ofMillis(2)); - when(systemQueryExecutionProfile.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false)) - .thenReturn(true); - when(systemQueryExecutionProfile.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null)) - .thenReturn("LOCAL_TWO"); - when(systemQueryExecutionProfile.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null)) - .thenReturn("LOCAL_THREE"); - - when(config.getProfile("graph-system-query")).thenReturn(systemQueryExecutionProfile); - - // need to re-mock everything on the context because the RequestHandlerTestHarness returns a - // InternalDriverContext and not a DseDriverContext. Couldn't figure out a way with mockito - // to say "mock this object (this.dseDriverContext), and delegate every call to that - // other object (this.context), except _this_ call and _this_ and so on" - // Spy wouldn't work because the spied object has to be of the same type as the final object - when(dseDriverContext.getConfig()).thenReturn(config); - when(dseDriverContext.getNettyOptions()).thenReturn(nettyOptions); - when(dseDriverContext.getLoadBalancingPolicyWrapper()).thenReturn(loadBalancingPolicyWrapper); - when(dseDriverContext.getRetryPolicy(ArgumentMatchers.anyString())).thenReturn(retryPolicy); - when(dseDriverContext.getSpeculativeExecutionPolicy(ArgumentMatchers.anyString())) - .thenReturn(speculativeExecutionPolicy); - when(dseDriverContext.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(dseDriverContext.getTimestampGenerator()).thenReturn(timestampGenerator); - when(dseDriverContext.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); - when(dseDriverContext.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - when(dseDriverContext.getConsistencyLevelRegistry()) - .thenReturn(new 
DefaultConsistencyLevelRegistry()); - when(dseDriverContext.getWriteTypeRegistry()).thenReturn(new DefaultWriteTypeRegistry()); - when(dseDriverContext.getRequestThrottler()) - .thenReturn(new PassThroughRequestThrottler(dseDriverContext)); - when(dseDriverContext.getRequestTracker()).thenReturn(new NoopRequestTracker(dseDriverContext)); - // if DSE Version is specified for test metadata, then we need to mock that up on the context - if (dseVersionForTestMetadata != null) { - DseTestFixtures.mockNodesInMetadataWithVersions( - dseDriverContext, true, dseVersionForTestMetadata); - } - } - - @Override - public DefaultDriverContext getContext() { - return dseDriverContext; - } - - public static GraphRequestHandlerTestHarness.Builder builder() { - return new GraphRequestHandlerTestHarness.Builder(); - } - - public static class Builder extends RequestHandlerTestHarness.Builder { - - private GraphProtocol graphProtocolForTestConfig; - private Duration graphTimeout = Duration.ZERO; - private Version dseVersionForTestMetadata; - - public GraphRequestHandlerTestHarness.Builder withGraphProtocolForTestConfig( - GraphProtocol protocol) { - this.graphProtocolForTestConfig = protocol; - return this; - } - - public GraphRequestHandlerTestHarness.Builder withDseVersionInMetadata(Version dseVersion) { - this.dseVersionForTestMetadata = dseVersion; - return this; - } - - public GraphRequestHandlerTestHarness.Builder withGraphTimeout(Duration globalTimeout) { - this.graphTimeout = globalTimeout; - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withEmptyPool(Node node) { - super.withEmptyPool(node); - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withWriteFailure(Node node, Throwable cause) { - super.withWriteFailure(node, cause); - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withResponseFailure(Node node, Throwable cause) { - super.withResponseFailure(node, cause); - return this; - } - 
- @Override - public GraphRequestHandlerTestHarness.Builder withResponse(Node node, Frame response) { - super.withResponse(node, response); - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withDefaultIdempotence( - boolean defaultIdempotence) { - super.withDefaultIdempotence(defaultIdempotence); - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withProtocolVersion( - ProtocolVersion protocolVersion) { - super.withProtocolVersion(protocolVersion); - return this; - } - - @Override - public GraphRequestHandlerTestHarness build() { - return new GraphRequestHandlerTestHarness( - this, graphProtocolForTestConfig, graphTimeout, dseVersionForTestMetadata); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetTestBase.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetTestBase.java deleted file mode 100644 index aed248675ae..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetTestBase.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -public abstract class GraphResultSetTestBase { - - /** Mocks an async result set where column 0 has type INT, with rows with the provided data. */ - protected AsyncGraphResultSet mockPage(boolean nextPage, Integer... data) { - AsyncGraphResultSet page = mock(AsyncGraphResultSet.class); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(page.getRequestExecutionInfo()).thenReturn(executionInfo); - - if (nextPage) { - when(page.hasMorePages()).thenReturn(true); - when(page.fetchNextPage()).thenReturn(spy(new CompletableFuture<>())); - } else { - when(page.hasMorePages()).thenReturn(false); - when(page.fetchNextPage()).thenThrow(new IllegalStateException()); - } - - // Emulate DefaultAsyncResultSet's internals (this is a bit sketchy, maybe it would be better - // to use real DefaultAsyncResultSet instances) - Queue queue = Lists.newLinkedList(Arrays.asList(data)); - CountingIterator iterator = - new CountingIterator(queue.size()) { - @Override - protected GraphNode computeNext() { - Integer index = queue.poll(); - return (index == null) ? 
endOfData() : mockRow(index); - } - }; - when(page.currentPage()).thenReturn(() -> iterator); - when(page.remaining()).thenAnswer(invocation -> iterator.remaining()); - - return page; - } - - private GraphNode mockRow(int index) { - GraphNode row = mock(GraphNode.class); - when(row.asInt()).thenReturn(index); - return row; - } - - protected static void complete( - CompletionStage stage, AsyncGraphResultSet result) { - stage.toCompletableFuture().complete(result); - } - - protected void assertNextRow(Iterator iterator, int expectedValue) { - assertThat(iterator.hasNext()).isTrue(); - GraphNode row = iterator.next(); - assertThat(row.asInt()).isEqualTo(expectedValue); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetsTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetsTest.java deleted file mode 100644 index fd5cffd2530..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetsTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import java.util.Iterator; -import org.junit.Test; - -public class GraphResultSetsTest extends GraphResultSetTestBase { - - @Test - public void should_create_result_set_from_single_page() { - // Given - AsyncGraphResultSet page1 = mockPage(false, 0, 1, 2); - - // When - GraphResultSet resultSet = GraphResultSets.toSync(page1); - - // Then - assertThat(resultSet.getRequestExecutionInfo()).isSameAs(page1.getRequestExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isFalse(); - } - - @Test - public void should_create_result_set_from_multiple_pages() { - // Given - AsyncGraphResultSet page1 = mockPage(true, 0, 1, 2); - AsyncGraphResultSet page2 = mockPage(true, 3, 4, 5); - AsyncGraphResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - GraphResultSet resultSet = GraphResultSets.toSync(page1); - - // Then - assertThat(resultSet.iterator().hasNext()).isTrue(); - - assertThat(resultSet.getRequestExecutionInfo()).isSameAs(page1.getRequestExecutionInfo()); - assertThat(((MultiPageGraphResultSet) resultSet).getRequestExecutionInfos()) - .containsExactly(page1.getRequestExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page2 - assertThat(resultSet.getRequestExecutionInfo()).isEqualTo(page2.getRequestExecutionInfo()); - 
assertThat(((MultiPageGraphResultSet) resultSet).getRequestExecutionInfos()) - .containsExactly(page1.getRequestExecutionInfo(), page2.getRequestExecutionInfo()); - - assertNextRow(iterator, 3); - assertNextRow(iterator, 4); - assertNextRow(iterator, 5); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page3 - assertThat(resultSet.getRequestExecutionInfo()).isEqualTo(page3.getRequestExecutionInfo()); - assertThat(((MultiPageGraphResultSet) resultSet).getRequestExecutionInfos()) - .containsExactly( - page1.getRequestExecutionInfo(), - page2.getRequestExecutionInfo(), - page3.getRequestExecutionInfo()); - - assertNextRow(iterator, 6); - assertNextRow(iterator, 7); - assertNextRow(iterator, 8); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBuilderBaseTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBuilderBaseTest.java deleted file mode 100644 index 4799437e617..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBuilderBaseTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphStatementBuilderBase; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.junit.Test; - -public class GraphStatementBuilderBaseTest { - - private static class MockGraphStatementBuilder - extends GraphStatementBuilderBase { - - @NonNull - @Override - public FluentGraphStatement build() { - FluentGraphStatement rv = mock(FluentGraphStatement.class); - when(rv.getTimestamp()).thenReturn(this.timestamp); - return rv; - } - } - - @Test - public void should_use_timestamp_if_set() { - - MockGraphStatementBuilder builder = new MockGraphStatementBuilder(); - builder.setTimestamp(1); - assertThat(builder.build().getTimestamp()).isEqualTo(1); - } - - @Test - public void should_use_correct_default_timestamp_if_not_set() { - - MockGraphStatementBuilder builder = new MockGraphStatementBuilder(); - assertThat(builder.build().getTimestamp()).isEqualTo(Statement.NO_DEFAULT_TIMESTAMP); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphSupportCheckerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphSupportCheckerTest.java deleted file mode 100644 index ec31bd4b12d..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphSupportCheckerTest.java +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.DseTestFixtures.mockNodesInMetadataWithVersions; -import static com.datastax.dse.driver.api.core.graph.PagingEnabledOptions.AUTO; -import static com.datastax.dse.driver.api.core.graph.PagingEnabledOptions.DISABLED; -import static com.datastax.dse.driver.api.core.graph.PagingEnabledOptions.ENABLED; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.PagingEnabledOptions; -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.DseProtocolFeature; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import 
com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class GraphSupportCheckerTest { - - @UseDataProvider("graphPagingEnabledAndDseVersions") - @Test - public void should_check_if_paging_is_supported( - boolean protocolWithPagingSupport, - PagingEnabledOptions statementGraphPagingEnabled, - PagingEnabledOptions contextGraphPagingEnabled, - List nodeDseVersions, - boolean expected) { - // given - GraphStatement graphStatement = mock(GraphStatement.class); - InternalDriverContext context = protocolWithPagingSupport(protocolWithPagingSupport); - statementGraphPagingEnabled(graphStatement, statementGraphPagingEnabled); - contextGraphPagingEnabled(context, contextGraphPagingEnabled); - addNodeWithDseVersion(context, nodeDseVersions); - - // when - boolean pagingEnabled = new GraphSupportChecker().isPagingEnabled(graphStatement, context); - - // then - assertThat(pagingEnabled).isEqualTo(expected); - } - - @Test - public void should_not_support_paging_when_statement_profile_not_present() { - // given - GraphStatement graphStatement = mock(GraphStatement.class); - InternalDriverContext context = protocolWithPagingSupport(true); - contextGraphPagingEnabled(context, DISABLED); - 
addNodeWithDseVersion(context, Collections.singletonList(Version.parse("6.8.0"))); - - // when - boolean pagingEnabled = new GraphSupportChecker().isPagingEnabled(graphStatement, context); - - // then - assertThat(pagingEnabled).isEqualTo(false); - } - - @Test - public void - should_support_paging_when_statement_profile_not_present_but_context_profile_has_paging_enabled() { - // given - GraphStatement graphStatement = mock(GraphStatement.class); - InternalDriverContext context = protocolWithPagingSupport(true); - contextGraphPagingEnabled(context, ENABLED); - addNodeWithDseVersion(context, Collections.singletonList(Version.parse("6.8.0"))); - - // when - boolean pagingEnabled = new GraphSupportChecker().isPagingEnabled(graphStatement, context); - - // then - assertThat(pagingEnabled).isEqualTo(true); - } - - @DataProvider() - public static Object[][] graphPagingEnabledAndDseVersions() { - List listWithGraphPagingNode = Collections.singletonList(Version.parse("6.8.0")); - List listWithoutGraphPagingNode = Collections.singletonList(Version.parse("6.7.0")); - List listWithNull = Collections.singletonList(null); - List listWithTwoNodesOneNotSupporting = - Arrays.asList(Version.parse("6.7.0"), Version.parse("6.8.0")); - - return new Object[][] { - {false, ENABLED, ENABLED, listWithGraphPagingNode, true}, - {true, ENABLED, ENABLED, listWithoutGraphPagingNode, true}, - {true, ENABLED, DISABLED, listWithGraphPagingNode, true}, - {true, ENABLED, ENABLED, listWithGraphPagingNode, true}, - {true, ENABLED, ENABLED, listWithNull, true}, - {true, ENABLED, ENABLED, listWithTwoNodesOneNotSupporting, true}, - {true, DISABLED, ENABLED, listWithGraphPagingNode, false}, - {true, DISABLED, AUTO, listWithGraphPagingNode, false}, - {true, DISABLED, DISABLED, listWithGraphPagingNode, false}, - {true, AUTO, AUTO, listWithGraphPagingNode, true}, - {true, AUTO, DISABLED, listWithGraphPagingNode, true}, - {false, AUTO, AUTO, listWithGraphPagingNode, false}, - {true, AUTO, AUTO, 
listWithTwoNodesOneNotSupporting, false}, - {true, AUTO, AUTO, listWithNull, false}, - }; - } - - private void addNodeWithDseVersion(InternalDriverContext context, List dseVersions) { - MetadataManager manager = mock(MetadataManager.class); - when(context.getMetadataManager()).thenReturn(manager); - Metadata metadata = mock(Metadata.class); - when(manager.getMetadata()).thenReturn(metadata); - Map nodes = new HashMap<>(); - for (Version v : dseVersions) { - Node node = mock(Node.class); - Map extras = new HashMap<>(); - extras.put(DseNodeProperties.DSE_VERSION, v); - when(node.getExtras()).thenReturn(extras); - nodes.put(UUID.randomUUID(), node); - } - when(metadata.getNodes()).thenReturn(nodes); - } - - private void contextGraphPagingEnabled( - InternalDriverContext context, PagingEnabledOptions option) { - DriverExecutionProfile driverExecutionProfile = mock(DriverExecutionProfile.class); - when(driverExecutionProfile.getString(DseDriverOption.GRAPH_PAGING_ENABLED)) - .thenReturn(option.name()); - DriverConfig config = mock(DriverConfig.class); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(driverExecutionProfile); - } - - private InternalDriverContext protocolWithPagingSupport(boolean pagingSupport) { - InternalDriverContext context = mock(InternalDriverContext.class); - when(context.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); - ProtocolVersionRegistry protocolVersionRegistry = mock(ProtocolVersionRegistry.class); - when(protocolVersionRegistry.supports( - DseProtocolVersion.DSE_V2, DseProtocolFeature.CONTINUOUS_PAGING)) - .thenReturn(pagingSupport); - when(context.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - return context; - } - - private void statementGraphPagingEnabled( - GraphStatement graphStatement, PagingEnabledOptions option) { - DriverExecutionProfile driverExecutionProfile = mock(DriverExecutionProfile.class); - 
when(driverExecutionProfile.getString(DseDriverOption.GRAPH_PAGING_ENABLED)) - .thenReturn(option.name()); - when(graphStatement.getExecutionProfile()).thenReturn(driverExecutionProfile); - } - - @Test - @UseDataProvider("dseVersionsAndGraphProtocols") - public void should_determine_default_graph_protocol_from_dse_version( - Version[] dseVersions, GraphProtocol expectedProtocol) { - // mock up the metadata for the context - // using 'true' here will treat null test Versions as no DSE_VERSION info in the metadata - DefaultDriverContext context = - mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), true, dseVersions); - GraphProtocol graphProtocol = new GraphSupportChecker().getDefaultGraphProtocol(context); - assertThat(graphProtocol).isEqualTo(expectedProtocol); - } - - @Test - @UseDataProvider("dseVersionsAndGraphProtocols") - public void should_determine_default_graph_protocol_from_dse_version_with_null_versions( - Version[] dseVersions, GraphProtocol expectedProtocol) { - // mock up the metadata for the context - // using 'false' here will treat null test Versions as explicit NULL info for DSE_VERSION - DefaultDriverContext context = - mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), false, dseVersions); - GraphProtocol graphProtocol = new GraphSupportChecker().getDefaultGraphProtocol(context); - assertThat(graphProtocol).isEqualTo(expectedProtocol); - } - - @DataProvider - public static Object[][] dseVersionsAndGraphProtocols() { - return new Object[][] { - {new Version[] {Version.parse("5.0.3")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {Version.parse("6.0.1")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {Version.parse("6.7.4")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {Version.parse("6.8.0")}, GraphProtocol.GRAPH_BINARY_1_0}, - {new Version[] {Version.parse("7.0.0")}, GraphProtocol.GRAPH_BINARY_1_0}, - {new Version[] {Version.parse("5.0.3"), Version.parse("6.8.0")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] 
{Version.parse("6.7.4"), Version.parse("6.8.0")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {Version.parse("6.8.0"), Version.parse("6.7.4")}, GraphProtocol.GRAPHSON_2_0}, - { - new Version[] {Version.parse("6.8.0"), Version.parse("7.0.0")}, - GraphProtocol.GRAPH_BINARY_1_0 - }, - {new Version[] {Version.parse("6.7.4"), Version.parse("6.7.4")}, GraphProtocol.GRAPHSON_2_0}, - { - new Version[] {Version.parse("6.8.0"), Version.parse("6.8.0")}, - GraphProtocol.GRAPH_BINARY_1_0 - }, - {null, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {null}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {null, Version.parse("6.8.0")}, GraphProtocol.GRAPHSON_2_0}, - { - new Version[] {Version.parse("6.8.0"), Version.parse("7.0.0"), null}, - GraphProtocol.GRAPHSON_2_0 - }, - }; - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_pickup_graph_protocol_from_statement(GraphProtocol graphProtocol) { - GraphStatement graphStatement = mock(GraphStatement.class); - DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); - when(graphStatement.getSubProtocol()).thenReturn(graphProtocol.toInternalCode()); - - GraphProtocol inferredProtocol = - new GraphSupportChecker() - .inferGraphProtocol( - graphStatement, executionProfile, mock(InternalDriverContext.class)); - - assertThat(inferredProtocol).isEqualTo(graphProtocol); - verifyZeroInteractions(executionProfile); - } - - @Test - @UseDataProvider("graphProtocolStringsAndDseVersions") - public void should_pickup_graph_protocol_and_parse_from_string_config( - String stringConfig, Version dseVersion) { - GraphStatement graphStatement = mock(GraphStatement.class); - DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); - when(executionProfile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(Boolean.TRUE); - when(executionProfile.getString(eq(DseDriverOption.GRAPH_SUB_PROTOCOL))) - .thenReturn(stringConfig); - - 
DefaultDriverContext context = - mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), true, dseVersion); - GraphProtocol inferredProtocol = - new GraphSupportChecker().inferGraphProtocol(graphStatement, executionProfile, context); - assertThat(inferredProtocol.toInternalCode()).isEqualTo(stringConfig); - } - - @DataProvider - public static Object[][] graphProtocolStringsAndDseVersions() { - // putting manual strings here to be sure to be notified if a value in - // GraphProtocol ever changes - return new Object[][] { - {"graphson-1.0", Version.parse("6.7.0")}, - {"graphson-1.0", Version.parse("6.8.0")}, - {"graphson-2.0", Version.parse("6.7.0")}, - {"graphson-2.0", Version.parse("6.8.0")}, - {"graph-binary-1.0", Version.parse("6.7.0")}, - {"graph-binary-1.0", Version.parse("6.8.0")}, - }; - } - - @Test - @UseDataProvider("dseVersions6") - public void should_use_correct_default_protocol_when_parsing(Version dseVersion) { - GraphStatement graphStatement = mock(GraphStatement.class); - DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); - DefaultDriverContext context = - mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), true, dseVersion); - GraphProtocol inferredProtocol = - new GraphSupportChecker().inferGraphProtocol(graphStatement, executionProfile, context); - // For DSE 6.8 and newer, the default should be GraphSON binary - // for DSE older than 6.8, the default should be GraphSON2 - assertThat(inferredProtocol) - .isEqualTo( - (dseVersion.compareTo(Version.parse("6.8.0")) < 0) - ? 
GraphProtocol.GRAPHSON_2_0 - : GraphProtocol.GRAPH_BINARY_1_0); - } - - @DataProvider - public static Object[][] dseVersions6() { - return new Object[][] {{Version.parse("6.7.0")}, {Version.parse("6.8.0")}}; - } - - @Test - public void should_fail_if_graph_protocol_used_is_invalid() { - assertThatThrownBy(() -> GraphProtocol.fromString("invalid")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Graph protocol used [\"invalid\"] unknown. Possible values are: [ \"graphson-1.0\", \"graphson-2.0\", \"graph-binary-1.0\"]"); - } - - @Test - public void should_fail_if_graph_protocol_used_is_graphson_3() { - assertThatThrownBy(() -> GraphProtocol.fromString("graphson-3.0")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Graph protocol used [\"graphson-3.0\"] unknown. Possible values are: [ \"graphson-1.0\", \"graphson-2.0\", \"graph-binary-1.0\"]"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphTestUtils.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphTestUtils.java deleted file mode 100644 index f58fc54d8c7..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphTestUtils.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.Rows; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import org.apache.tinkerpop.gremlin.process.remote.traversal.DefaultRemoteTraverser; -import org.apache.tinkerpop.gremlin.structure.Direction; -import org.apache.tinkerpop.gremlin.structure.T; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; -import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertex; -import 
org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertexProperty; -import org.assertj.core.api.InstanceOfAssertFactories; - -public class GraphTestUtils { - - public static ByteBuffer serialize( - Object value, GraphProtocol graphProtocol, GraphBinaryModule graphBinaryModule) - throws IOException { - - Buffer tinkerBuf = graphBinaryModule.serialize(value); - ByteBuffer nioBuffer = tinkerBuf.nioBuffer(); - tinkerBuf.release(); - return graphProtocol.isGraphBinary() - ? nioBuffer - : GraphSONUtils.serializeToByteBuffer(value, graphProtocol); - } - - public static Frame defaultDseFrameOf(Message responseMessage) { - return Frame.forResponse( - DseProtocolVersion.DSE_V2.getCode(), - 0, - null, - Frame.NO_PAYLOAD, - Collections.emptyList(), - responseMessage); - } - - public static Message singleGraphRow(GraphProtocol graphProtocol, GraphBinaryModule module) - throws IOException { - Vertex value = - DetachedVertex.build() - .setId(1) - .setLabel("person") - .addProperty( - DetachedVertexProperty.build() - .setId(11) - .setLabel("name") - .setValue("marko") - .create()) - .create(); - DseRowsMetadata metadata = - new DseRowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "gremlin", - 0, - graphProtocol.isGraphBinary() - ? RawType.PRIMITIVES.get(ProtocolConstants.DataType.BLOB) - : RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null, - 1, - true); - Queue> data = new ArrayDeque<>(); - data.add( - ImmutableList.of( - serialize( - graphProtocol.isGraphBinary() - // GraphBinary returns results directly inside a Traverser - ? 
new DefaultRemoteTraverser<>(value, 1) - : ImmutableMap.of("result", value), - graphProtocol, - module))); - return new DefaultRows(metadata, data); - } - - // Returns 10 rows, each with a vertex - public static Rows tenGraphRows( - GraphProtocol graphProtocol, GraphBinaryModule module, int page, boolean last) - throws IOException { - DseRowsMetadata metadata = - new DseRowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "gremlin", - 0, - graphProtocol.isGraphBinary() - ? RawType.PRIMITIVES.get(ProtocolConstants.DataType.BLOB) - : RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null, - page, - last); - Queue> data = new ArrayDeque<>(); - int start = (page - 1) * 10; - for (int i = start; i < start + 10; i++) { - Vertex v = - DetachedVertex.build() - .setId("vertex" + i) - .setLabel("person") - .addProperty( - DetachedVertexProperty.build() - .setId("property" + i) - .setLabel("name") - .setValue("user" + i) - .create()) - .create(); - data.add( - ImmutableList.of( - serialize( - graphProtocol.isGraphBinary() - // GraphBinary returns results directly inside a Traverser - ? new DefaultRemoteTraverser<>(v, 1) - : ImmutableMap.of("result", v), - graphProtocol, - module))); - } - return new DefaultRows(metadata, data); - } - - public static GraphBinaryModule createGraphBinaryModule(DefaultDriverContext context) { - TypeSerializerRegistry registry = GraphBinaryModule.createDseTypeSerializerRegistry(context); - return new GraphBinaryModule(new GraphBinaryReader(registry), new GraphBinaryWriter(registry)); - } - - public static void assertThatContainsProperties( - Map properties, Object... 
propsToMatch) { - for (int i = 0; i < propsToMatch.length; i += 2) { - assertThat(properties).containsEntry(propsToMatch[i], propsToMatch[i + 1]); - } - } - - public static void assertThatContainsLabel( - Map properties, Direction direction, String label) { - assertThat(properties) - .hasEntrySatisfying( - direction, - value -> - assertThat(value) - .asInstanceOf(InstanceOfAssertFactories.MAP) - .containsEntry(T.label, label)); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/binary/GraphDataTypesTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/binary/GraphDataTypesTest.java deleted file mode 100644 index e36f7e97e5a..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/binary/GraphDataTypesTest.java +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import static com.datastax.oss.driver.api.core.type.DataTypes.BIGINT; -import static com.datastax.oss.driver.api.core.type.DataTypes.DOUBLE; -import static com.datastax.oss.driver.api.core.type.DataTypes.DURATION; -import static com.datastax.oss.driver.api.core.type.DataTypes.FLOAT; -import static com.datastax.oss.driver.api.core.type.DataTypes.INT; -import static com.datastax.oss.driver.api.core.type.DataTypes.TEXT; -import static com.datastax.oss.driver.api.core.type.DataTypes.listOf; -import static com.datastax.oss.driver.api.core.type.DataTypes.mapOf; -import static com.datastax.oss.driver.api.core.type.DataTypes.setOf; -import static com.datastax.oss.driver.api.core.type.DataTypes.tupleOf; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.dse.driver.internal.core.graph.EditDistance; -import com.datastax.dse.driver.internal.core.graph.GraphConversions; -import com.datastax.dse.driver.internal.core.graph.GraphProtocol; -import com.datastax.dse.driver.internal.core.graph.GraphSONUtils; -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.type.DataTypes; -import 
com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.IOException; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneOffset; -import java.util.List; -import java.util.Set; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class GraphDataTypesTest { - - private GraphBinaryModule graphBinaryModule; - - @Mock private DefaultDriverContext context; - - private static final MutableCodecRegistry CODEC_REGISTRY = - new DefaultCodecRegistry("testDseRegistry"); - - static { - CODEC_REGISTRY.register(DseTypeCodecs.POINT, DseTypeCodecs.LINE_STRING, 
DseTypeCodecs.POLYGON); - } - - private static Object[][] graphsonOneDataTypes = - new Object[][] { - {"~’~^ää#123#ö"}, - {(byte) 34}, - {BigDecimal.TEN}, - {BigInteger.TEN}, - {Boolean.TRUE}, - {false}, - {23}, - {23L}, - {23.0d}, - {23f}, - {(short) 23}, - {LocalDate.now(ZoneOffset.UTC)}, - {LocalTime.now(ZoneOffset.UTC)}, - {java.util.UUID.randomUUID()}, - {Instant.now()}, - }; - - private static Object[][] graphsonTwoDataTypes = - new Object[][] { - {ImmutableList.of(1L, 2L, 3L)}, - {ImmutableSet.of(1L, 2L, 3L)}, - {ImmutableMap.of("a", 1, "b", 2)}, - {Point.fromCoordinates(3.3, 4.4)}, - { - LineString.fromPoints( - Point.fromCoordinates(1, 1), Point.fromCoordinates(2, 2), Point.fromCoordinates(3, 3)) - }, - { - Polygon.fromPoints( - Point.fromCoordinates(3, 4), Point.fromCoordinates(5, 4), Point.fromCoordinates(6, 6)) - }, - }; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(context.getCodecRegistry()).thenReturn(CODEC_REGISTRY); - when(context.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); - - TypeSerializerRegistry registry = GraphBinaryModule.createDseTypeSerializerRegistry(context); - graphBinaryModule = - new GraphBinaryModule(new GraphBinaryReader(registry), new GraphBinaryWriter(registry)); - } - - @DataProvider - public static Object[][] graphsonOneDataProvider() { - - return graphsonOneDataTypes; - } - - @DataProvider - public static Object[][] graphsonTwoDataProvider() { - - return TestDataProviders.concat(graphsonOneDataTypes, graphsonTwoDataTypes); - } - - @DataProvider - public static Object[][] binaryDataProvider() throws UnknownHostException { - - Object[][] binaryDataTypes = - new Object[][] { - {InetAddress.getLocalHost()}, - {ImmutableList.of(ImmutableList.of(1L, 3L), ImmutableList.of(2L, 4L))}, - {ImmutableSet.of(ImmutableSet.of(1, 2, 3))}, - {ImmutableMap.of(ImmutableMap.of("a", 1), ImmutableMap.of(2, "b"))}, - {tupleOf(INT, TEXT, FLOAT).newValue(1, "2", 3.41f)}, - { - tupleOf(INT, TEXT, 
tupleOf(TEXT, DURATION)) - .newValue( - 1, "2", tupleOf(TEXT, DURATION).newValue("a", CqlDuration.newInstance(2, 1, 0))) - }, - { - tupleOf( - listOf(INT), - setOf(FLOAT), - DataTypes.mapOf(TEXT, BIGINT), - listOf(listOf(DOUBLE)), - setOf(setOf(FLOAT)), - listOf(tupleOf(INT, TEXT))) - .newValue( - ImmutableList.of(4, 8, 22, 34, 37, 59), - ImmutableSet.of(28f, 44f, 59f), - ImmutableMap.of("big10", 2345L), - ImmutableList.of( - ImmutableList.of(11.1d, 33.3d), ImmutableList.of(22.2d, 44.4d)), - ImmutableSet.of(ImmutableSet.of(55.5f)), - ImmutableList.of(tupleOf(INT, TEXT).newValue(3, "three"))) - }, - { - new UserDefinedTypeBuilder("ks", "udt1") - .withField("a", INT) - .withField("b", TEXT) - .build() - .newValue(1, "two") - }, - {new Distance(Point.fromCoordinates(3.4, 17.0), 2.5)}, - {new EditDistance("xyz", 3)}, - {DseGraph.g.V().has("name", "marko").asAdmin().getBytecode()}, - { - GraphConversions.bytecodeToSerialize( - BatchGraphStatement.builder() - .addTraversal(DseGraph.g.addV("person").property("name", "1")) - .addTraversal(DseGraph.g.addV("person").property("name", "1")) - .build()) - }, - }; - return TestDataProviders.concat(graphsonTwoDataProvider(), binaryDataTypes); - } - - @Test - @UseDataProvider("binaryDataProvider") - public void dataTypesTest(Object value) throws IOException { - verifySerDeBinary(value); - } - - @Test - @UseDataProvider("graphsonOneDataProvider") - public void dataTypesTestGraphsonOne(Object value) throws IOException { - verifySerDeGraphson(value, GraphProtocol.GRAPHSON_1_0); - } - - @Test - @UseDataProvider("graphsonTwoDataProvider") - public void dataTypesTestGraphsonTwo(Object value) throws IOException { - verifySerDeGraphson(value, GraphProtocol.GRAPHSON_2_0); - } - - @Test - public void complexUdtTests() throws IOException { - UserDefinedType type1 = - new UserDefinedTypeBuilder("ks", "udt1").withField("a", INT).withField("b", TEXT).build(); - verifySerDeBinary(type1.newValue(1, "2")); - - TupleType secondNested = 
tupleOf(BIGINT, listOf(BIGINT)); - TupleType firstNested = tupleOf(TEXT, secondNested); - - UserDefinedType type2 = - new UserDefinedTypeBuilder("ks", "udt2") - .withField("a", INT) - .withField("b", TEXT) - .withField("c", type1) - .withField("mylist", listOf(BIGINT)) - .withField("mytuple_withlist", firstNested) - .build(); - - verifySerDeBinary( - type2.newValue( - 1, - "2", - type1.newValue(3, "4"), - ImmutableList.of(5L), - firstNested.newValue("6", secondNested.newValue(7L, ImmutableList.of(8L))))); - - UserDefinedType type3 = - new UserDefinedTypeBuilder("ks", "udt3") - .withField("a", listOf(INT)) - .withField("b", setOf(FLOAT)) - .withField("c", mapOf(TEXT, BIGINT)) - .withField("d", listOf(listOf(DOUBLE))) - .withField("e", setOf(setOf(FLOAT))) - .withField("f", listOf(tupleOf(INT, TEXT))) - .build(); - - verifySerDeBinary( - type3.newValue( - ImmutableList.of(1), - ImmutableSet.of(2.1f), - ImmutableMap.of("3", 4L), - ImmutableList.of(ImmutableList.of(5.1d, 6.1d), ImmutableList.of(7.1d)), - ImmutableSet.of(ImmutableSet.of(8.1f), ImmutableSet.of(9.1f)), - ImmutableList.of(tupleOf(INT, TEXT).newValue(10, "11")))); - } - - @Test - public void complexTypesAndGeoTests() throws IOException { - - TupleType tuple = tupleOf(DseDataTypes.POINT, DseDataTypes.LINE_STRING, DseDataTypes.POLYGON); - tuple.attach(context); - - verifySerDeBinary( - tuple.newValue( - Point.fromCoordinates(3.3, 4.4), - LineString.fromPoints( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(3, 3)), - Polygon.fromPoints( - Point.fromCoordinates(3, 4), - Point.fromCoordinates(5, 4), - Point.fromCoordinates(6, 6)))); - - UserDefinedType udt = - new UserDefinedTypeBuilder("ks", "udt1") - .withField("a", DseDataTypes.POINT) - .withField("b", DseDataTypes.LINE_STRING) - .withField("c", DseDataTypes.POLYGON) - .build(); - udt.attach(context); - - verifySerDeBinary( - udt.newValue( - Point.fromCoordinates(3.3, 4.4), - LineString.fromPoints( - 
Point.fromCoordinates(1, 1), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(3, 3)), - Polygon.fromPoints( - Point.fromCoordinates(3, 4), - Point.fromCoordinates(5, 4), - Point.fromCoordinates(6, 6)))); - } - - private void verifySerDeBinary(Object input) throws IOException { - Buffer result = graphBinaryModule.serialize(input); - Object deserialized = graphBinaryModule.deserialize(result); - result.release(); - assertThat(deserialized).isEqualTo(input); - } - - private void verifySerDeGraphson(Object input, GraphProtocol protocol) throws IOException { - ByteBuffer buffer = GraphSONUtils.serializeToByteBuffer(input, protocol); - Object deserialized = deserializeGraphson(buffer, protocol, input.getClass()); - - Object expected = (input instanceof Set) ? ImmutableList.copyOf((Set) input) : input; - assertThat(deserialized).isEqualTo(expected); - } - - private Object deserializeGraphson( - ByteBuffer buffer, GraphProtocol protocol, Class expectedClass) throws IOException { - List data = ImmutableList.of(buffer); - GraphNode node = GraphSONUtils.createGraphNode(data, protocol); - return node.as(expectedClass); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessorTest.java deleted file mode 100644 index 324c4ff4672..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessorTest.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.tenGraphRows; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestHandlerTestBase; -import com.datastax.dse.driver.internal.core.graph.GraphProtocol; -import com.datastax.dse.driver.internal.core.graph.GraphRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.graph.GraphRequestHandlerTestHarness; -import com.datastax.dse.driver.internal.core.graph.GraphSupportChecker; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import 
com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.reactivex.Flowable; -import java.io.IOException; -import java.util.List; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -public class ReactiveGraphRequestProcessorTest extends ContinuousCqlRequestHandlerTestBase { - - private GraphRequestAsyncProcessor asyncProcessor; - private GraphSupportChecker graphSupportChecker; - - @Before - public void setUp() { - DefaultDriverContext context = mock(DefaultDriverContext.class); - graphSupportChecker = mock(GraphSupportChecker.class); - asyncProcessor = Mockito.spy(new GraphRequestAsyncProcessor(context, graphSupportChecker)); - } - - @Test - public void should_be_able_to_process_graph_reactive_result_set() { - ReactiveGraphRequestProcessor processor = new ReactiveGraphRequestProcessor(asyncProcessor); - assertThat( - processor.canProcess( - ScriptGraphStatement.newInstance("g.V()"), - ReactiveGraphRequestProcessor.REACTIVE_GRAPH_RESULT_SET)) - .isTrue(); - } - - @Test - public void should_create_reactive_result_set() { - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withProtocolVersion(DSE_V1); - try (GraphRequestHandlerTestHarness harness = builder.build()) { - ReactiveGraphRequestProcessor processor = new ReactiveGraphRequestProcessor(asyncProcessor); - GraphStatement graphStatement = ScriptGraphStatement.newInstance("g.V()"); - assertThat( - processor.process(graphStatement, harness.getSession(), harness.getContext(), "test")) - .isInstanceOf(DefaultReactiveGraphResultSet.class); - } - } - - @Test - @UseDataProvider( - value = "allDseProtocolVersionsAndSupportedGraphProtocols", - location = DseTestDataProviders.class) - public void should_complete_single_page_result( - 
DseProtocolVersion version, GraphProtocol graphProtocol) throws IOException { - when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(false); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol); - - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (GraphRequestHandlerTestHarness harness = builder.build()) { - - DefaultSession session = harness.getSession(); - DefaultDriverContext context = harness.getContext(); - GraphStatement graphStatement = ScriptGraphStatement.newInstance("g.V()"); - - GraphBinaryModule graphBinaryModule = createGraphBinaryModule(context); - when(asyncProcessor.getGraphBinaryModule()).thenReturn(graphBinaryModule); - - ReactiveGraphResultSet publisher = - new ReactiveGraphRequestProcessor(asyncProcessor) - .process(graphStatement, session, context, "test"); - - Flowable rowsPublisher = Flowable.fromPublisher(publisher).cache(); - rowsPublisher.subscribe(); - - // emulate single page - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, graphBinaryModule, 1, true))); - - List rows = rowsPublisher.toList().blockingGet(); - - assertThat(rows).hasSize(10); - checkResultSet(rows); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()) - .hasSize(1) - .containsExactly(rows.get(0).getExecutionInfo()); - } - } - - @Test - @UseDataProvider( - value = "allDseProtocolVersionsAndSupportedGraphProtocols", - location = DseTestDataProviders.class) - public void should_complete_multi_page_result( - DseProtocolVersion version, GraphProtocol graphProtocol) throws IOException { - when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(true); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol); - - 
GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (GraphRequestHandlerTestHarness harness = builder.build()) { - - DefaultSession session = harness.getSession(); - DefaultDriverContext context = harness.getContext(); - GraphStatement graphStatement = ScriptGraphStatement.newInstance("g.V()"); - - GraphBinaryModule graphBinaryModule = createGraphBinaryModule(context); - when(asyncProcessor.getGraphBinaryModule()).thenReturn(graphBinaryModule); - - ReactiveGraphResultSet publisher = - new ReactiveGraphRequestProcessor(asyncProcessor) - .process(graphStatement, session, context, "test"); - - Flowable rowsPublisher = Flowable.fromPublisher(publisher).cache(); - rowsPublisher.subscribe(); - - // emulate page 1 - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, graphBinaryModule, 1, false))); - // emulate page 2 - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, graphBinaryModule, 2, true))); - - List rows = rowsPublisher.toList().blockingGet(); - assertThat(rows).hasSize(20); - checkResultSet(rows); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()) - .hasSize(2) - .containsExactly(rows.get(0).getExecutionInfo(), rows.get(10).getExecutionInfo()); - } - } - - private void checkResultSet(List rows) { - for (ReactiveGraphNode row : rows) { - assertThat(row.isVertex()).isTrue(); - ExecutionInfo executionInfo = row.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - 
assertThat(executionInfo.getWarnings()).isEmpty(); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/schema/refresh/GraphSchemaRefreshTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/schema/refresh/GraphSchemaRefreshTest.java deleted file mode 100644 index 0d05f129520..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/schema/refresh/GraphSchemaRefreshTest.java +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.schema.refresh; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseGraphTableMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseEdgeMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseKeyspaceMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseTableMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseVertexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.events.KeyspaceChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TableChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; -import org.junit.Before; -import 
org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class GraphSchemaRefreshTest { - - private static final DefaultDseTableMetadata OLD_TABLE = - newTable( - CqlIdentifier.fromInternal("ks_with_engine"), - CqlIdentifier.fromInternal("tbl"), - null, - null); - private static final DefaultDseKeyspaceMetadata OLD_KS1 = newKeyspace("ks1", null); - private static final DefaultDseKeyspaceMetadata KS_WITH_ENGINE = - newKeyspace( - CqlIdentifier.fromInternal("ks_with_engine"), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), OLD_TABLE)); - - @Mock private InternalDriverContext context; - @Mock private ChannelFactory channelFactory; - private DefaultMetadata oldMetadata; - - @Before - public void setup() { - when(context.getChannelFactory()).thenReturn(channelFactory); - oldMetadata = - DefaultMetadata.EMPTY.withSchema( - ImmutableMap.of(OLD_KS1.getName(), OLD_KS1, KS_WITH_ENGINE.getName(), KS_WITH_ENGINE), - false, - context); - } - - @Test - public void should_detect_created_keyspace_without_graph_engine() { - DefaultDseKeyspaceMetadata ks2 = newKeyspace("ks2", null); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of( - OLD_KS1.getName(), - OLD_KS1, - KS_WITH_ENGINE.getName(), - KS_WITH_ENGINE, - ks2.getName(), - ks2)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(3); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.created(ks2)); - } - - @Test - public void should_detect_created_keyspace_with_graph_engine() { - DefaultDseKeyspaceMetadata ks2 = newKeyspace("ks2", "Core"); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of( - OLD_KS1.getName(), - OLD_KS1, - KS_WITH_ENGINE.getName(), - KS_WITH_ENGINE, - ks2.getName(), - ks2)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - 
assertThat(result.newMetadata.getKeyspaces()).hasSize(3); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.created(ks2)); - } - - @Test - public void should_detect_top_level_graph_engine_update_in_keyspace() { - // Change only one top-level option (graph_engine) - DefaultDseKeyspaceMetadata newKs1 = newKeyspace("ks1", "Core"); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), KS_WITH_ENGINE, OLD_KS1.getName(), newKs1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.updated(OLD_KS1, newKs1)); - } - - @Test - public void should_detect_adding_and_renaming_and_removing_vertex_label() { - DefaultDseTableMetadata newTable = - newTable( - KS_WITH_ENGINE.getName(), - CqlIdentifier.fromInternal("tbl"), - new DefaultDseVertexMetadata(CqlIdentifier.fromInternal("someLabel")), - null); - DefaultDseKeyspaceMetadata ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); - assertThat(result.newMetadata.getKeyspaces().get(KS_WITH_ENGINE.getName())).isNotNull(); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex()) - .isNotNull(); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex() - .get() - .getLabelName() - .asInternal()) - 
.isEqualTo("someLabel"); - - // now rename the vertex label - newTable = - newTable( - KS_WITH_ENGINE.getName(), - CqlIdentifier.fromInternal("tbl"), - new DefaultDseVertexMetadata(CqlIdentifier.fromInternal("someNewLabel")), - null); - ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); - refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex() - .get() - .getLabelName() - .asInternal()) - .isEqualTo("someNewLabel"); - - // now remove the vertex label from the table - DefaultMetadata metadataWithVertexLabel = result.newMetadata; - DefaultDseTableMetadata tableWithRemovedLabel = - newTable(KS_WITH_ENGINE.getName(), CqlIdentifier.fromInternal("tbl"), null, null); - ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), tableWithRemovedLabel)); - refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - result = refresh.compute(metadataWithVertexLabel, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events) - .containsExactly(TableChangeEvent.updated(newTable, tableWithRemovedLabel)); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex() - .isPresent()) - .isFalse(); - } - - @Test - public void should_detect_adding_and_renaming_and_removing_edge_label() { - DefaultDseTableMetadata newTable = - newTable( - 
KS_WITH_ENGINE.getName(), - CqlIdentifier.fromInternal("tbl"), - null, - newEdgeMetadata( - CqlIdentifier.fromInternal("created"), - CqlIdentifier.fromInternal("person"), - CqlIdentifier.fromInternal("software"))); - DefaultDseKeyspaceMetadata ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); - assertThat(result.newMetadata.getKeyspaces().get(KS_WITH_ENGINE.getName())).isNotNull(); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex()) - .isNotNull(); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getEdge() - .get() - .getLabelName() - .asInternal()) - .isEqualTo("created"); - - // now rename the edge label - newTable = - newTable( - KS_WITH_ENGINE.getName(), - CqlIdentifier.fromInternal("tbl"), - null, - newEdgeMetadata( - CqlIdentifier.fromInternal("CHANGED"), - CqlIdentifier.fromInternal("person"), - CqlIdentifier.fromInternal("software"))); - ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); - refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); - assertThat( - ((DseGraphTableMetadata) - result - 
.newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getEdge() - .get() - .getLabelName() - .asInternal()) - .isEqualTo("CHANGED"); - - // now remove the edge label from the table - DefaultMetadata metadataWithEdgeLabel = result.newMetadata; - DefaultDseTableMetadata tableWithRemovedLabel = - newTable(KS_WITH_ENGINE.getName(), CqlIdentifier.fromInternal("tbl"), null, null); - ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), tableWithRemovedLabel)); - refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - result = refresh.compute(metadataWithEdgeLabel, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events) - .containsExactly(TableChangeEvent.updated(newTable, tableWithRemovedLabel)); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getEdge() - .isPresent()) - .isFalse(); - } - - private static DefaultDseKeyspaceMetadata newKeyspace(String name, String graphEngine) { - return new DefaultDseKeyspaceMetadata( - CqlIdentifier.fromInternal(name), - false, - false, - graphEngine, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - private static DefaultDseKeyspaceMetadata newKeyspace( - CqlIdentifier name, String graphEngine, @NonNull Map tables) { - return new DefaultDseKeyspaceMetadata( - name, - false, - false, - graphEngine, - Collections.emptyMap(), - Collections.emptyMap(), - tables, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - private static DefaultDseTableMetadata newTable( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - @Nullable DseVertexMetadata vertex, - @Nullable DseEdgeMetadata 
edge) { - ImmutableList cols = - ImmutableList.of( - new DefaultColumnMetadata( - keyspace, - CqlIdentifier.fromInternal("parent"), - CqlIdentifier.fromInternal("id"), - DataTypes.INT, - false)); - return new DefaultDseTableMetadata( - keyspace, - name, - null, - false, - false, - cols, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - vertex, - edge); - } - - private static DefaultDseEdgeMetadata newEdgeMetadata( - @NonNull CqlIdentifier labelName, - @NonNull CqlIdentifier fromTable, - @NonNull CqlIdentifier toTable) { - return new DefaultDseEdgeMetadata( - labelName, - fromTable, - fromTable, - Collections.emptyList(), - Collections.emptyList(), - toTable, - toTable, - Collections.emptyList(), - Collections.emptyList()); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/AddressFormatterTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/AddressFormatterTest.java deleted file mode 100644 index 85af9b5691b..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/AddressFormatterTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class AddressFormatterTest { - - @Test - @UseDataProvider("addressesProvider") - public void should_format_addresses(Object address, String expected) { - // when - String result = AddressFormatter.nullSafeToString(address); - - // then - assertThat(result).isEqualTo(expected); - } - - @DataProvider - public static Object[][] addressesProvider() throws UnknownHostException { - return new Object[][] { - {new InetSocketAddress(8888), "0.0.0.0:8888"}, - {new InetSocketAddress("127.0.0.1", 8888), "127.0.0.1:8888"}, - {InetSocketAddress.createUnresolved("127.0.0.2", 8080), "127.0.0.2:8080"}, - {InetAddress.getByName("127.0.0.1"), "127.0.0.1"}, - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinderTest.java deleted file mode 100644 index d5466b23dbc..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinderTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_HOSTNAME_VALIDATION; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collections; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class ConfigAntiPatternsFinderTest { - - private static final ImmutableMap SSL_ANTI_PATTERN = - ImmutableMap.of( - "sslWithoutCertValidation", - "Client-to-node encryption is enabled but server certificate validation is disabled"); - - @Test - @UseDataProvider("sslConfigProvider") - public void should_find_ssl_anti_pattern( - boolean sslEngineFactoryClassDefined, - boolean hostnameValidation, - Map expected) { - // given - InternalDriverContext context = - mockDefaultProfile(sslEngineFactoryClassDefined, hostnameValidation); - - // when - 
Map antiPatterns = new ConfigAntiPatternsFinder().findAntiPatterns(context); - - // then - assertThat(antiPatterns).isEqualTo(expected); - } - - private InternalDriverContext mockDefaultProfile( - boolean sslEngineFactoryClassDefined, boolean hostnameValidation) { - InternalDriverContext context = mock(InternalDriverContext.class); - DriverConfig driverConfig = mock(DriverConfig.class); - when(context.getConfig()).thenReturn(driverConfig); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - when(profile.isDefined(SSL_ENGINE_FACTORY_CLASS)).thenReturn(sslEngineFactoryClassDefined); - when(profile.getBoolean(SSL_HOSTNAME_VALIDATION, false)).thenReturn(hostnameValidation); - when(driverConfig.getDefaultProfile()).thenReturn(profile); - return context; - } - - @DataProvider - public static Object[][] sslConfigProvider() { - return new Object[][] { - {true, true, Collections.emptyMap()}, - {true, false, SSL_ANTI_PATTERN}, - {false, false, Collections.emptyMap()}, - {false, true, Collections.emptyMap()} - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinderTest.java deleted file mode 100644 index dde6db6059e..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinderTest.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collection; -import java.util.Set; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DataCentersFinderTest { - - @Test - @UseDataProvider("hostProvider") - public void should_detect_data_centers( - int numberOfRemoteHosts, - String dc1, - NodeDistance h1Distance, - String dc2, - NodeDistance h2Distance, - Set expected) { - // given - DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); - when(executionProfile.getInt(CONNECTION_POOL_REMOTE_SIZE)).thenReturn(numberOfRemoteHosts); - Collection nodes = mockNodes(dc1, h1Distance, dc2, h2Distance); - - // when - Set dataCenters = new DataCentersFinder().getDataCenters(nodes, 
executionProfile); - - // then - assertThat(dataCenters).isEqualTo(Sets.newHashSet(expected)); - } - - @DataProvider - public static Object[][] hostProvider() { - return new Object[][] { - {1, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.REMOTE, Sets.newHashSet("dc1", "dc2")}, - {1, "dc1", NodeDistance.LOCAL, "dc1", NodeDistance.REMOTE, Sets.newHashSet("dc1")}, - {0, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.REMOTE, Sets.newHashSet("dc1")}, - {0, "dc1", NodeDistance.IGNORED, "dc2", NodeDistance.REMOTE, Sets.newHashSet()}, - {1, "dc1", NodeDistance.IGNORED, "dc2", NodeDistance.REMOTE, Sets.newHashSet("dc2")}, - {1, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.IGNORED, Sets.newHashSet("dc1")}, - {0, "dc1", NodeDistance.IGNORED, "dc2", NodeDistance.REMOTE, Sets.newHashSet()}, - {0, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.IGNORED, Sets.newHashSet("dc1")}, - }; - } - - private Collection mockNodes( - String dc1, NodeDistance h1Distance, String dc2, NodeDistance h2Distance) { - Node n1 = mock(Node.class); - when(n1.getDatacenter()).thenReturn(dc1); - when(n1.getDistance()).thenReturn(h1Distance); - - Node n2 = mock(Node.class); - when(n2.getDatacenter()).thenReturn(dc2); - when(n2.getDistance()).thenReturn(h2Distance); - - return ImmutableSet.of(n1, n2); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfileMockUtil.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfileMockUtil.java deleted file mode 100644 index de0f3a9d60b..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfileMockUtil.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.api.core.config.DseDriverOption.GRAPH_TRAVERSAL_SOURCE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.AUTH_PROVIDER_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.HEARTBEAT_INTERVAL; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.PROTOCOL_COMPRESSION; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.RECONNECTION_BASE_DELAY; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_CONSISTENCY; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TIMEOUT; -import static 
com.datastax.oss.driver.api.core.config.DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SPECULATIVE_EXECUTION_MAX; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import java.time.Duration; - -class ExecutionProfileMockUtil { - static final String DEFAULT_LOCAL_DC = "local-dc"; - static final int SPECEX_MAX_DEFAULT = 100; - static final int SPECEX_DELAY_DEFAULT = 20; - - static DriverExecutionProfile mockDefaultExecutionProfile() { - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - - when(profile.getDuration(REQUEST_TIMEOUT)).thenReturn(Duration.ofMillis(100)); - when(profile.getString(LOAD_BALANCING_POLICY_CLASS)).thenReturn("LoadBalancingPolicyImpl"); - when(profile.isDefined(LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS)).thenReturn(true); - when(profile.isDefined(LOAD_BALANCING_LOCAL_DATACENTER)).thenReturn(true); - when(profile.getString(LOAD_BALANCING_LOCAL_DATACENTER)).thenReturn(DEFAULT_LOCAL_DC); - when(profile.isDefined(SPECULATIVE_EXECUTION_MAX)).thenReturn(true); - when(profile.getInt(SPECULATIVE_EXECUTION_MAX)).thenReturn(SPECEX_MAX_DEFAULT); - when(profile.isDefined(SPECULATIVE_EXECUTION_DELAY)).thenReturn(true); - when(profile.getInt(SPECULATIVE_EXECUTION_DELAY)).thenReturn(SPECEX_DELAY_DEFAULT); - when(profile.getString(SPECULATIVE_EXECUTION_POLICY_CLASS)) - .thenReturn("SpeculativeExecutionImpl"); - when(profile.getString(REQUEST_CONSISTENCY)).thenReturn("LOCAL_ONE"); - when(profile.getString(REQUEST_SERIAL_CONSISTENCY)).thenReturn("SERIAL"); - 
when(profile.getInt(CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - when(profile.getInt(CONNECTION_POOL_REMOTE_SIZE)).thenReturn(1); - when(profile.getString(eq(PROTOCOL_COMPRESSION), any())).thenReturn("none"); - when(profile.getDuration(HEARTBEAT_INTERVAL)).thenReturn(Duration.ofMillis(100)); - when(profile.getDuration(RECONNECTION_BASE_DELAY)).thenReturn(Duration.ofMillis(100)); - when(profile.isDefined(SSL_ENGINE_FACTORY_CLASS)).thenReturn(true); - when(profile.getString(eq(AUTH_PROVIDER_CLASS), any())).thenReturn("AuthProviderImpl"); - when(profile.getString(GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("src-graph"); - return profile; - } - - static DriverExecutionProfile mockNonDefaultRequestTimeoutExecutionProfile() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getDuration(REQUEST_TIMEOUT)).thenReturn(Duration.ofMillis(50)); - return profile; - } - - static DriverExecutionProfile mockNonDefaultLoadBalancingExecutionProfile() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getString(LOAD_BALANCING_POLICY_CLASS)).thenReturn("NonDefaultLoadBalancing"); - return profile; - } - - static DriverExecutionProfile mockUndefinedLocalDcExecutionProfile() { - DriverExecutionProfile profile = mockNonDefaultLoadBalancingExecutionProfile(); - when(profile.isDefined(LOAD_BALANCING_LOCAL_DATACENTER)).thenReturn(false); - return profile; - } - - static DriverExecutionProfile mockNonDefaultSpeculativeExecutionInfo() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getString(SPECULATIVE_EXECUTION_POLICY_CLASS)) - .thenReturn("NonDefaultSpecexPolicy"); - return profile; - } - - static DriverExecutionProfile mockNonDefaultConsistency() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getString(REQUEST_CONSISTENCY)).thenReturn("ALL"); - return profile; - } - - static DriverExecutionProfile mockNonDefaultSerialConsistency() { - DriverExecutionProfile 
profile = mockDefaultExecutionProfile(); - when(profile.getString(REQUEST_SERIAL_CONSISTENCY)).thenReturn("ONE"); - return profile; - } - - static DriverExecutionProfile mockNonDefaultGraphOptions() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getString(GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("non-default-graph"); - return profile; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinderTest.java deleted file mode 100644 index fc92ab20521..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinderTest.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.DEFAULT_LOCAL_DC; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.SPECEX_DELAY_DEFAULT; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.SPECEX_MAX_DEFAULT; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockDefaultExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultConsistency; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultGraphOptions; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultLoadBalancingExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultRequestTimeoutExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultSerialConsistency; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultSpeculativeExecutionInfo; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockUndefinedLocalDcExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_LOAD_BALANCING_PACKAGE; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_SPECULATIVE_EXECUTION_PACKAGE; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.internal.core.insights.schema.LoadBalancingInfo; -import com.datastax.dse.driver.internal.core.insights.schema.SpecificExecutionProfile; -import com.datastax.dse.driver.internal.core.insights.schema.SpeculativeExecutionInfo; -import 
com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mockito; - -@RunWith(DataProviderRunner.class) -public class ExecutionProfilesInfoFinderTest { - - @Test - public void should_include_info_about_default_profile() { - // given - DriverExecutionProfile defaultExecutionProfile = mockDefaultExecutionProfile(); - Map profiles = - ImmutableMap.of("default", defaultExecutionProfile); - - InternalDriverContext context = - mockDriverContextWithProfiles(defaultExecutionProfile, profiles); - - // when - Map executionProfilesInfo = - new ExecutionProfilesInfoFinder().getExecutionProfilesInfo(context); - - // then - assertThat(executionProfilesInfo) - .isEqualTo( - ImmutableMap.of( - "default", - new SpecificExecutionProfile( - 100, - new LoadBalancingInfo( - "LoadBalancingPolicyImpl", - ImmutableMap.of("localDataCenter", "local-dc", "filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - new SpeculativeExecutionInfo( - "SpeculativeExecutionImpl", - ImmutableMap.of("maxSpeculativeExecutions", 100, "delay", 20), - DEFAULT_SPECULATIVE_EXECUTION_PACKAGE), - "LOCAL_ONE", - "SERIAL", - ImmutableMap.of("source", "src-graph")))); - } - - @Test - @UseDataProvider("executionProfileProvider") - public void should_include_info_about_default_profile_and_only_difference_for_specific_profile( - DriverExecutionProfile nonDefaultExecutionProfile, SpecificExecutionProfile 
expected) { - // given - - DriverExecutionProfile defaultExecutionProfile = mockDefaultExecutionProfile(); - Map profiles = - ImmutableMap.of( - "default", defaultExecutionProfile, "non-default", nonDefaultExecutionProfile); - InternalDriverContext context = - mockDriverContextWithProfiles(defaultExecutionProfile, profiles); - // when - Map executionProfilesInfo = - new ExecutionProfilesInfoFinder().getExecutionProfilesInfo(context); - - // then - assertThat(executionProfilesInfo) - .isEqualTo( - ImmutableMap.of( - "default", - new SpecificExecutionProfile( - 100, - new LoadBalancingInfo( - "LoadBalancingPolicyImpl", - ImmutableMap.of("localDataCenter", "local-dc", "filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - new SpeculativeExecutionInfo( - "SpeculativeExecutionImpl", - ImmutableMap.of("maxSpeculativeExecutions", 100, "delay", 20), - DEFAULT_SPECULATIVE_EXECUTION_PACKAGE), - "LOCAL_ONE", - "SERIAL", - ImmutableMap.of("source", "src-graph")), - "non-default", - expected)); - } - - @DataProvider - public static Object[][] executionProfileProvider() { - return new Object[][] { - { - mockNonDefaultRequestTimeoutExecutionProfile(), - new SpecificExecutionProfile(50, null, null, null, null, null) - }, - { - mockNonDefaultLoadBalancingExecutionProfile(), - new SpecificExecutionProfile( - null, - new LoadBalancingInfo( - "NonDefaultLoadBalancing", - ImmutableMap.of("localDataCenter", DEFAULT_LOCAL_DC, "filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - null, - null, - null, - null) - }, - { - mockUndefinedLocalDcExecutionProfile(), - new SpecificExecutionProfile( - null, - new LoadBalancingInfo( - "NonDefaultLoadBalancing", - ImmutableMap.of("filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - null, - null, - null, - null) - }, - { - mockNonDefaultSpeculativeExecutionInfo(), - new SpecificExecutionProfile( - null, - null, - new SpeculativeExecutionInfo( - "NonDefaultSpecexPolicy", - ImmutableMap.of( - "maxSpeculativeExecutions", 
SPECEX_MAX_DEFAULT, "delay", SPECEX_DELAY_DEFAULT), - DEFAULT_SPECULATIVE_EXECUTION_PACKAGE), - null, - null, - null) - }, - { - mockNonDefaultConsistency(), - new SpecificExecutionProfile(null, null, null, "ALL", null, null) - }, - { - mockNonDefaultSerialConsistency(), - new SpecificExecutionProfile(null, null, null, null, "ONE", null) - }, - { - mockNonDefaultGraphOptions(), - new SpecificExecutionProfile( - null, null, null, null, null, ImmutableMap.of("source", "non-default-graph")) - }, - { - mockDefaultExecutionProfile(), - new SpecificExecutionProfile(null, null, null, null, null, null) - } - }; - } - - @Test - public void should_not_include_null_fields_in_json() throws JsonProcessingException { - // given - SpecificExecutionProfile specificExecutionProfile = - new SpecificExecutionProfile(50, null, null, "ONE", null, ImmutableMap.of("a", "b")); - - // when - String result = new ObjectMapper().writeValueAsString(specificExecutionProfile); - - // then - assertThat(result) - .isEqualTo("{\"readTimeout\":50,\"consistency\":\"ONE\",\"graphOptions\":{\"a\":\"b\"}}"); - } - - @Test - public void should_include_empty_execution_profile_if_has_all_nulls() - throws JsonProcessingException { - // given - Map executionProfiles = - ImmutableMap.of("p", new SpecificExecutionProfile(null, null, null, null, null, null)); - - // when - String result = new ObjectMapper().writeValueAsString(executionProfiles); - - // then - assertThat(result).isEqualTo("{\"p\":{}}"); - } - - private InternalDriverContext mockDriverContextWithProfiles( - DriverExecutionProfile defaultExecutionProfile, - Map profiles) { - InternalDriverContext context = mock(InternalDriverContext.class); - DriverConfig driverConfig = mock(DriverConfig.class); - Mockito.>when(driverConfig.getProfiles()) - .thenReturn(profiles); - when(driverConfig.getDefaultProfile()).thenReturn(defaultExecutionProfile); - when(context.getConfig()).thenReturn(driverConfig); - return context; - } -} diff --git 
a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsClientTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsClientTest.java deleted file mode 100644 index 74869893b72..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsClientTest.java +++ /dev/null @@ -1,535 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V2; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockDefaultExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultRequestTimeoutExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_AUTH_PROVIDER_PACKAGE; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_LOAD_BALANCING_PACKAGE; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_SPECULATIVE_EXECUTION_PACKAGE; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; -import com.datastax.dse.driver.internal.core.insights.schema.AuthProviderType; -import com.datastax.dse.driver.internal.core.insights.schema.Insight; -import com.datastax.dse.driver.internal.core.insights.schema.InsightMetadata; -import com.datastax.dse.driver.internal.core.insights.schema.InsightType; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.CPUS; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.OS; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.RuntimeAndCompileTimeVersions; -import 
com.datastax.dse.driver.internal.core.insights.schema.InsightsStartupData; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsStatusData; -import com.datastax.dse.driver.internal.core.insights.schema.LoadBalancingInfo; -import com.datastax.dse.driver.internal.core.insights.schema.PoolSizeByHostDistance; -import com.datastax.dse.driver.internal.core.insights.schema.ReconnectionPolicyInfo; -import com.datastax.dse.driver.internal.core.insights.schema.SSL; -import com.datastax.dse.driver.internal.core.insights.schema.SessionStateForNode; -import com.datastax.dse.driver.internal.core.insights.schema.SpecificExecutionProfile; -import com.datastax.dse.driver.internal.core.insights.schema.SpeculativeExecutionInfo; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.StartupOptionsBuilder; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.PoolManager; -import com.datastax.oss.driver.shaded.guava.common.base.Suppliers; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import 
com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.netty.channel.DefaultEventLoop; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mockito; - -@RunWith(DataProviderRunner.class) -public class InsightsClientTest { - private static final StackTraceElement[] EMPTY_STACK_TRACE = {}; - private static final Map EMPTY_OBJECT_MAP = Collections.emptyMap(); - private static final Supplier MOCK_TIME_SUPPLIER = Suppliers.ofInstance(1L); - private static final InsightsConfiguration INSIGHTS_CONFIGURATION = - new InsightsConfiguration(true, 300000L, new DefaultEventLoop()); - - @Test - public void should_construct_json_event_startup_message() throws IOException { - // given - DefaultDriverContext context = mockDefaultDriverContext(); - PlatformInfoFinder platformInfoFinder = mock(PlatformInfoFinder.class); - OS os = new OS("linux", "1.2", "x64"); - CPUS cpus = new CPUS(8, "intel i7"); - Map javaDeps = - ImmutableMap.of("version", new RuntimeAndCompileTimeVersions("1.8.0", "1.8.0", false)); - Map> runtimeInfo = - ImmutableMap.of("java", javaDeps); - InsightsPlatformInfo insightsPlatformInfo = new InsightsPlatformInfo(os, cpus, runtimeInfo); - when(platformInfoFinder.getInsightsPlatformInfo()).thenReturn(insightsPlatformInfo); - - 
ConfigAntiPatternsFinder configAntiPatternsFinder = mock(ConfigAntiPatternsFinder.class); - when(configAntiPatternsFinder.findAntiPatterns(any(DefaultDriverContext.class))) - .thenReturn( - ImmutableMap.of( - "contactPointsMultipleDCs", - "Contact points contain hosts from multiple data centers")); - - DataCentersFinder dataCentersFinder = mock(DataCentersFinder.class); - when(dataCentersFinder.getDataCenters(any(DefaultDriverContext.class))) - .thenReturn(Sets.newHashSet("dc1", "dc2")); - ReconnectionPolicyInfoFinder reconnectionPolicyInfoFinder = - mock(ReconnectionPolicyInfoFinder.class); - when(reconnectionPolicyInfoFinder.getReconnectionPolicyInfo(any(), any())) - .thenReturn( - new ReconnectionPolicyInfo( - "reconnection-policy-a", ImmutableMap.of("opt-a", 1), "com.datastax.dse")); - - InsightsClient insightsClient = - new InsightsClient( - context, - MOCK_TIME_SUPPLIER, - INSIGHTS_CONFIGURATION, - platformInfoFinder, - reconnectionPolicyInfoFinder, - new ExecutionProfilesInfoFinder(), - configAntiPatternsFinder, - dataCentersFinder, - EMPTY_STACK_TRACE); - - // when - String startupMessage = insightsClient.createStartupMessage(); - Insight insight = - new ObjectMapper() - .readValue(startupMessage, new TypeReference>() {}); - - // then - assertThat(insight.getMetadata()) - .isEqualTo( - new InsightMetadata( - "driver.startup", - 1L, - ImmutableMap.of("language", "java"), - InsightType.EVENT, - "v1")); - - InsightsStartupData insightData = insight.getInsightData(); - assertThat(insightData.getClientId()).isEqualTo("client-id"); - assertThat(insightData.getSessionId()).isNotNull(); - assertThat(insightData.getDriverName()).isEqualTo("DataStax Enterprise Java Driver"); - assertThat(insightData.getDriverVersion()).isNotEmpty(); - assertThat(insightData.getApplicationName()).isEqualTo("app-name"); - assertThat(insightData.getApplicationVersion()).isEqualTo("1.0.0"); - assertThat(insightData.isApplicationNameWasGenerated()).isEqualTo(false); - 
assertThat(insightData.getContactPoints()) - .isEqualTo(ImmutableMap.of("localhost", Collections.singletonList("127.0.0.1:9999"))); - - assertThat(insightData.getInitialControlConnection()).isEqualTo("127.0.0.1:10"); - assertThat(insightData.getLocalAddress()).isEqualTo("127.0.0.1"); - assertThat(insightData.getHostName()).isNotEmpty(); - assertThat(insightData.getProtocolVersion()).isEqualTo(DSE_V2.getCode()); - assertThat(insightData.getExecutionProfiles()) - .isEqualTo( - ImmutableMap.of( - "default", - new SpecificExecutionProfile( - 100, - new LoadBalancingInfo( - "LoadBalancingPolicyImpl", - ImmutableMap.of("localDataCenter", "local-dc", "filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - new SpeculativeExecutionInfo( - "SpeculativeExecutionImpl", - ImmutableMap.of("maxSpeculativeExecutions", 100, "delay", 20), - DEFAULT_SPECULATIVE_EXECUTION_PACKAGE), - "LOCAL_ONE", - "SERIAL", - ImmutableMap.of("source", "src-graph")), - "non-default", - new SpecificExecutionProfile(50, null, null, null, null, null))); - assertThat(insightData.getPoolSizeByHostDistance()) - .isEqualTo(new PoolSizeByHostDistance(2, 1, 0)); - assertThat(insightData.getHeartbeatInterval()).isEqualTo(100); - assertThat(insightData.getCompression()).isEqualTo("none"); - assertThat(insightData.getReconnectionPolicy()) - .isEqualTo( - new ReconnectionPolicyInfo( - "reconnection-policy-a", ImmutableMap.of("opt-a", 1), "com.datastax.dse")); - assertThat(insightData.getSsl()).isEqualTo(new SSL(true, false)); - assertThat(insightData.getAuthProvider()) - .isEqualTo(new AuthProviderType("AuthProviderImpl", DEFAULT_AUTH_PROVIDER_PACKAGE)); - assertThat(insightData.getOtherOptions()).isEqualTo(EMPTY_OBJECT_MAP); - assertThat(insightData.getPlatformInfo()).isEqualTo(insightsPlatformInfo); - assertThat(insightData.getConfigAntiPatterns()) - .isEqualTo( - ImmutableMap.of( - "contactPointsMultipleDCs", - "Contact points contain hosts from multiple data centers")); - 
assertThat(insightData.getPeriodicStatusInterval()).isEqualTo(300); - assertThat(insightData.getDataCenters()).isEqualTo(Sets.newHashSet("dc1", "dc2")); - } - - @Test - public void should_group_contact_points_by_host_name() { - // given - Set contactPoints = - ImmutableSet.of( - InetSocketAddress.createUnresolved("127.0.0.1", 8080), - InetSocketAddress.createUnresolved("127.0.0.1", 8081), - InetSocketAddress.createUnresolved("127.0.0.2", 8081)); - - Map> expected = - ImmutableMap.of( - "127.0.0.1", - ImmutableList.of("127.0.0.1:8080", "127.0.0.1:8081"), - "127.0.0.2", - ImmutableList.of("127.0.0.2:8081")); - - // when - Map> resolvedContactPoints = - InsightsClient.getResolvedContactPoints(contactPoints); - - // then - assertThat(resolvedContactPoints).isEqualTo(expected); - } - - @Test - public void should_construct_json_event_status_message() throws IOException { - // given - InsightsClient insightsClient = - new InsightsClient( - mockDefaultDriverContext(), - MOCK_TIME_SUPPLIER, - INSIGHTS_CONFIGURATION, - null, - null, - null, - null, - null, - EMPTY_STACK_TRACE); - - // when - String statusMessage = insightsClient.createStatusMessage(); - - // then - Insight insight = - new ObjectMapper() - .readValue(statusMessage, new TypeReference>() {}); - assertThat(insight.getMetadata()) - .isEqualTo( - new InsightMetadata( - "driver.status", 1L, ImmutableMap.of("language", "java"), InsightType.EVENT, "v1")); - InsightsStatusData insightData = insight.getInsightData(); - assertThat(insightData.getClientId()).isEqualTo("client-id"); - assertThat(insightData.getSessionId()).isNotNull(); - assertThat(insightData.getControlConnection()).isEqualTo("127.0.0.1:10"); - assertThat(insightData.getConnectedNodes()) - .isEqualTo( - ImmutableMap.of( - "127.0.0.1:10", new SessionStateForNode(1, 10), - "127.0.0.1:20", new SessionStateForNode(2, 20))); - } - - @Test - public void should_schedule_task_with_initial_delay() { - // given - final AtomicInteger counter = new AtomicInteger(); 
- Runnable runnable = counter::incrementAndGet; - - // when - InsightsClient.scheduleInsightsTask(100L, Executors.newScheduledThreadPool(1), runnable); - - // then - await().atMost(1, SECONDS).until(() -> counter.get() >= 1); - } - - @Test - @UseDataProvider(value = "stackTraceProvider") - public void should_get_caller_of_create_cluster(StackTraceElement[] stackTrace, String expected) { - // when - String result = InsightsClient.getClusterCreateCaller(stackTrace); - - // then - assertThat(result).isEqualTo(expected); - } - - @Test - @SuppressWarnings("ResultOfMethodCallIgnored") - public void should_execute_should_send_event_check_only_once() - throws UnknownHostException, InterruptedException { - // given - InsightsConfiguration insightsConfiguration = mock(InsightsConfiguration.class); - when(insightsConfiguration.isMonitorReportingEnabled()).thenReturn(true); - when(insightsConfiguration.getStatusEventDelayMillis()).thenReturn(10L); - when(insightsConfiguration.getExecutor()).thenReturn(new DefaultEventLoop()); - - InsightsClient insightsClient = - new InsightsClient( - mockDefaultDriverContext(), - MOCK_TIME_SUPPLIER, - insightsConfiguration, - null, - null, - null, - null, - null, - EMPTY_STACK_TRACE); - - // when - insightsClient.scheduleStatusMessageSend(); - // emulate periodic calls to sendStatusMessage - insightsClient.sendStatusMessage(); - insightsClient.sendStatusMessage(); - insightsClient.sendStatusMessage(); - - // then - verify(insightsConfiguration, times(1)).isMonitorReportingEnabled(); - } - - @DataProvider - public static Object[][] stackTraceProvider() { - StackTraceElement[] onlyInitCall = - new StackTraceElement[] { - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 94), - }; - - StackTraceElement[] stackTraceElementsWithoutInitCall = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new 
StackTraceElement( - "com.datastax.driver.core.InsightsClient", - "getClusterCreateCaller", - "InsightsClient.java", - 302) - }; - StackTraceElement[] stackTraceWithOneInitCall = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - }; - StackTraceElement[] stackTraceWithOneInitCallAndCaller = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - new StackTraceElement( - "com.example.ActualCallerNameApp", "main", "ActualCallerNameApp.java", 1) - }; - - StackTraceElement[] stackTraceWithTwoInitCallsAndCaller = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildDefaultSessionAsync", - "SessionBuilder.java", - 300), - new StackTraceElement( - "com.example.ActualCallerNameApp", "main", "ActualCallerNameApp.java", 1) - }; - StackTraceElement[] stackTraceWithChainOfInitCalls = - new StackTraceElement[] { - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildDefaultSessionAsync", - "SessionBuilder.java", - 332), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildAsync", - "SessionBuilder.java", - 291), - new StackTraceElement( - 
"com.datastax.oss.driver.api.core.session.SessionBuilder", - "build", - "SessionBuilder.java", - 306) - }; - StackTraceElement[] stackTraceWithChainOfInitCallsAndCaller = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildContext", - "SessionBuilder.java", - 687), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildDefaultSessionAsync", - "SessionBuilder.java", - 332), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildAsync", - "SessionBuilder.java", - 291), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "build", - "SessionBuilder.java", - 306), - new StackTraceElement( - "com.example.ActualCallerNameApp", "main", "ActualCallerNameApp.java", 8) - }; - - return new Object[][] { - {new StackTraceElement[] {}, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {stackTraceElementsWithoutInitCall, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {stackTraceWithOneInitCall, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {onlyInitCall, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {stackTraceWithOneInitCallAndCaller, "com.example.ActualCallerNameApp"}, - {stackTraceWithTwoInitCallsAndCaller, "com.example.ActualCallerNameApp"}, - {stackTraceWithChainOfInitCalls, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {stackTraceWithChainOfInitCallsAndCaller, "com.example.ActualCallerNameApp"} - }; - } - - private DefaultDriverContext mockDefaultDriverContext() throws UnknownHostException { - DefaultDriverContext context = mock(DefaultDriverContext.class); - mockConnectionPools(context); - MetadataManager manager = mock(MetadataManager.class); - 
when(context.getMetadataManager()).thenReturn(manager); - Metadata metadata = mock(Metadata.class); - when(manager.getMetadata()).thenReturn(metadata); - Node node = mock(Node.class); - when(node.getExtras()) - .thenReturn( - ImmutableMap.of( - DseNodeProperties.DSE_VERSION, Objects.requireNonNull(Version.parse("6.0.5")))); - when(metadata.getNodes()).thenReturn(ImmutableMap.of(UUID.randomUUID(), node)); - DriverExecutionProfile defaultExecutionProfile = mockDefaultExecutionProfile(); - DriverExecutionProfile nonDefaultExecutionProfile = - mockNonDefaultRequestTimeoutExecutionProfile(); - - Map startupOptions = new HashMap<>(); - startupOptions.put(StartupOptionsBuilder.CLIENT_ID_KEY, "client-id"); - startupOptions.put(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "1.0.0"); - startupOptions.put(StartupOptionsBuilder.APPLICATION_NAME_KEY, "app-name"); - startupOptions.put(StartupOptionsBuilder.DRIVER_VERSION_KEY, "2.x"); - startupOptions.put(StartupOptionsBuilder.DRIVER_NAME_KEY, "DataStax Enterprise Java Driver"); - - when(context.getStartupOptions()).thenReturn(startupOptions); - when(context.getProtocolVersion()).thenReturn(DSE_V2); - DefaultNode contactPoint = mock(DefaultNode.class); - EndPoint contactEndPoint = mock(EndPoint.class); - when(contactEndPoint.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 9999)); - when(contactPoint.getEndPoint()).thenReturn(contactEndPoint); - when(manager.getContactPoints()).thenReturn(ImmutableSet.of(contactPoint)); - - DriverConfig driverConfig = mock(DriverConfig.class); - when(context.getConfig()).thenReturn(driverConfig); - Map profiles = - ImmutableMap.of( - "default", defaultExecutionProfile, "non-default", nonDefaultExecutionProfile); - Mockito.>when(driverConfig.getProfiles()) - .thenReturn(profiles); - when(driverConfig.getDefaultProfile()).thenReturn(defaultExecutionProfile); - - ControlConnection controlConnection = mock(ControlConnection.class); - DriverChannel channel = mock(DriverChannel.class); - 
EndPoint controlConnectionEndpoint = mock(EndPoint.class); - when(controlConnectionEndpoint.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 10)); - - when(channel.getEndPoint()).thenReturn(controlConnectionEndpoint); - when(channel.localAddress()).thenReturn(new InetSocketAddress("127.0.0.1", 10)); - when(controlConnection.channel()).thenReturn(channel); - when(context.getControlConnection()).thenReturn(controlConnection); - return context; - } - - private void mockConnectionPools(DefaultDriverContext driverContext) { - Node node1 = mock(Node.class); - EndPoint endPoint1 = mock(EndPoint.class); - when(endPoint1.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 10)); - when(node1.getEndPoint()).thenReturn(endPoint1); - when(node1.getOpenConnections()).thenReturn(1); - ChannelPool channelPool1 = mock(ChannelPool.class); - when(channelPool1.getInFlight()).thenReturn(10); - - Node node2 = mock(Node.class); - EndPoint endPoint2 = mock(EndPoint.class); - when(endPoint2.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 20)); - when(node2.getEndPoint()).thenReturn(endPoint2); - when(node2.getOpenConnections()).thenReturn(2); - ChannelPool channelPool2 = mock(ChannelPool.class); - when(channelPool2.getInFlight()).thenReturn(20); - - Map channelPools = ImmutableMap.of(node1, channelPool1, node2, channelPool2); - PoolManager poolManager = mock(PoolManager.class); - when(poolManager.getPools()).thenReturn(channelPools); - when(driverContext.getPoolManager()).thenReturn(poolManager); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifierTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifierTest.java deleted file mode 100644 index 9edd4494bdd..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifierTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collection; -import java.util.Collections; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class InsightsSupportVerifierTest { - - @Test - @UseDataProvider(value = "dseHostsProvider") - public void should_detect_DSE_versions_that_supports_insights( - Collection hosts, boolean expected) { - // when - boolean result = InsightsSupportVerifier.supportsInsights(hosts); - - // then - assertThat(result).isEqualTo(expected); - } - - @DataProvider - public static Object[][] 
dseHostsProvider() { - Node dse605 = mock(Node.class); - when(dse605.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("6.0.5"))); - Node dse604 = mock(Node.class); - when(dse604.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("6.0.4"))); - Node dse600 = mock(Node.class); - when(dse600.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("6.0.0"))); - Node dse5113 = mock(Node.class); - when(dse5113.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("5.1.13"))); - Node dse500 = mock(Node.class); - when(dse500.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("5.0.0"))); - Node nodeWithoutExtras = mock(Node.class); - when(nodeWithoutExtras.getExtras()).thenReturn(Collections.emptyMap()); - - return new Object[][] { - {ImmutableList.of(dse605), true}, - {ImmutableList.of(dse604), false}, - {ImmutableList.of(dse600), false}, - {ImmutableList.of(dse5113), true}, - {ImmutableList.of(dse500), false}, - {ImmutableList.of(dse5113, dse605), true}, - {ImmutableList.of(dse5113, dse600), false}, - {ImmutableList.of(dse500, dse600), false}, - {ImmutableList.of(), false}, - {ImmutableList.of(nodeWithoutExtras), false} - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PackageUtilTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PackageUtilTest.java deleted file mode 100644 index 336f19184d3..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PackageUtilTest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PackageUtilTest { - - private static final String DEFAULT_PACKAGE = "default.package"; - - @Test - public void should_find_package_name_for_class() { - // given - TestClass testClass = new TestClass(); - - // then - String namespace = PackageUtil.getNamespace(testClass.getClass()); - - // then - assertThat(namespace).isEqualTo("com.datastax.dse.driver.internal.core.insights"); - } - - @Test - @UseDataProvider("packagesProvider") - public void should_get_full_package_or_return_default(String fullClassSetting, String expected) { - // when - String result = PackageUtil.getFullPackageOrDefault(fullClassSetting, DEFAULT_PACKAGE); - - // then - assertThat(result).isEqualTo(expected); - } - - @Test - @UseDataProvider("classesProvider") - public void should_get_class_name_from_full_class_setting( - String fullClassSetting, String expected) { - // when - String result = PackageUtil.getClassName(fullClassSetting); - - // then - assertThat(result).isEqualTo(expected); - } - - @DataProvider 
- public static Object[][] packagesProvider() { - return new Object[][] { - {"com.P", "com"}, - {"ClassName", DEFAULT_PACKAGE}, - {"", DEFAULT_PACKAGE}, - {"com.p.a.2.x.12.Class", "com.p.a.2.x.12"}, - }; - } - - @DataProvider - public static Object[][] classesProvider() { - return new Object[][] { - {"com.P", "P"}, - {"ClassName", "ClassName"}, - {"", ""}, - {"com.p.a.2.x.12.Class", "Class"}, - }; - } - - private static class TestClass {} -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinderTest.java deleted file mode 100644 index 2a098363d46..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinderTest.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.internal.core.insights.PlatformInfoFinder.UNVERIFIED_RUNTIME_VERSION; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.RuntimeAndCompileTimeVersions; -import java.io.InputStream; -import java.net.URL; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.Map; -import org.junit.Test; - -public class PlatformInfoFinderTest { - - private URL nullUrlProvider(PlatformInfoFinder.DependencyFromFile d) { - return null; - } - - private URL nettyUrlProvider(PlatformInfoFinder.DependencyFromFile d) { - return this.getClass().getResource("/insights/pom.properties"); - } - - private URL malformedUrlProvider(PlatformInfoFinder.DependencyFromFile d) { - return this.getClass().getResource("/insights/malformed-pom.properties"); - } - - private URL nonExistingUrlProvider(PlatformInfoFinder.DependencyFromFile d) { - return this.getClass().getResource("/insights/non-existing.pom"); - } - - @Test - public void should_find_dependencies_from_file() { - // given - InputStream inputStream = - this.getClass().getResourceAsStream("/insights/test-dependencies.txt"); - Map expected = new HashMap<>(); - expected.put( - "io.netty:netty-transport-native-epoll", - withUnverifiedRuntimeVersionOptional("4.0.56.Final")); - expected.put("org.slf4j:slf4j-api", withUnverifiedRuntimeVersion("1.7.25")); - expected.put("org.ow2.asm:asm", withUnverifiedRuntimeVersion("5.0.3")); - expected.put("com.esri.geometry:esri-geometry-api", withUnverifiedRuntimeVersion("1.2.1")); - expected.put("io.netty:netty-transport", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put("com.github.jnr:jnr-x86asm", withUnverifiedRuntimeVersion("1.0.2")); - expected.put("org.ow2.asm:asm-analysis", withUnverifiedRuntimeVersion("5.0.3")); - 
expected.put("com.github.jnr:jnr-constants", withUnverifiedRuntimeVersion("0.9.9")); - expected.put("io.netty:netty-common", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put("com.google.guava:guava", withUnverifiedRuntimeVersion("19.0")); - expected.put("org.xerial.snappy:snappy-java", withUnverifiedRuntimeVersionOptional("1.1.2.6")); - expected.put("io.dropwizard.metrics:metrics-core", withUnverifiedRuntimeVersion("3.2.2")); - expected.put("org.ow2.asm:asm-tree", withUnverifiedRuntimeVersion("5.0.3")); - expected.put("com.github.jnr:jnr-posix", withUnverifiedRuntimeVersion("3.0.44")); - expected.put("org.codehaus.jackson:jackson-core-asl", withUnverifiedRuntimeVersion("1.9.12")); - expected.put( - "com.fasterxml.jackson.core:jackson-databind", withUnverifiedRuntimeVersion("2.7.9.3")); - expected.put("io.netty:netty-codec", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put( - "com.fasterxml.jackson.core:jackson-annotations", withUnverifiedRuntimeVersion("2.8.11")); - expected.put("com.fasterxml.jackson.core:jackson-core", withUnverifiedRuntimeVersion("2.8.11")); - expected.put("io.netty:netty-handler", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put("at.yawk.lz4:lz4-java", withUnverifiedRuntimeVersionOptional("1.10.1")); - expected.put("org.hdrhistogram:HdrHistogram", withUnverifiedRuntimeVersionOptional("2.1.10")); - expected.put("com.github.jnr:jffi", withUnverifiedRuntimeVersion("1.2.16")); - expected.put("io.netty:netty-buffer", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put("org.ow2.asm:asm-commons", withUnverifiedRuntimeVersion("5.0.3")); - expected.put("org.json:json", withUnverifiedRuntimeVersion("20090211")); - expected.put("org.ow2.asm:asm-util", withUnverifiedRuntimeVersion("5.0.3")); - expected.put("com.github.jnr:jnr-ffi", withUnverifiedRuntimeVersion("2.1.7")); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nullUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - 
assertThat(stringStringMap).hasSize(28); - assertThat(stringStringMap).isEqualTo(expected); - } - - @Test - public void should_find_dependencies_from_file_without_duplicate() { - // given - InputStream inputStream = - this.getClass().getResourceAsStream("/insights/duplicate-dependencies.txt"); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nullUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).hasSize(1); - } - - @Test - public void should_keep_order_of_dependencies() { - // given - InputStream inputStream = - this.getClass().getResourceAsStream("/insights/ordered-dependencies.txt"); - Map expected = new LinkedHashMap<>(); - expected.put("b-org.com:art1", withUnverifiedRuntimeVersion("1.0")); - expected.put("a-org.com:art1", withUnverifiedRuntimeVersion("2.0")); - expected.put("c-org.com:art1", withUnverifiedRuntimeVersion("3.0")); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nullUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - Iterator iterator = expected.keySet().iterator(); - assertThat(iterator.next()).isEqualTo("b-org.com:art1"); - assertThat(iterator.next()).isEqualTo("a-org.com:art1"); - assertThat(iterator.next()).isEqualTo("c-org.com:art1"); - } - - @Test - public void should_add_information_about_java_platform() { - // given - Map> runtimeDependencies = new HashMap<>(); - - // when - new PlatformInfoFinder(this::nullUrlProvider).addJavaVersion(runtimeDependencies); - - // then - Map javaDependencies = runtimeDependencies.get("java"); - assertThat(javaDependencies.size()).isEqualTo(3); - } - - @Test - public void should_load_runtime_version_from_pom_properties_URL() { - // given - InputStream inputStream = this.getClass().getResourceAsStream("/insights/netty-dependency.txt"); - Map expected = new LinkedHashMap<>(); - expected.put( - "io.netty:netty-handler", - new 
RuntimeAndCompileTimeVersions("4.0.56.Final", "4.0.0.Final", false)); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nettyUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - } - - @Test - public void should_load_runtime_version_of_optional_dependency_from_pom_properties_URL() { - // given - InputStream inputStream = - this.getClass().getResourceAsStream("/insights/netty-dependency-optional.txt"); - Map expected = new LinkedHashMap<>(); - expected.put( - "io.netty:netty-handler", - new RuntimeAndCompileTimeVersions("4.0.56.Final", "4.0.0.Final", true)); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nettyUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - } - - @Test - public void should_not_load_runtime_dependency_from_malformed_pom_properties() { - // given - InputStream inputStream = this.getClass().getResourceAsStream("/insights/netty-dependency.txt"); - Map expected = new LinkedHashMap<>(); - expected.put("io.netty:netty-handler", withUnverifiedRuntimeVersion("4.0.0.Final")); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::malformedUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - } - - @Test - public void should_not_load_runtime_dependency_from_non_existing_pom_properties() { - // given - InputStream inputStream = this.getClass().getResourceAsStream("/insights/netty-dependency.txt"); - Map expected = new LinkedHashMap<>(); - expected.put("io.netty:netty-handler", withUnverifiedRuntimeVersion("4.0.0.Final")); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nonExistingUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - } - - private RuntimeAndCompileTimeVersions withUnverifiedRuntimeVersion(String compileVersion) { - return 
new RuntimeAndCompileTimeVersions(UNVERIFIED_RUNTIME_VERSION, compileVersion, false); - } - - private RuntimeAndCompileTimeVersions withUnverifiedRuntimeVersionOptional( - String compileVersion) { - return new RuntimeAndCompileTimeVersions(UNVERIFIED_RUNTIME_VERSION, compileVersion, true); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinderTest.java deleted file mode 100644 index a076ca38b1c..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinderTest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.internal.core.insights.schema.ReconnectionPolicyInfo; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy; -import com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy; -import java.time.Duration; -import org.assertj.core.data.MapEntry; -import org.junit.Test; - -public class ReconnectionPolicyInfoFinderTest { - - @Test - public void should_find_an_info_about_constant_reconnection_policy() { - // given - DriverExecutionProfile driverExecutionProfile = mock(DriverExecutionProfile.class); - when(driverExecutionProfile.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY)) - .thenReturn(Duration.ofMillis(100)); - ReconnectionPolicy constantReconnectionPolicy = mock(ConstantReconnectionPolicy.class); - - // when - ReconnectionPolicyInfo reconnectionPolicyInfo = - new ReconnectionPolicyInfoFinder() - .getReconnectionPolicyInfo(constantReconnectionPolicy, driverExecutionProfile); - - // then - assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("delayMs", 100L)); - assertThat(reconnectionPolicyInfo.getType()).contains("ConstantReconnectionPolicy"); - } - - @Test - public void should_find_an_info_about_exponential_reconnection_policy() { - ExponentialReconnectionPolicy exponentialReconnectionPolicy = - mock(ExponentialReconnectionPolicy.class); - when(exponentialReconnectionPolicy.getBaseDelayMs()).thenReturn(100L); - when(exponentialReconnectionPolicy.getMaxAttempts()).thenReturn(10L); - 
when(exponentialReconnectionPolicy.getMaxDelayMs()).thenReturn(200L); - - // when - ReconnectionPolicyInfo reconnectionPolicyInfo = - new ReconnectionPolicyInfoFinder() - .getReconnectionPolicyInfo(exponentialReconnectionPolicy, null); - - // then - assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("baseDelayMs", 100L)); - assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("maxAttempts", 10L)); - assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("maxDelayMs", 200L)); - assertThat(reconnectionPolicyInfo.getType()).contains("ExponentialReconnectionPolicy"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodecTest.java deleted file mode 100644 index 3ef89c78714..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodecTest.java +++ /dev/null @@ -1,358 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.protocol; - -import static com.datastax.dse.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.dse.driver.Assertions; -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodecTest; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.function.Supplier; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.junit.Test; -import org.junit.runner.RunWith; - -/** - * Note: like {@link ByteBufPrimitiveCodecTest} we don't test trivial methods that simply delegate - * to the underlying Buffer, nor default implementations inherited from {@link - * com.datastax.oss.protocol.internal.PrimitiveCodec}. 
- */ -@RunWith(DataProviderRunner.class) -public class TinkerpopBufferPrimitiveCodecTest { - - private static final DseNettyBufferFactory factory = new DseNettyBufferFactory(); - private final TinkerpopBufferPrimitiveCodec codec = new TinkerpopBufferPrimitiveCodec(factory); - - @Test - public void should_concatenate() { - Buffer left = factory.withBytes(0xca, 0xfe); - Buffer right = factory.withBytes(0xba, 0xbe); - assertThat(codec.concat(left, right)).containsExactly("0xcafebabe"); - } - - @Test - public void should_read_inet_v4() { - Buffer source = - factory.withBytes( - // length (as a byte) - 0x04, - // address - 0x7f, - 0x00, - 0x00, - 0x01, - // port (as an int) - 0x00, - 0x00, - 0x23, - 0x52); - InetSocketAddress inet = codec.readInet(source); - assertThat(inet.getAddress().getHostAddress()).isEqualTo("127.0.0.1"); - assertThat(inet.getPort()).isEqualTo(9042); - } - - @Test - public void should_read_inet_v6() { - Buffer lengthAndAddress = factory.heap(17); - lengthAndAddress.writeByte(16); - lengthAndAddress.writeLong(0); - lengthAndAddress.writeLong(1); - Buffer source = - codec.concat( - lengthAndAddress, - // port (as an int) - factory.withBytes(0x00, 0x00, 0x23, 0x52)); - InetSocketAddress inet = codec.readInet(source); - assertThat(inet.getAddress().getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1"); - assertThat(inet.getPort()).isEqualTo(9042); - } - - @Test - public void should_fail_to_read_inet_if_length_invalid() { - Buffer source = - factory.withBytes( - // length (as a byte) - 0x03, - // address - 0x7f, - 0x00, - 0x01, - // port (as an int) - 0x00, - 0x00, - 0x23, - 0x52); - assertThatThrownBy(() -> codec.readInet(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid address length: 3 ([127, 0, 1])"); - } - - @Test - public void should_read_inetaddr_v4() { - Buffer source = - factory.withBytes( - // length (as a byte) - 0x04, - // address - 0x7f, - 0x00, - 0x00, - 0x01); - InetAddress inetAddr = codec.readInetAddr(source); - 
assertThat(inetAddr.getHostAddress()).isEqualTo("127.0.0.1"); - } - - @Test - public void should_read_inetaddr_v6() { - Buffer source = factory.heap(17); - source.writeByte(16); - source.writeLong(0); - source.writeLong(1); - InetAddress inetAddr = codec.readInetAddr(source); - assertThat(inetAddr.getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1"); - } - - @Test - public void should_fail_to_read_inetaddr_if_length_invalid() { - Buffer source = - factory.withBytes( - // length (as a byte) - 0x03, - // address - 0x7f, - 0x00, - 0x01); - assertThatThrownBy(() -> codec.readInetAddr(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid address length: 3 ([127, 0, 1])"); - } - - @Test - public void should_read_bytes() { - Buffer source = - factory.withBytes( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe); - ByteBuffer bytes = codec.readBytes(source); - assertThat(Bytes.toHexString(bytes)).isEqualTo("0xcafebabe"); - } - - @Test - public void should_read_null_bytes() { - Buffer source = factory.withBytes(0xFF, 0xFF, 0xFF, 0xFF); // -1 (as an int) - assertThat(codec.readBytes(source)).isNull(); - } - - @Test - public void should_read_short_bytes() { - Buffer source = - factory.withBytes( - // length (as an unsigned short) - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe); - assertThat(Bytes.toHexString(codec.readShortBytes(source))).isEqualTo("0xcafebabe"); - } - - @DataProvider - public static Object[][] bufferTypes() { - return new Object[][] { - {(Supplier) factory::heap}, - {(Supplier) factory::io}, - {(Supplier) factory::direct} - }; - } - - @Test - @UseDataProvider("bufferTypes") - public void should_read_string(Supplier supplier) { - Buffer source = - factory.withBytes( - supplier, - // length (as an unsigned short) - 0x00, - 0x05, - // UTF-8 contents - 0x68, - 0x65, - 0x6c, - 0x6c, - 0x6f); - assertThat(codec.readString(source)).isEqualTo("hello"); - } - - @Test - public void 
should_fail_to_read_string_if_not_enough_characters() { - Buffer source = factory.heap(); - source.writeShort(4); - - assertThatThrownBy(() -> codec.readString(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); - } - - @Test - public void should_read_long_string() { - Buffer source = - factory.withBytes( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x05, - // UTF-8 contents - 0x68, - 0x65, - 0x6c, - 0x6c, - 0x6f); - assertThat(codec.readLongString(source)).isEqualTo("hello"); - } - - @Test - public void should_fail_to_read_long_string_if_not_enough_characters() { - Buffer source = factory.heap(4, 4); - source.writeInt(4); - - assertThatThrownBy(() -> codec.readLongString(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); - } - - @Test - public void should_write_inet_v4() throws Exception { - Buffer dest = factory.heap(1 + 4 + 4); - InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9042); - codec.writeInet(inet, dest); - assertThat(dest) - .containsExactly( - "0x04" // size as a byte - + "7f000001" // address - + "00002352" // port - ); - } - - @Test - public void should_write_inet_v6() throws Exception { - Buffer dest = factory.heap(1 + 16 + 4); - InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("::1"), 9042); - codec.writeInet(inet, dest); - assertThat(dest) - .containsExactly( - "0x10" // size as a byte - + "00000000000000000000000000000001" // address - + "00002352" // port - ); - } - - @Test - public void should_write_inetaddr_v4() throws Exception { - Buffer dest = factory.heap(1 + 4); - InetAddress inetAddr = InetAddress.getByName("127.0.0.1"); - codec.writeInetAddr(inetAddr, dest); - assertThat(dest) - .containsExactly( - "0x04" // size as a byte - + "7f000001" // address - ); - } - - @Test - public void should_write_inetaddr_v6() 
throws Exception { - Buffer dest = factory.heap(1 + 16); - InetAddress inetAddr = InetAddress.getByName("::1"); - codec.writeInetAddr(inetAddr, dest); - Assertions.assertThat(dest) - .containsExactly( - "0x10" // size as a byte - + "00000000000000000000000000000001" // address - ); - } - - @Test - public void should_write_string() { - Buffer dest = factory.heap(); - codec.writeString("hello", dest); - assertThat(dest) - .containsExactly( - "0x0005" // size as an unsigned short - + "68656c6c6f" // UTF-8 contents - ); - } - - @Test - public void should_write_long_string() { - Buffer dest = factory.heap(9); - codec.writeLongString("hello", dest); - assertThat(dest) - .containsExactly( - "0x00000005" - + // size as an int - "68656c6c6f" // UTF-8 contents - ); - } - - @Test - public void should_write_bytes() { - Buffer dest = factory.heap(8); - codec.writeBytes(Bytes.fromHexString("0xcafebabe"), dest); - assertThat(dest) - .containsExactly( - "0x00000004" - + // size as an int - "cafebabe"); - } - - @Test - public void should_write_short_bytes() { - Buffer dest = factory.heap(6); - codec.writeShortBytes(new byte[] {(byte) 0xca, (byte) 0xfe, (byte) 0xba, (byte) 0xbe}, dest); - assertThat(dest) - .containsExactly( - "0x0004" - + // size as an unsigned short - "cafebabe"); - } - - @Test - public void should_write_null_bytes() { - Buffer dest = factory.heap(4); - codec.writeBytes((ByteBuffer) null, dest); - assertThat(dest).containsExactly("0xFFFFFFFF"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodecTest.java deleted file mode 100644 index 9e4d019660c..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodecTest.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; - -public abstract class GeometryCodecTest> { - - private C codec; - - protected GeometryCodecTest(C codec) { - this.codec = codec; - } - - public void should_format(G input, String expected) { - assertThat(codec.format(input)).isEqualTo(expected); - } - - public void should_parse(String input, G expected) { - assertThat(codec.parse(input)).isEqualTo(expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodecTest.java deleted file mode 100644 index ba71026ac2c..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodecTest.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class LineStringCodecTest extends GeometryCodecTest { - - private static DefaultLineString lineString = - new DefaultLineString( - new DefaultPoint(30, 10), new DefaultPoint(10, 30), new DefaultPoint(40, 40)); - - public LineStringCodecTest() { - super(new LineStringCodec()); - } - - @DataProvider - public static Object[][] serde() { - return new Object[][] {{null, null}, {lineString, lineString}}; - } - - @DataProvider - public static Object[][] format() { - return new Object[][] {{null, "NULL"}, {lineString, "'LINESTRING (30 10, 10 30, 40 40)'"}}; - } - - @DataProvider - public static Object[][] parse() { - return new Object[][] { - {null, null}, - {"", null}, - {" ", null}, - {"NULL", null}, - {" NULL ", null}, - {"'LINESTRING (30 10, 10 30, 40 40)'", lineString}, - {" ' LineString (30 10, 10 30, 40 40 ) ' ", 
lineString} - }; - } - - @Test - @UseDataProvider("format") - @Override - public void should_format(LineString input, String expected) { - super.should_format(input, expected); - } - - @Test - @UseDataProvider("parse") - @Override - public void should_parse(String input, LineString expected) { - super.should_parse(input, expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodecTest.java deleted file mode 100644 index 7948f4d758a..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodecTest.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PointCodecTest extends GeometryCodecTest { - - public PointCodecTest() { - super(new PointCodec()); - } - - @DataProvider - public static Object[][] serde() { - return new Object[][] { - {null, null}, - {Point.fromCoordinates(1, 2), Point.fromCoordinates(1, 2)}, - {Point.fromCoordinates(-1.1, -2.2), Point.fromCoordinates(-1.1, -2.2)} - }; - } - - @DataProvider - public static Object[][] format() { - return new Object[][] { - {null, "NULL"}, - {Point.fromCoordinates(1, 2), "'POINT (1 2)'"}, - {Point.fromCoordinates(-1.1, -2.2), "'POINT (-1.1 -2.2)'"} - }; - } - - @DataProvider - public static Object[][] parse() { - return new Object[][] { - {null, null}, - {"", null}, - {" ", null}, - {"NULL", null}, - {" NULL ", null}, - {"'POINT ( 1 2 )'", Point.fromCoordinates(1, 2)}, - {"'POINT ( 1.0 2.0 )'", Point.fromCoordinates(1, 2)}, - {"' point ( -1.1 -2.2 )'", Point.fromCoordinates(-1.1, -2.2)}, - {" ' Point ( -1.1 -2.2 ) ' ", Point.fromCoordinates(-1.1, -2.2)} - }; - } - - @Test - @UseDataProvider("format") - @Override - public void should_format(Point input, String expected) { - super.should_format(input, expected); - } - - @Test - @UseDataProvider("parse") - @Override - public void should_parse(String input, Point expected) { - super.should_parse(input, expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodecTest.java deleted file mode 100644 index 290dabe7519..00000000000 --- 
a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodecTest.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PolygonCodecTest extends GeometryCodecTest { - private static Polygon polygon = - new DefaultPolygon( - new DefaultPoint(30, 10), - new DefaultPoint(10, 20), - new DefaultPoint(20, 40), - new DefaultPoint(40, 40)); - - public PolygonCodecTest() { - super(new PolygonCodec()); - } - - @DataProvider - public static Object[][] serde() { - return new Object[][] {{null, null}, {polygon, polygon}}; - } - - @DataProvider - public static Object[][] format() { - return new 
Object[][] { - {null, "NULL"}, {polygon, "'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'"} - }; - } - - @DataProvider - public static Object[][] parse() { - return new Object[][] { - {null, null}, - {"", null}, - {" ", null}, - {"NULL", null}, - {" NULL ", null}, - {"'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'", polygon}, - {" ' Polygon ( ( 30 10, 40 40, 20 40, 10 20, 30 10 ) ) ' ", polygon} - }; - } - - @Test - @UseDataProvider("format") - @Override - public void should_format(Polygon input, String expected) { - super.should_format(input, expected); - } - - @Test - @UseDataProvider("parse") - @Override - public void should_parse(String input, Polygon expected) { - super.should_parse(input, expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodecTest.java deleted file mode 100644 index b9b618b8dd3..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodecTest.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.type.codec.time; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.time.DateRange; -import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import java.text.ParseException; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DateRangeCodecTest { - - @Test - @UseDataProvider("dateRanges") - public void should_encode_and_decode(DateRange dateRange) { - TypeCodec codec = DseTypeCodecs.DATE_RANGE; - DateRange decoded = - codec.decode(codec.encode(dateRange, ProtocolVersion.DEFAULT), ProtocolVersion.DEFAULT); - assertThat(decoded).isEqualTo(dateRange); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_encode_unknown_date_range_type() { - DseTypeCodecs.DATE_RANGE.decode(ByteBuffer.wrap(new byte[] {127}), ProtocolVersion.DEFAULT); - } - - @Test - @UseDataProvider("dateRangeStrings") - public void should_format_and_parse(String dateRangeString) { - TypeCodec codec = DseTypeCodecs.DATE_RANGE; - String formatted = codec.format(codec.parse(dateRangeString)); - assertThat(formatted).isEqualTo(MoreObjects.firstNonNull(dateRangeString, "NULL")); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_string() { - DseTypeCodecs.DATE_RANGE.parse("foo"); - } - - @DataProvider - public static Object[][] dateRanges() throws ParseException { - return new Object[][] { - {null}, - {DateRange.parse("[2011-01 TO 2015]")}, - 
{DateRange.parse("[2010-01-02 TO 2015-05-05T13]")}, - {DateRange.parse("[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]")}, - {DateRange.parse("[2010-01-01T15 TO 2016-02]")}, - {DateRange.parse("[1500 TO 1501]")}, - {DateRange.parse("[0001-01-01 TO 0001-01-01]")}, - {DateRange.parse("[0001-01-01 TO 0001-01-02]")}, - {DateRange.parse("[0000-01-01 TO 0000-01-01]")}, - {DateRange.parse("[0000-01-01 TO 0000-01-02]")}, - {DateRange.parse("[-0001-01-01 TO -0001-01-01]")}, - {DateRange.parse("[-0001-01-01 TO -0001-01-02]")}, - {DateRange.parse("[* TO 2014-12-01]")}, - {DateRange.parse("[1999 TO *]")}, - {DateRange.parse("[* TO *]")}, - {DateRange.parse("-0009")}, - {DateRange.parse("2000-11")}, - {DateRange.parse("*")} - }; - } - - @DataProvider - public static Object[][] dateRangeStrings() { - return new Object[][] { - {null}, - {"NULL"}, - {"'[2011-01 TO 2015]'"}, - {"'[2010-01-02 TO 2015-05-05T13]'"}, - {"'[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]'"}, - {"'[2010-01-01T15 TO 2016-02]'"}, - {"'[1500 TO 1501]'"}, - {"'[0001-01-01 TO 0001-01-01]'"}, - {"'[0001-01-01 TO 0001-01-02]'"}, - {"'[0000-01-01 TO 0000-01-01]'"}, - {"'[0000-01-01 TO 0000-01-02]'"}, - {"'[-0001-01-01 TO -0001-01-01]'"}, - {"'[-0001-01-01 TO -0001-01-02]'"}, - {"'[* TO 2014-12-01]'"}, - {"'[1999 TO *]'"}, - {"'[* TO *]'"}, - {"'-0009'"}, - {"'2000-11'"}, - {"'*'"} - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueueTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueueTest.java deleted file mode 100644 index 5cf8a67f84b..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueueTest.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.util.concurrent; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThat; - -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class BoundedConcurrentQueueTest { - - @Test - public void should_dequeue_null_when_empty() { - BoundedConcurrentQueue queue = new BoundedConcurrentQueue<>(4); - assertThat(queue.peek()).isNull(); - assertThat(queue.poll()).isNull(); - } - - @Test - public void should_enqueue_and_dequeue_while_not_full() { - BoundedConcurrentQueue queue = new BoundedConcurrentQueue<>(4); - - assertThatStage(queue.offer(1)).isSuccess(e -> assertThat(e).isEqualTo(1)); - assertThat(queue.peek()).isEqualTo(1); - assertThat(queue.poll()).isEqualTo(1); - - assertThatStage(queue.offer(2)).isSuccess(e -> assertThat(e).isEqualTo(2)); - assertThatStage(queue.offer(3)).isSuccess(e -> assertThat(e).isEqualTo(3)); - assertThatStage(queue.offer(4)).isSuccess(e -> assertThat(e).isEqualTo(4)); - - assertThat(queue.peek()).isEqualTo(2); - assertThat(queue.poll()).isEqualTo(2); - assertThat(queue.peek()).isEqualTo(3); - assertThat(queue.poll()).isEqualTo(3); - assertThat(queue.peek()).isEqualTo(4); - assertThat(queue.poll()).isEqualTo(4); 
- assertThat(queue.poll()).isNull(); - } - - @Test - public void should_delay_insertion_when_full_until_space_available() { - BoundedConcurrentQueue queue = new BoundedConcurrentQueue<>(4); - - assertThatStage(queue.offer(1)).isSuccess(e -> assertThat(e).isEqualTo(1)); - assertThatStage(queue.offer(2)).isSuccess(e -> assertThat(e).isEqualTo(2)); - assertThatStage(queue.offer(3)).isSuccess(e -> assertThat(e).isEqualTo(3)); - assertThatStage(queue.offer(4)).isSuccess(e -> assertThat(e).isEqualTo(4)); - - CompletionStage enqueue5 = queue.offer(5); - assertThat(enqueue5).isNotDone(); - - assertThat(queue.poll()).isEqualTo(1); - assertThatStage(enqueue5).isSuccess(e -> assertThat(e).isEqualTo(5)); - } - - @Test(expected = IllegalStateException.class) - public void should_fail_to_insert_when_other_insert_already_pending() { - BoundedConcurrentQueue queue = new BoundedConcurrentQueue<>(1); - assertThatStage(queue.offer(1)).isSuccess(); - assertThatStage(queue.offer(2)).isNotDone(); - queue.offer(3); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/Assertions.java b/core/src/test/java/com/datastax/oss/driver/Assertions.java deleted file mode 100644 index 8478053e6d8..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/Assertions.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.VersionAssert; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.internal.core.CompletionStageAssert; -import com.datastax.oss.driver.internal.core.DriverConfigAssert; -import com.datastax.oss.driver.internal.core.NettyFutureAssert; -import com.datastax.oss.driver.internal.core.metadata.token.TokenRangeAssert; -import io.netty.buffer.ByteBuf; -import io.netty.util.concurrent.Future; -import java.util.concurrent.CompletionStage; - -public class Assertions extends org.assertj.core.api.Assertions { - public static ByteBufAssert assertThat(ByteBuf actual) { - return new ByteBufAssert(actual); - } - - public static DriverConfigAssert assertThat(DriverConfig actual) { - return new DriverConfigAssert(actual); - } - - public static NettyFutureAssert assertThat(Future actual) { - return new NettyFutureAssert<>(actual); - } - - /** - * Use a different name because this clashes with AssertJ's built-in one. Our implementation is a - * bit more flexible for checking completion values and errors. 
- */ - public static CompletionStageAssert assertThatStage(CompletionStage actual) { - return new CompletionStageAssert<>(actual); - } - - public static VersionAssert assertThat(Version actual) { - return new VersionAssert(actual); - } - - public static TokenRangeAssert assertThat(TokenRange actual) { - return new TokenRangeAssert(actual); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java b/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java deleted file mode 100644 index 4cd9c3ed358..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.buffer.ByteBuf; -import org.assertj.core.api.AbstractAssert; - -public class ByteBufAssert extends AbstractAssert { - public ByteBufAssert(ByteBuf actual) { - super(actual, ByteBufAssert.class); - } - - public ByteBufAssert containsExactly(String hexString) { - ByteBuf copy = actual.duplicate(); - byte[] expectedBytes = Bytes.fromHexString(hexString).array(); - byte[] actualBytes = new byte[expectedBytes.length]; - copy.readBytes(actualBytes); - assertThat(actualBytes).containsExactly(expectedBytes); - // And nothing more - assertThat(copy.isReadable()).isFalse(); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java b/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java deleted file mode 100644 index 085134b28f2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver; - -import static org.assertj.core.api.Assertions.fail; - -import org.junit.runner.Description; -import org.junit.runner.notification.RunListener; - -/** - * Common parent of all driver tests, to store common configuration and perform sanity checks. - * - * @see "maven-surefire-plugin configuration in pom.xml" - */ -public class DriverRunListener extends RunListener { - - @Override - public void testFinished(Description description) throws Exception { - // If a test interrupted the main thread silently, this can make later tests fail. Instead, we - // fail the test and clear the interrupt status. - // Note: Thread.interrupted() also clears the flag, which is what we want. - if (Thread.interrupted()) { - fail(description.getMethodName() + " interrupted the main thread"); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java b/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java deleted file mode 100644 index a0448c4b769..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver; - -import com.tngtech.java.junit.dataprovider.DataProvider; -import java.util.Arrays; -import java.util.Locale; - -public class TestDataProviders { - - public static Object[][] fromList(Object... l) { - Object[][] result = new Object[l.length][]; - for (int i = 0; i < l.length; i++) { - result[i] = new Object[1]; - result[i][0] = l[i]; - } - return result; - } - - public static Object[][] concat(Object[][] left, Object[][] right) { - Object[][] result = Arrays.copyOf(left, left.length + right.length); - System.arraycopy(right, 0, result, left.length, right.length); - return result; - } - - // example: [ [a,b], [c,d] ], [ [1], [2] ], [ [true], [false] ] - // => [ [a,b,1,true], [a,b,1,false], [a,b,2,true], [a,b,2,false], ... ] - public static Object[][] combine(Object[][]... providers) { - int numberOfProviders = providers.length; // (ex: 3) - - // ex: 2 * 2 * 2 combinations - int numberOfCombinations = 1; - for (Object[][] provider : providers) { - numberOfCombinations *= provider.length; - } - - Object[][] result = new Object[numberOfCombinations][]; - // The current index in each provider (ex: [1,0,1] => [c,d,1,false]) - int[] indices = new int[numberOfProviders]; - - for (int c = 0; c < numberOfCombinations; c++) { - int combinationLength = 0; - for (int p = 0; p < numberOfProviders; p++) { - combinationLength += providers[p][indices[p]].length; - } - Object[] combination = new Object[combinationLength]; - int destPos = 0; - for (int p = 0; p < numberOfProviders; p++) { - Object[] src = providers[p][indices[p]]; - System.arraycopy(src, 0, combination, destPos, src.length); - destPos += src.length; - } - result[c] = combination; - - // Update indices: try to increment from the right, if it overflows reset and move left - for (int p = providers.length - 1; p >= 0; p--) { - if (indices[p] < providers[p].length - 1) { - // ex: [0,0,0], p = 2 => [0,0,1] - indices[p] += 1; - break; - } else { - // ex: [0,0,1], p = 2 => [0,0,0], 
loop to increment to [0,1,0] - indices[p] = 0; - } - } - } - return result; - } - - @DataProvider - public static Object[][] booleans() { - return fromList(true, false); - } - - /** An arbitrary set of locales to use when testing locale-sensitive operations. */ - @DataProvider - public static Object[][] locales() { - return new Object[][] { - new Object[] {Locale.US}, - // non-latin alphabets - new Object[] {Locale.CHINA}, - new Object[] {Locale.JAPAN}, - new Object[] {Locale.KOREA}, - new Object[] {new Locale("gr") /* greek */}, - new Object[] {new Locale("ar") /* arabic */}, - // latin-based alphabets with extended character sets - new Object[] {new Locale("vi") /* vietnamese */}, - // JAVA-2883: Turkish is the most problematic locale as String.toLowerCase("TITLE") - // wouldn't return "title" but rather "tıtle", where 'ı' is the 'LATIN SMALL LETTER - // DOTLESS I' character specific to the Turkish language. - new Object[] {new Locale("tr") /* turkish*/}, - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/AllNodesFailedExceptionTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/AllNodesFailedExceptionTest.java deleted file mode 100644 index 4cd4c0fcd74..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/AllNodesFailedExceptionTest.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static com.datastax.oss.driver.api.core.ConsistencyLevel.QUORUM; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.data.MapEntry.entry; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class AllNodesFailedExceptionTest { - - @Mock(name = "node1") - private Node node1; - - @Mock(name = "node2") - private Node node2; - - @SuppressWarnings("deprecation") - @Test - public void should_create_instance_from_map_of_first_errors() { - // given - UnavailableException e1 = new UnavailableException(node1, QUORUM, 2, 1); - ReadTimeoutException e2 = new ReadTimeoutException(node2, QUORUM, 2, 1, false); - Map errors = ImmutableMap.of(node1, e1, node2, e2); - // when - AllNodesFailedException e = AllNodesFailedException.fromErrors(errors); - // then - assertThat(e) - .hasMessage( - "All 2 node(s) tried for the query failed " - + "(showing first 2 nodes, use getAllErrors() for more): " - + "node1: [%s], node2: [%s]", - 
e1, e2); - assertThat(e.getAllErrors()) - .hasEntrySatisfying(node1, list -> assertThat(list).containsExactly(e1)); - assertThat(e.getAllErrors()) - .hasEntrySatisfying(node2, list -> assertThat(list).containsExactly(e2)); - assertThat(e.getErrors()).containsEntry(node1, e1); - assertThat(e.getErrors()).containsEntry(node2, e2); - assertThat(e).hasSuppressedException(e1).hasSuppressedException(e2); - } - - @SuppressWarnings("deprecation") - @Test - public void should_create_instance_from_list_of_all_errors() { - // given - UnavailableException e1a = new UnavailableException(node1, QUORUM, 2, 1); - ReadTimeoutException e1b = new ReadTimeoutException(node1, QUORUM, 2, 1, false); - ReadTimeoutException e2a = new ReadTimeoutException(node2, QUORUM, 2, 1, false); - List> errors = - ImmutableList.of(entry(node1, e1a), entry(node1, e1b), entry(node2, e2a)); - // when - AllNodesFailedException e = AllNodesFailedException.fromErrors(errors); - // then - assertThat(e) - .hasMessage( - "All 2 node(s) tried for the query failed " - + "(showing first 2 nodes, use getAllErrors() for more): " - + "node1: [%s, %s], node2: [%s]", - e1a, e1b, e2a); - assertThat(e.getAllErrors()) - .hasEntrySatisfying(node1, list -> assertThat(list).containsExactly(e1a, e1b)); - assertThat(e.getAllErrors()) - .hasEntrySatisfying(node2, list -> assertThat(list).containsExactly(e2a)); - assertThat(e.getErrors()).containsEntry(node1, e1a); - assertThat(e.getErrors()).containsEntry(node2, e2a); - assertThat(e) - .hasSuppressedException(e1a) - .hasSuppressedException(e1b) - .hasSuppressedException(e2a); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java deleted file mode 100644 index 5c7203b8f8d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.TestDataProviders; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class CqlIdentifierTest { - - @Test - public void should_build_from_internal() { - assertThat(CqlIdentifier.fromInternal("foo").asInternal()).isEqualTo("foo"); - assertThat(CqlIdentifier.fromInternal("Foo").asInternal()).isEqualTo("Foo"); - assertThat(CqlIdentifier.fromInternal("foo bar").asInternal()).isEqualTo("foo bar"); - assertThat(CqlIdentifier.fromInternal("foo\"bar").asInternal()).isEqualTo("foo\"bar"); - assertThat(CqlIdentifier.fromInternal("create").asInternal()).isEqualTo("create"); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_build_from_valid_cql(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(CqlIdentifier.fromCql("foo").asInternal()).isEqualTo("foo"); - 
assertThat(CqlIdentifier.fromCql("Foo").asInternal()).isEqualTo("foo"); - assertThat(CqlIdentifier.fromCql("\"Foo\"").asInternal()).isEqualTo("Foo"); - assertThat(CqlIdentifier.fromCql("\"foo bar\"").asInternal()).isEqualTo("foo bar"); - assertThat(CqlIdentifier.fromCql("\"foo\"\"bar\"").asInternal()).isEqualTo("foo\"bar"); - assertThat(CqlIdentifier.fromCql("\"create\"").asInternal()).isEqualTo("create"); - // JAVA-2883: this would fail under turkish locale if it was used internally - assertThat(CqlIdentifier.fromCql("TITLE").asInternal()).isEqualTo("title"); - } finally { - Locale.setDefault(def); - } - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_from_valid_cql_if_special_characters() { - CqlIdentifier.fromCql("foo bar"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_from_valid_cql_if_reserved_keyword() { - CqlIdentifier.fromCql("Create"); - } - - @Test - public void should_format_as_cql() { - assertThat(CqlIdentifier.fromInternal("foo").asCql(false)).isEqualTo("\"foo\""); - assertThat(CqlIdentifier.fromInternal("Foo").asCql(false)).isEqualTo("\"Foo\""); - assertThat(CqlIdentifier.fromInternal("foo bar").asCql(false)).isEqualTo("\"foo bar\""); - assertThat(CqlIdentifier.fromInternal("foo\"bar").asCql(false)).isEqualTo("\"foo\"\"bar\""); - assertThat(CqlIdentifier.fromInternal("create").asCql(false)).isEqualTo("\"create\""); - } - - @Test - public void should_format_as_pretty_cql() { - assertThat(CqlIdentifier.fromInternal("foo").asCql(true)).isEqualTo("foo"); - assertThat(CqlIdentifier.fromInternal("Foo").asCql(true)).isEqualTo("\"Foo\""); - assertThat(CqlIdentifier.fromInternal("foo bar").asCql(true)).isEqualTo("\"foo bar\""); - assertThat(CqlIdentifier.fromInternal("foo\"bar").asCql(true)).isEqualTo("\"foo\"\"bar\""); - assertThat(CqlIdentifier.fromInternal("create").asCql(true)).isEqualTo("\"create\""); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java b/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java deleted file mode 100644 index 61beb5cea51..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.assertj.core.api.AbstractComparableAssert; - -public class VersionAssert extends AbstractComparableAssert { - - public VersionAssert(Version actual) { - super(actual, VersionAssert.class); - } - - public VersionAssert hasMajorMinorPatch(int major, int minor, int patch) { - assertThat(actual.getMajor()).isEqualTo(major); - assertThat(actual.getMinor()).isEqualTo(minor); - assertThat(actual.getPatch()).isEqualTo(patch); - return this; - } - - public VersionAssert hasDsePatch(int dsePatch) { - assertThat(actual.getDSEPatch()).isEqualTo(dsePatch); - return this; - } - - public VersionAssert hasPreReleaseLabels(String... 
labels) { - assertThat(actual.getPreReleaseLabels()).containsExactly(labels); - return this; - } - - public VersionAssert hasNoPreReleaseLabels() { - assertThat(actual.getPreReleaseLabels()).isNull(); - return this; - } - - public VersionAssert hasBuildLabel(String label) { - assertThat(actual.getBuildLabel()).isEqualTo(label); - return this; - } - - public VersionAssert hasNextStable(String version) { - assertThat(actual.nextStable()).isEqualTo(Version.parse(version)); - return this; - } - - @Override - public VersionAssert hasToString(String string) { - assertThat(actual.toString()).isEqualTo(string); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java deleted file mode 100644 index bce30816f9c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.junit.Test; - -public class VersionTest { - - @Test - public void should_parse_release_version() { - assertThat(Version.parse("1.2.19")) - .hasMajorMinorPatch(1, 2, 19) - .hasDsePatch(-1) - .hasNoPreReleaseLabels() - .hasBuildLabel(null) - .hasNextStable("1.2.19") - .hasToString("1.2.19"); - } - - @Test - public void should_parse_release_without_patch() { - assertThat(Version.parse("1.2")).hasMajorMinorPatch(1, 2, 0); - } - - @Test - public void should_parse_pre_release_version() { - assertThat(Version.parse("1.2.0-beta1-SNAPSHOT")) - .hasMajorMinorPatch(1, 2, 0) - .hasDsePatch(-1) - .hasPreReleaseLabels("beta1", "SNAPSHOT") - .hasBuildLabel(null) - .hasToString("1.2.0-beta1-SNAPSHOT") - .hasNextStable("1.2.0"); - } - - @Test - public void should_allow_tilde_as_first_pre_release_delimiter() { - assertThat(Version.parse("1.2.0~beta1-SNAPSHOT")) - .hasMajorMinorPatch(1, 2, 0) - .hasDsePatch(-1) - .hasPreReleaseLabels("beta1", "SNAPSHOT") - .hasBuildLabel(null) - .hasToString("1.2.0-beta1-SNAPSHOT") - .hasNextStable("1.2.0"); - } - - @Test - public void should_parse_dse_patch() { - assertThat(Version.parse("1.2.19.2-SNAPSHOT")) - .hasMajorMinorPatch(1, 2, 19) - .hasDsePatch(2) - .hasToString("1.2.19.2-SNAPSHOT") - .hasNextStable("1.2.19.2"); - } - - @Test - public void should_order_versions() { - // by component - assertOrder("1.2.0", "2.0.0", -1); - assertOrder("2.0.0", "2.1.0", -1); - assertOrder("2.0.1", "2.0.2", -1); - assertOrder("2.0.1.1", "2.0.1.2", -1); - - // shortened vs. longer version - assertOrder("2.0", "2.0.0", 0); - assertOrder("2.0", "2.0.1", -1); - - // any DSE version is higher than no DSE version - assertOrder("2.0.0", "2.0.0.0", -1); - assertOrder("2.0.0", "2.0.0.1", -1); - - // pre-release vs. 
release - assertOrder("2.0.0-beta1", "2.0.0", -1); - assertOrder("2.0.0-SNAPSHOT", "2.0.0", -1); - assertOrder("2.0.0-beta1-SNAPSHOT", "2.0.0", -1); - - // pre-release vs. pre-release - assertOrder("2.0.0-a-b-c", "2.0.0-a-b-d", -1); - assertOrder("2.0.0-a-b-c", "2.0.0-a-b-c-d", -1); - - // build number ignored - assertOrder("2.0.0+build01", "2.0.0+build02", 0); - } - - private void assertOrder(String version1, String version2, int expected) { - assertThat(Version.parse(version1).compareTo(Version.parse(version2))).isEqualTo(expected); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProviderTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProviderTest.java deleted file mode 100644 index 44d2acfbb2e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProviderTest.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.auth; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase.Credentials; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class ProgrammaticPlainTextAuthProviderTest { - - @Mock private EndPoint endpoint; - - @Test - public void should_return_correct_credentials_without_authorization_id() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass"); - // when - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); - assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo(new char[0]); - } - - @Test - public void should_return_correct_credentials_with_authorization_id() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass", "proxy"); - // when - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); - assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo("proxy".toCharArray()); - } - - @Test - public void should_change_username() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass"); - // when - provider.setUsername("user2"); - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user2".toCharArray()); - 
assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo(new char[0]); - } - - @Test - public void should_change_password() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass"); - // when - provider.setPassword("pass2"); - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); - assertThat(credentials.getPassword()).isEqualTo("pass2".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo(new char[0]); - } - - @Test - public void should_change_authorization_id() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass", "proxy"); - // when - provider.setAuthorizationId("proxy2"); - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); - assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo("proxy2".toCharArray()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/config/OptionsMapTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/config/OptionsMapTest.java deleted file mode 100644 index ec0410ed868..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/config/OptionsMapTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.SerializationHelper; -import java.time.Duration; -import java.util.function.Consumer; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class OptionsMapTest { - @Mock private Consumer mockListener; - - @Test - public void should_serialize_and_deserialize() { - // Given - OptionsMap initial = OptionsMap.driverDefaults(); - Duration slowTimeout = Duration.ofSeconds(30); - initial.put("slow", TypedDriverOption.REQUEST_TIMEOUT, slowTimeout); - initial.addChangeListener(mockListener); - - // When - OptionsMap deserialized = SerializationHelper.serializeAndDeserialize(initial); - - // Then - assertThat(deserialized.get(TypedDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofSeconds(2)); - assertThat(deserialized.get("slow", TypedDriverOption.REQUEST_TIMEOUT)).isEqualTo(slowTimeout); - // Listeners are transient - assertThat(deserialized.removeChangeListener(mockListener)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/config/TypedDriverOptionTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/config/TypedDriverOptionTest.java deleted file mode 100644 index eee4000a459..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/config/TypedDriverOptionTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.HashSet; -import java.util.Set; -import org.junit.Test; - -public class TypedDriverOptionTest { - - /** - * Checks that every built-in {@link DriverOption} has an equivalent constant in {@link - * TypedDriverOption}. - */ - @Test - public void should_have_equivalents_for_all_builtin_untyped_options() { - Set optionsThatHaveATypedEquivalent = new HashSet<>(); - for (TypedDriverOption typedOption : TypedDriverOption.builtInValues()) { - optionsThatHaveATypedEquivalent.add(typedOption.getRawOption()); - } - - // These options are only used internally to compare policy configurations across profiles. - // Users never use them directly, so they don't need typed equivalents. 
- Set exclusions = - ImmutableSet.of( - DefaultDriverOption.LOAD_BALANCING_POLICY, - DefaultDriverOption.RETRY_POLICY, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY); - - for (DriverOption option : - ImmutableSet.builder() - .add(DefaultDriverOption.values()) - .add(DseDriverOption.values()) - .build()) { - if (!exclusions.contains(option)) { - assertThat(optionsThatHaveATypedEquivalent) - .as( - "Couldn't find a typed equivalent for %s.%s. " - + "You need to either add a constant in %s, or an exclusion in this test.", - option.getClass().getSimpleName(), option, TypedDriverOption.class.getSimpleName()) - .contains(option); - } - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementBuilderTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementBuilderTest.java deleted file mode 100644 index 9904b1e27d7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementBuilderTest.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import java.nio.ByteBuffer; -import org.junit.Test; - -public class StatementBuilderTest { - - private static class MockSimpleStatementBuilder - extends StatementBuilder { - - public MockSimpleStatementBuilder() { - super(); - } - - public MockSimpleStatementBuilder(SimpleStatement template) { - super(template); - } - - @Override - public SimpleStatement build() { - - SimpleStatement rv = mock(SimpleStatement.class); - when(rv.isTracing()).thenReturn(this.tracing); - when(rv.getRoutingKey()).thenReturn(this.routingKey); - return rv; - } - } - - @Test - public void should_handle_set_tracing_without_args() { - - MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(); - assertThat(builder.build().isTracing()).isFalse(); - builder.setTracing(); - assertThat(builder.build().isTracing()).isTrue(); - } - - @Test - public void should_handle_set_tracing_with_args() { - - MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(); - assertThat(builder.build().isTracing()).isFalse(); - builder.setTracing(true); - assertThat(builder.build().isTracing()).isTrue(); - builder.setTracing(false); - assertThat(builder.build().isTracing()).isFalse(); - } - - @Test - public void should_override_set_tracing_in_template() { - - SimpleStatement template = SimpleStatement.builder("select * from system.peers").build(); - MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(template); - assertThat(builder.build().isTracing()).isFalse(); - builder.setTracing(true); - assertThat(builder.build().isTracing()).isTrue(); - - template = SimpleStatement.builder("select * from system.peers").setTracing().build(); - builder = new MockSimpleStatementBuilder(template); - 
assertThat(builder.build().isTracing()).isTrue(); - builder.setTracing(false); - assertThat(builder.build().isTracing()).isFalse(); - } - - @Test - public void should_match_set_routing_key_vararg() { - - ByteBuffer buff1 = ByteBuffer.wrap("the quick brown fox".getBytes(Charsets.UTF_8)); - ByteBuffer buff2 = ByteBuffer.wrap("jumped over the lazy dog".getBytes(Charsets.UTF_8)); - - Statement expectedStmt = - SimpleStatement.builder("select * from system.peers").build().setRoutingKey(buff1, buff2); - - MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(); - Statement builderStmt = builder.setRoutingKey(buff1, buff2).build(); - assertThat(expectedStmt.getRoutingKey()).isEqualTo(builderStmt.getRoutingKey()); - - /* Confirm that order matters here */ - builderStmt = builder.setRoutingKey(buff2, buff1).build(); - assertThat(expectedStmt.getRoutingKey()).isNotEqualTo(builderStmt.getRoutingKey()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementProfileTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementProfileTest.java deleted file mode 100644 index af2dccd0432..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementProfileTest.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.cql.DefaultBoundStatement; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class StatementProfileTest { - - private static final DriverExecutionProfile PROFILE = mock(DriverExecutionProfile.class); - private static final String NAME = "mockProfileName"; - - @Test - @UseDataProvider("statements") - public void should_set_profile_and_name_on_statement( - Statement statement, - Operation operation1, - Operation operation2, - String expectedName, - DriverExecutionProfile expectedProfile) { - - statement = operation1.applyTo(statement); - statement = operation2.applyTo(statement); - - assertThat(statement.getExecutionProfileName()).isEqualTo(expectedName); - assertThat(statement.getExecutionProfile()).isEqualTo(expectedProfile); - } - - @Test - @UseDataProvider("builders") - public void should_set_profile_and_name_on_builder( - StatementBuilder builder, - Operation operation1, - Operation operation2, - String expectedName, - DriverExecutionProfile expectedProfile) { - - builder = operation1.applyTo(builder); - builder = operation2.applyTo(builder); - - Statement statement = builder.build(); - - assertThat(statement.getExecutionProfileName()).isEqualTo(expectedName); - 
assertThat(statement.getExecutionProfile()).isEqualTo(expectedProfile); - } - - private static Object[][] scenarios() { - return new Object[][] { - // operation1, operation2, expectedName, expectedProfile - - // only one set: - new Object[] {setProfile(PROFILE), noop(), null, PROFILE}, - new Object[] {setName(NAME), noop(), NAME, null}, - - // last one wins: - new Object[] {setProfile(PROFILE), setName(NAME), NAME, null}, - new Object[] {setName(NAME), setProfile(PROFILE), null, PROFILE}, - - // null does not unset other: - new Object[] {setProfile(PROFILE), setName(null), null, PROFILE}, - new Object[] {setName(NAME), setProfile(null), NAME, null}, - }; - } - - @DataProvider - public static Object[][] statements() { - SimpleStatement simpleStatement = SimpleStatement.newInstance("mock query"); - Object[][] statements = - TestDataProviders.fromList( - simpleStatement, - newBoundStatement(), - BatchStatement.newInstance(BatchType.LOGGED, simpleStatement)); - - return TestDataProviders.combine(statements, scenarios()); - } - - @DataProvider - public static Object[][] builders() { - SimpleStatement simpleStatement = SimpleStatement.newInstance("mock query"); - Object[][] builders = - TestDataProviders.fromList( - SimpleStatement.builder(simpleStatement), - new BoundStatementBuilder(newBoundStatement()), - BatchStatement.builder(BatchType.LOGGED).addStatement(simpleStatement)); - - return TestDataProviders.combine(builders, scenarios()); - } - - private interface Operation { - - Statement applyTo(Statement statement); - - StatementBuilder applyTo(StatementBuilder builder); - } - - private static Operation setProfile(DriverExecutionProfile profile) { - return new Operation() { - @Override - public Statement applyTo(Statement statement) { - return statement.setExecutionProfile(profile); - } - - @Override - public StatementBuilder applyTo(StatementBuilder builder) { - return builder.setExecutionProfile(profile); - } - }; - } - - private static Operation setName(String 
name) { - return new Operation() { - @Override - public Statement applyTo(Statement statement) { - return statement.setExecutionProfileName(name); - } - - @Override - public StatementBuilder applyTo(StatementBuilder builder) { - return builder.setExecutionProfileName(name); - } - }; - } - - private static Operation noop() { - return new Operation() { - @Override - public Statement applyTo(Statement statement) { - return statement; - } - - @Override - public StatementBuilder applyTo(StatementBuilder builder) { - return builder; - } - }; - } - - private static BoundStatement newBoundStatement() { - // Mock the minimum state needed to create a DefaultBoundStatement that can also be used to - // initialize a builder - PreparedStatement preparedStatement = mock(PreparedStatement.class); - ColumnDefinitions variableDefinitions = mock(ColumnDefinitions.class); - when(preparedStatement.getVariableDefinitions()).thenReturn(variableDefinitions); - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - new ByteBuffer[0], - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - 5000, - null, - null, - Duration.ZERO, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java deleted file mode 100644 index f55453b3eba..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.ZonedDateTime; -import java.time.temporal.ChronoUnit; -import java.time.temporal.UnsupportedTemporalTypeException; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class CqlDurationTest { - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_from_string_with_standard_pattern(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(CqlDuration.from("1y2mo")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("-1y2mo")).isEqualTo(CqlDuration.newInstance(-14, 0, 0)); - assertThat(CqlDuration.from("1Y2MO")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("2w")).isEqualTo(CqlDuration.newInstance(0, 14, 0)); - assertThat(CqlDuration.from("2d10h")) - 
.isEqualTo(CqlDuration.newInstance(0, 2, 10 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("2d")).isEqualTo(CqlDuration.newInstance(0, 2, 0)); - assertThat(CqlDuration.from("30h")) - .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("30h20m")) - .isEqualTo( - CqlDuration.newInstance( - 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("20m")) - .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("56s")) - .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); - assertThat(CqlDuration.from("567ms")) - .isEqualTo(CqlDuration.newInstance(0, 0, 567 * CqlDuration.NANOS_PER_MILLI)); - assertThat(CqlDuration.from("1950us")) - .isEqualTo(CqlDuration.newInstance(0, 0, 1950 * CqlDuration.NANOS_PER_MICRO)); - assertThat(CqlDuration.from("1950µs")) - .isEqualTo(CqlDuration.newInstance(0, 0, 1950 * CqlDuration.NANOS_PER_MICRO)); - assertThat(CqlDuration.from("1950000ns")).isEqualTo(CqlDuration.newInstance(0, 0, 1950000)); - assertThat(CqlDuration.from("1950000NS")).isEqualTo(CqlDuration.newInstance(0, 0, 1950000)); - assertThat(CqlDuration.from("-1950000ns")).isEqualTo(CqlDuration.newInstance(0, 0, -1950000)); - assertThat(CqlDuration.from("1y3mo2h10m")) - .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_from_string_with_iso8601_pattern(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(CqlDuration.from("P1Y2D")).isEqualTo(CqlDuration.newInstance(12, 2, 0)); - assertThat(CqlDuration.from("P1Y2M")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("P2W")).isEqualTo(CqlDuration.newInstance(0, 14, 0)); - 
assertThat(CqlDuration.from("P1YT2H")) - .isEqualTo(CqlDuration.newInstance(12, 0, 2 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("-P1Y2M")).isEqualTo(CqlDuration.newInstance(-14, 0, 0)); - assertThat(CqlDuration.from("P2D")).isEqualTo(CqlDuration.newInstance(0, 2, 0)); - assertThat(CqlDuration.from("PT30H")) - .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("PT30H20M")) - .isEqualTo( - CqlDuration.newInstance( - 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("PT20M")) - .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("PT56S")) - .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); - assertThat(CqlDuration.from("P1Y3MT2H10M")) - .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_from_string_with_iso8601_alternative_pattern(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(CqlDuration.from("P0001-00-02T00:00:00")) - .isEqualTo(CqlDuration.newInstance(12, 2, 0)); - assertThat(CqlDuration.from("P0001-02-00T00:00:00")) - .isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("P0001-00-00T02:00:00")) - .isEqualTo(CqlDuration.newInstance(12, 0, 2 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("-P0001-02-00T00:00:00")) - .isEqualTo(CqlDuration.newInstance(-14, 0, 0)); - assertThat(CqlDuration.from("P0000-00-02T00:00:00")) - .isEqualTo(CqlDuration.newInstance(0, 2, 0)); - assertThat(CqlDuration.from("P0000-00-00T30:00:00")) - .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("P0000-00-00T30:20:00")) - .isEqualTo( - 
CqlDuration.newInstance( - 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("P0000-00-00T00:20:00")) - .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("P0000-00-00T00:00:56")) - .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); - assertThat(CqlDuration.from("P0001-03-00T02:10:00")) - .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); - } finally { - Locale.setDefault(def); - } - } - - @Test - public void should_fail_to_parse_invalid_durations() { - assertInvalidDuration( - Long.MAX_VALUE + "d", - "Invalid duration. The total number of days must be less or equal to 2147483647"); - assertInvalidDuration("2µ", "Unable to convert '2µ' to a duration"); - assertInvalidDuration("-2µ", "Unable to convert '2µ' to a duration"); - assertInvalidDuration("12.5s", "Unable to convert '12.5s' to a duration"); - assertInvalidDuration("2m12.5s", "Unable to convert '2m12.5s' to a duration"); - assertInvalidDuration("2m-12s", "Unable to convert '2m-12s' to a duration"); - assertInvalidDuration("12s3s", "Invalid duration. The seconds are specified multiple times"); - assertInvalidDuration("12s3m", "Invalid duration. The seconds should be after minutes"); - assertInvalidDuration("1Y3M4D", "Invalid duration. 
The minutes should be after days"); - assertInvalidDuration("P2Y3W", "Unable to convert 'P2Y3W' to a duration"); - assertInvalidDuration("P0002-00-20", "Unable to convert 'P0002-00-20' to a duration"); - } - - private void assertInvalidDuration(String duration, String expectedErrorMessage) { - try { - CqlDuration.from(duration); - fail("Expected RuntimeException"); - } catch (RuntimeException e) { - assertThat(e.getMessage()).isEqualTo(expectedErrorMessage); - } - } - - @Test - public void should_get_by_unit() { - CqlDuration duration = CqlDuration.from("3mo2d15s"); - assertThat(duration.get(ChronoUnit.MONTHS)).isEqualTo(3); - assertThat(duration.get(ChronoUnit.DAYS)).isEqualTo(2); - assertThat(duration.get(ChronoUnit.NANOS)).isEqualTo(15 * CqlDuration.NANOS_PER_SECOND); - assertThatThrownBy(() -> duration.get(ChronoUnit.YEARS)) - .isInstanceOf(UnsupportedTemporalTypeException.class); - } - - @Test - public void should_add_to_temporal() { - ZonedDateTime dateTime = ZonedDateTime.parse("2018-10-04T00:00-07:00[America/Los_Angeles]"); - assertThat(dateTime.plus(CqlDuration.from("1mo"))) - .isEqualTo("2018-11-04T00:00-07:00[America/Los_Angeles]"); - assertThat(dateTime.plus(CqlDuration.from("1mo1h10s"))) - .isEqualTo("2018-11-04T01:00:10-07:00[America/Los_Angeles]"); - // 11-04 2:00 is daylight saving time end - assertThat(dateTime.plus(CqlDuration.from("1mo3h"))) - .isEqualTo("2018-11-04T02:00-08:00[America/Los_Angeles]"); - } - - @Test - public void should_subtract_from_temporal() { - ZonedDateTime dateTime = ZonedDateTime.parse("2018-10-04T00:00-07:00[America/Los_Angeles]"); - assertThat(dateTime.minus(CqlDuration.from("2mo"))) - .isEqualTo("2018-08-04T00:00-07:00[America/Los_Angeles]"); - assertThat(dateTime.minus(CqlDuration.from("1h15s15ns"))) - .isEqualTo("2018-10-03T22:59:44.999999985-07:00[America/Los_Angeles]"); - } - - @Test - public void should_serialize_and_deserialize() throws Exception { - CqlDuration initial = CqlDuration.from("3mo2d15s"); - CqlDuration 
deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } - - @Test - public void should_serialize_and_deserialize_negative() throws Exception { - CqlDuration initial = CqlDuration.from("-2d15m"); - CqlDuration deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlVectorTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlVectorTest.java deleted file mode 100644 index 3e0872cb946..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlVectorTest.java +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.ByteArrayInputStream; -import java.io.ObjectInputStream; -import java.io.ObjectStreamException; -import java.time.LocalTime; -import java.util.AbstractList; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.stream.Collectors; -import org.apache.commons.codec.DecoderException; -import org.apache.commons.codec.binary.Hex; -import org.assertj.core.util.Lists; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class CqlVectorTest { - - @DataProvider - public static Object[][] dataProvider() { - return new Object[][] { - {new Float[] {1.0f, 2.5f}}, - {new LocalTime[] {LocalTime.of(1, 2), LocalTime.of(3, 4)}}, - {new List[] {Arrays.asList(1, 2), Arrays.asList(3, 4)}}, - {new CqlVector[] {CqlVector.newInstance("a", "bc"), CqlVector.newInstance("d", "ef")}} - }; - } - - private void validate_built_vector(CqlVector vec, Object[] expectedVals) { - assertThat(vec.size()).isEqualTo(2); - assertThat(vec.isEmpty()).isFalse(); - assertThat(vec.get(0)).isEqualTo(expectedVals[0]); - assertThat(vec.get(1)).isEqualTo(expectedVals[1]); - } - - @UseDataProvider("dataProvider") - @Test - public void should_build_vector_from_elements(Object[] vals) { - validate_built_vector(CqlVector.newInstance(vals), vals); - } - - @Test - @UseDataProvider("dataProvider") - public void 
should_build_vector_from_list(Object[] vals) { - validate_built_vector(CqlVector.newInstance(Lists.newArrayList(vals)), vals); - } - - @Test - public void should_throw_from_null_string() { - assertThatThrownBy( - () -> { - CqlVector.from(null, TypeCodecs.FLOAT); - }) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_throw_from_empty_string() { - - assertThatThrownBy( - () -> { - CqlVector.from("", TypeCodecs.FLOAT); - }) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_throw_when_building_with_nulls() { - - assertThatThrownBy( - () -> { - CqlVector.newInstance(1.1f, null, 2.2f); - }) - .isInstanceOf(IllegalArgumentException.class); - - Float[] theArray = new Float[] {1.1f, null, 2.2f}; - assertThatThrownBy( - () -> { - CqlVector.newInstance(theArray); - }) - .isInstanceOf(IllegalArgumentException.class); - - List theList = Lists.newArrayList(1.1f, null, 2.2f); - assertThatThrownBy( - () -> { - CqlVector.newInstance(theList); - }) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_build_empty_vector() { - CqlVector vector = CqlVector.newInstance(); - assertThat(vector.isEmpty()).isTrue(); - assertThat(vector.size()).isEqualTo(0); - } - - @Test - @UseDataProvider("dataProvider") - public void should_behave_mostly_like_a_list(T[] vals) { - T[] theArray = Arrays.copyOf(vals, vals.length); - CqlVector vector = CqlVector.newInstance(theArray); - assertThat(vector.get(0)).isEqualTo(theArray[0]); - vector.set(0, theArray[1]); - assertThat(vector.get(0)).isEqualTo(theArray[1]); - assertThat(vector.isEmpty()).isFalse(); - assertThat(vector.size()).isEqualTo(2); - Iterator iterator = vector.iterator(); - assertThat(iterator.next()).isEqualTo(theArray[1]); - assertThat(iterator.next()).isEqualTo(theArray[1]); - } - - @Test - @UseDataProvider("dataProvider") - public void should_play_nicely_with_streams(T[] vals) { - CqlVector vector = CqlVector.newInstance(vals); - List 
results = - vector.stream() - .map(Object::toString) - .collect(Collectors.toCollection(() -> new ArrayList())); - for (int i = 0; i < vector.size(); ++i) { - assertThat(results.get(i)).isEqualTo(vector.get(i).toString()); - } - } - - @Test - @UseDataProvider("dataProvider") - public void should_reflect_changes_to_mutable_list(T[] vals) { - List theList = Lists.newArrayList(vals); - CqlVector vector = CqlVector.newInstance(theList); - assertThat(vector.size()).isEqualTo(2); - assertThat(vector.get(1)).isEqualTo(vals[1]); - - T newVal = vals[0]; - theList.set(1, newVal); - assertThat(vector.size()).isEqualTo(2); - assertThat(vector.get(1)).isEqualTo(newVal); - } - - @Test - @UseDataProvider("dataProvider") - public void should_reflect_changes_to_array(T[] vals) { - T[] theArray = Arrays.copyOf(vals, vals.length); - CqlVector vector = CqlVector.newInstance(theArray); - assertThat(vector.size()).isEqualTo(2); - assertThat(vector.get(1)).isEqualTo(theArray[1]); - - T newVal = theArray[0]; - theArray[1] = newVal; - assertThat(vector.size()).isEqualTo(2); - assertThat(vector.get(1)).isEqualTo(newVal); - } - - @Test - @UseDataProvider("dataProvider") - public void should_correctly_compare_vectors(T[] vals) { - CqlVector vector1 = CqlVector.newInstance(vals); - CqlVector vector2 = CqlVector.newInstance(vals); - CqlVector vector3 = CqlVector.newInstance(Lists.newArrayList(vals)); - assertThat(vector1).isNotSameAs(vector2); - assertThat(vector1).isEqualTo(vector2); - assertThat(vector1).isNotSameAs(vector3); - assertThat(vector1).isEqualTo(vector3); - - T[] differentArgs = Arrays.copyOf(vals, vals.length); - T newVal = differentArgs[1]; - differentArgs[0] = newVal; - CqlVector vector4 = CqlVector.newInstance(differentArgs); - assertThat(vector1).isNotSameAs(vector4); - assertThat(vector1).isNotEqualTo(vector4); - - T[] biggerArgs = Arrays.copyOf(vals, vals.length + 1); - biggerArgs[biggerArgs.length - 1] = newVal; - CqlVector vector5 = CqlVector.newInstance(biggerArgs); - 
assertThat(vector1).isNotSameAs(vector5); - assertThat(vector1).isNotEqualTo(vector5); - } - - @Test - @UseDataProvider("dataProvider") - public void should_serialize_and_deserialize(T[] vals) throws Exception { - CqlVector initial = CqlVector.newInstance(vals); - CqlVector deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } - - @Test - public void should_serialize_and_deserialize_empty_vector() throws Exception { - CqlVector initial = CqlVector.newInstance(Collections.emptyList()); - CqlVector deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } - - @Test - @UseDataProvider("dataProvider") - public void should_serialize_and_deserialize_unserializable_list(T[] vals) throws Exception { - CqlVector initial = - CqlVector.newInstance( - new AbstractList() { - @Override - public T get(int index) { - return vals[index]; - } - - @Override - public int size() { - return vals.length; - } - }); - CqlVector deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } - - @Test - public void should_not_use_preallocate_serialized_size() throws DecoderException { - // serialized CqlVector(1.0f, 2.5f, 3.0f) with size field adjusted to Integer.MAX_VALUE - byte[] suspiciousBytes = - Hex.decodeHex( - "aced000573720042636f6d2e64617461737461782e6f73732e6472697665722e6170692e636f72652e646174612e43716c566563746f722453657269616c697a6174696f6e50726f78790000000000000001030000787077047fffffff7372000f6a6176612e6c616e672e466c6f6174daedc9a2db3cf0ec02000146000576616c7565787200106a6176612e6c616e672e4e756d62657286ac951d0b94e08b02000078703f8000007371007e0002402000007371007e00024040000078" - .toCharArray()); - try { - new ObjectInputStream(new ByteArrayInputStream(suspiciousBytes)).readObject(); - fail("Should not be able to deserialize bytes with incorrect size field"); - } catch (Exception e) { - // check 
we fail to deserialize, rather than OOM - assertThat(e).isInstanceOf(ObjectStreamException.class); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListenerTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListenerTest.java deleted file mode 100644 index a5b9b447e6a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListenerTest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.session.Session; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class SafeInitNodeStateListenerTest { - - @Mock private NodeStateListener delegate; - @Mock private Node node; - @Mock private Session session; - - @Test - public void should_replay_init_events() { - SafeInitNodeStateListener wrapper = new SafeInitNodeStateListener(delegate, true); - - // Not a realistic sequence of invocations in the driver, but that doesn't matter - wrapper.onAdd(node); - wrapper.onUp(node); - wrapper.onSessionReady(session); - wrapper.onDown(node); - - InOrder inOrder = Mockito.inOrder(delegate); - inOrder.verify(delegate).onSessionReady(session); - inOrder.verify(delegate).onAdd(node); - inOrder.verify(delegate).onUp(node); - inOrder.verify(delegate).onDown(node); - } - - @Test - public void should_discard_init_events() { - SafeInitNodeStateListener wrapper = new SafeInitNodeStateListener(delegate, false); - - wrapper.onAdd(node); - wrapper.onUp(node); - wrapper.onSessionReady(session); - wrapper.onDown(node); - - InOrder inOrder = Mockito.inOrder(delegate); - inOrder.verify(delegate).onSessionReady(session); - inOrder.verify(delegate).onDown(node); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerAsyncTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerAsyncTest.java deleted file mode 100644 index 3963bf6de84..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerAsyncTest.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.paging; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.paging.OffsetPager.Page; -import com.datastax.oss.driver.internal.core.MockAsyncPagingIterable; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class OffsetPagerAsyncTest extends OffsetPagerTestBase { - - @Override - protected Page getActualPage( - OffsetPager pager, OffsetPagerTestFixture fixture, int fetchSize) { - CompletionStage> pageFuture = - pager.getPage(fixture.getAsyncIterable(fetchSize), fixture.getRequestedPage()); - return CompletableFutures.getCompleted(pageFuture); - } - - /** - * Covers the corner case where the server sends back an empty frame at the end of the result set. 
- */ - @Test - @UseDataProvider("fetchSizes") - public void should_return_last_page_when_result_finishes_with_empty_frame(int fetchSize) { - MockAsyncPagingIterable iterable = - new MockAsyncPagingIterable<>(ImmutableList.of("a", "b", "c"), fetchSize, true); - OffsetPager pager = new OffsetPager(3); - Page page = CompletableFutures.getCompleted(pager.getPage(iterable, 1)); - - assertThat(page.getElements()).containsExactly("a", "b", "c"); - assertThat(page.getPageNumber()).isEqualTo(1); - assertThat(page.isLast()).isTrue(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerSyncTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerSyncTest.java deleted file mode 100644 index 0d8b380dd49..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerSyncTest.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.paging; - -public class OffsetPagerSyncTest extends OffsetPagerTestBase { - - @Override - protected OffsetPager.Page getActualPage( - OffsetPager pager, OffsetPagerTestFixture fixture, /*ignored*/ int fetchSize) { - return pager.getPage(fixture.getSyncIterable(), fixture.getRequestedPage()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestBase.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestBase.java deleted file mode 100644 index 7f9ca2ddaa2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestBase.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.paging; - -import com.datastax.oss.driver.TestDataProviders; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public abstract class OffsetPagerTestBase { - - /** - * The fetch size only matters for the async implementation. For sync this will essentially run - * the same fixture 4 times, but that's not a problem because tests are fast. - */ - @DataProvider - public static Object[][] fetchSizes() { - return TestDataProviders.fromList(1, 2, 3, 100); - } - - @DataProvider - public static Object[][] scenarios() { - Object[][] fixtures = - TestDataProviders.fromList( - // ------- inputs -------- | ------ expected ------- - // iterable | page | size | page | contents | last? - "a,b,c,d,e,f | 1 | 3 | 1 | a,b,c | false", - "a,b,c,d,e,f | 2 | 3 | 2 | d,e,f | true", - "a,b,c,d,e,f | 2 | 4 | 2 | e,f | true", - "a,b,c,d,e,f | 2 | 5 | 2 | f | true", - "a,b,c | 1 | 3 | 1 | a,b,c | true", - "a,b | 1 | 3 | 1 | a,b | true", - "a | 1 | 3 | 1 | a | true", - // Empty iterator => return one empty page - " | 1 | 3 | 1 | | true", - // Past the end => return last page - "a,b,c,d,e,f | 3 | 3 | 2 | d,e,f | true", - "a,b,c,d,e | 3 | 3 | 2 | d,e | true"); - return TestDataProviders.combine(fixtures, fetchSizes()); - } - - @Test - @UseDataProvider("scenarios") - public void should_return_existing_page(String fixtureSpec, int fetchSize) { - OffsetPagerTestFixture fixture = new OffsetPagerTestFixture(fixtureSpec); - OffsetPager pager = new OffsetPager(fixture.getPageSize()); - OffsetPager.Page actualPage = getActualPage(pager, fixture, fetchSize); - fixture.assertMatches(actualPage); - } - - protected abstract OffsetPager.Page getActualPage( - OffsetPager pager, OffsetPagerTestFixture fixture, int fetchSize); -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestFixture.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestFixture.java deleted file mode 100644 index 91079722aa2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestFixture.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.paging; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.internal.core.MockAsyncPagingIterable; -import com.datastax.oss.driver.internal.core.MockPagingIterable; -import com.datastax.oss.driver.shaded.guava.common.base.Splitter; -import java.util.List; - -public class OffsetPagerTestFixture { - - private static final Splitter SPEC_SPLITTER = Splitter.on('|').trimResults(); - private static final Splitter LIST_SPLITTER = Splitter.on(',').trimResults().omitEmptyStrings(); - - private final List inputElements; - private final int requestedPage; - private final int pageSize; - private final int expectedPageNumber; - private final List expectedElements; - private final boolean expectedIsLast; - - public OffsetPagerTestFixture(String spec) { - List components = SPEC_SPLITTER.splitToList(spec); - int size = components.size(); - if (size != 3 && size != 6) { - fail("Invalid fixture spec, expected 3 or 5 components"); - } - - this.inputElements = LIST_SPLITTER.splitToList(components.get(0)); - this.requestedPage = Integer.parseInt(components.get(1)); - this.pageSize = Integer.parseInt(components.get(2)); - if (size == 3) { - this.expectedPageNumber = -1; - this.expectedElements = null; - this.expectedIsLast = false; - } else { - this.expectedPageNumber = Integer.parseInt(components.get(3)); - this.expectedElements = LIST_SPLITTER.splitToList(components.get(4)); - this.expectedIsLast = Boolean.parseBoolean(components.get(5)); - } - } - - public PagingIterable getSyncIterable() { - return new MockPagingIterable<>(inputElements.iterator()); - } - - public MockAsyncPagingIterable getAsyncIterable(int fetchSize) { - return new MockAsyncPagingIterable<>(inputElements, fetchSize, false); - } - - public int getRequestedPage() { - return requestedPage; - } - - public int 
getPageSize() { - return pageSize; - } - - public void assertMatches(OffsetPager.Page actualPage) { - assertThat(actualPage.getPageNumber()).isEqualTo(expectedPageNumber); - assertThat(actualPage.getElements()).isEqualTo(expectedElements); - assertThat(actualPage.isLast()).isEqualTo(expectedIsLast); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/retry/ConsistencyDowngradingRetryPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/retry/ConsistencyDowngradingRetryPolicyTest.java deleted file mode 100644 index e4463d833bf..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/retry/ConsistencyDowngradingRetryPolicyTest.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.retry; - -import static com.datastax.oss.driver.api.core.ConsistencyLevel.EACH_QUORUM; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.ONE; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.QUORUM; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.SERIAL; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.THREE; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.TWO; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.IGNORE; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETHROW; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_NEXT; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_SAME; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH_LOG; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.CAS; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.CDC; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.COUNTER; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.SIMPLE; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.UNLOGGED_BATCH; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.VIEW; - -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.TruncateException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import 
com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryPolicy; -import org.junit.Test; - -public class ConsistencyDowngradingRetryPolicyTest extends RetryPolicyTestBase { - - public ConsistencyDowngradingRetryPolicyTest() { - super(new ConsistencyDowngradingRetryPolicy("test")); - } - - @Test - public void should_process_read_timeouts() { - // retry count != 0 - assertOnReadTimeout(QUORUM, 2, 2, false, 1).hasDecision(RETHROW); - // serial CL - assertOnReadTimeout(SERIAL, 2, 2, false, 0).hasDecision(RETHROW); - // received < blockFor - assertOnReadTimeout(QUORUM, 4, 3, true, 0).hasDecision(RETRY_SAME).hasConsistency(THREE); - assertOnReadTimeout(QUORUM, 4, 3, false, 0).hasDecision(RETRY_SAME).hasConsistency(THREE); - assertOnReadTimeout(QUORUM, 3, 2, true, 0).hasDecision(RETRY_SAME).hasConsistency(TWO); - assertOnReadTimeout(QUORUM, 3, 2, false, 0).hasDecision(RETRY_SAME).hasConsistency(TWO); - assertOnReadTimeout(QUORUM, 2, 1, true, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnReadTimeout(QUORUM, 2, 1, false, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnReadTimeout(EACH_QUORUM, 2, 0, true, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnReadTimeout(EACH_QUORUM, 2, 0, false, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnReadTimeout(QUORUM, 2, 0, true, 0).hasDecision(RETHROW); - assertOnReadTimeout(QUORUM, 2, 0, false, 0).hasDecision(RETHROW); - // data present - assertOnReadTimeout(QUORUM, 2, 2, false, 0).hasDecision(RETRY_SAME); - assertOnReadTimeout(QUORUM, 2, 2, true, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_write_timeouts() { - // retry count != 0 - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 1).hasDecision(RETHROW); - // SIMPLE - assertOnWriteTimeout(QUORUM, SIMPLE, 2, 1, 0).hasDecision(IGNORE); - assertOnWriteTimeout(QUORUM, SIMPLE, 2, 0, 0).hasDecision(RETHROW); - // BATCH - assertOnWriteTimeout(QUORUM, BATCH, 2, 1, 0).hasDecision(IGNORE); - 
assertOnWriteTimeout(QUORUM, BATCH, 2, 0, 0).hasDecision(RETHROW); - // UNLOGGED_BATCH - assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 4, 3, 0) - .hasDecision(RETRY_SAME) - .hasConsistency(THREE); - assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 3, 2, 0) - .hasDecision(RETRY_SAME) - .hasConsistency(TWO); - assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 2, 1, 0) - .hasDecision(RETRY_SAME) - .hasConsistency(ONE); - assertOnWriteTimeout(EACH_QUORUM, UNLOGGED_BATCH, 2, 0, 0) - .hasDecision(RETRY_SAME) - .hasConsistency(ONE); - assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 2, 0, 0).hasDecision(RETHROW); - // BATCH_LOG - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 1, 0).hasDecision(RETRY_SAME); - // others - assertOnWriteTimeout(QUORUM, COUNTER, 2, 1, 0).hasDecision(RETHROW); - assertOnWriteTimeout(QUORUM, CAS, 2, 1, 0).hasDecision(RETHROW); - assertOnWriteTimeout(QUORUM, VIEW, 2, 1, 0).hasDecision(RETHROW); - assertOnWriteTimeout(QUORUM, CDC, 2, 1, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_unavailable() { - // retry count != 0 - assertOnUnavailable(QUORUM, 2, 1, 1).hasDecision(RETHROW); - // SERIAL - assertOnUnavailable(SERIAL, 2, 1, 0).hasDecision(RETRY_NEXT); - // downgrade - assertOnUnavailable(QUORUM, 4, 3, 0).hasDecision(RETRY_SAME).hasConsistency(THREE); - assertOnUnavailable(QUORUM, 3, 2, 0).hasDecision(RETRY_SAME).hasConsistency(TWO); - assertOnUnavailable(QUORUM, 2, 1, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnUnavailable(EACH_QUORUM, 2, 0, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnUnavailable(QUORUM, 2, 0, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_aborted_request() { - assertOnRequestAborted(ClosedConnectionException.class, 0).hasDecision(RETRY_NEXT); - assertOnRequestAborted(ClosedConnectionException.class, 1).hasDecision(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 0).hasDecision(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 
1).hasDecision(RETRY_NEXT); - assertOnRequestAborted(Throwable.class, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_error_response() { - assertOnErrorResponse(ReadFailureException.class, 0).hasDecision(RETHROW); - assertOnErrorResponse(ReadFailureException.class, 1).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 0).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); - - assertOnErrorResponse(OverloadedException.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(OverloadedException.class, 1).hasDecision(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 1).hasDecision(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 1).hasDecision(RETRY_NEXT); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java deleted file mode 100644 index e36ccff2b91..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.retry; - -import static com.datastax.oss.driver.api.core.DefaultConsistencyLevel.QUORUM; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETHROW; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_NEXT; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_SAME; -import static com.datastax.oss.driver.api.core.servererrors.DefaultWriteType.BATCH_LOG; -import static com.datastax.oss.driver.api.core.servererrors.DefaultWriteType.SIMPLE; - -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.TruncateException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; -import org.junit.Test; - -public class DefaultRetryPolicyTest extends RetryPolicyTestBase { - - public DefaultRetryPolicyTest() { - super(new DefaultRetryPolicy(null, null)); - } - - @Test - public void should_process_read_timeouts() { - assertOnReadTimeout(QUORUM, 2, 2, false, 0).hasDecision(RETRY_SAME); - assertOnReadTimeout(QUORUM, 2, 2, false, 1).hasDecision(RETHROW); - assertOnReadTimeout(QUORUM, 2, 2, true, 
0).hasDecision(RETHROW); - assertOnReadTimeout(QUORUM, 2, 1, true, 0).hasDecision(RETHROW); - assertOnReadTimeout(QUORUM, 2, 1, false, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_write_timeouts() { - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 0).hasDecision(RETRY_SAME); - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 1).hasDecision(RETHROW); - assertOnWriteTimeout(QUORUM, SIMPLE, 2, 0, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_unavailable() { - assertOnUnavailable(QUORUM, 2, 1, 0).hasDecision(RETRY_NEXT); - assertOnUnavailable(QUORUM, 2, 1, 1).hasDecision(RETHROW); - } - - @Test - public void should_process_aborted_request() { - assertOnRequestAborted(ClosedConnectionException.class, 0).hasDecision(RETRY_NEXT); - assertOnRequestAborted(ClosedConnectionException.class, 1).hasDecision(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 0).hasDecision(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 1).hasDecision(RETRY_NEXT); - assertOnRequestAborted(Throwable.class, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_error_response() { - assertOnErrorResponse(ReadFailureException.class, 0).hasDecision(RETHROW); - assertOnErrorResponse(ReadFailureException.class, 1).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 0).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); - - assertOnErrorResponse(OverloadedException.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(OverloadedException.class, 1).hasDecision(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 1).hasDecision(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 1).hasDecision(RETRY_NEXT); - } -} 
diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java b/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java deleted file mode 100644 index a57f4ab352f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.retry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryVerdict; -import org.assertj.core.api.AbstractAssert; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public abstract class RetryPolicyTestBase { - private final RetryPolicy policy; - - @Mock private Request request; - - protected RetryPolicyTestBase(RetryPolicy policy) { - this.policy = policy; - } - - protected RetryVerdictAssert assertOnReadTimeout( - ConsistencyLevel cl, int blockFor, int received, boolean dataPresent, int retryCount) { - return new RetryVerdictAssert( - policy.onReadTimeoutVerdict(request, cl, blockFor, received, dataPresent, retryCount)); - } - - protected RetryVerdictAssert assertOnWriteTimeout( - ConsistencyLevel cl, WriteType writeType, int blockFor, int received, int retryCount) { - return new RetryVerdictAssert( - policy.onWriteTimeoutVerdict(request, cl, writeType, blockFor, received, retryCount)); - } - - protected RetryVerdictAssert assertOnUnavailable( - ConsistencyLevel cl, int required, int alive, int retryCount) { - return new RetryVerdictAssert( - policy.onUnavailableVerdict(request, cl, required, alive, retryCount)); - } - - protected RetryVerdictAssert assertOnRequestAborted( - Class errorClass, int retryCount) { - return new RetryVerdictAssert( - policy.onRequestAbortedVerdict(request, mock(errorClass), retryCount)); - } - - protected RetryVerdictAssert assertOnErrorResponse( - Class errorClass, int retryCount) { - return new RetryVerdictAssert( 
- policy.onErrorResponseVerdict(request, mock(errorClass), retryCount)); - } - - public static class RetryVerdictAssert extends AbstractAssert { - RetryVerdictAssert(RetryVerdict actual) { - super(actual, RetryVerdictAssert.class); - } - - public RetryVerdictAssert hasDecision(RetryDecision decision) { - assertThat(actual.getRetryDecision()).isEqualTo(decision); - return this; - } - - public RetryVerdictAssert hasConsistency(ConsistencyLevel cl) { - assertThat(actual) - .isInstanceOf(ConsistencyDowngradingRetryVerdict.class) - .extracting("consistencyLevel") - .isEqualTo(cl); - return this; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java deleted file mode 100644 index efd804fa66e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.specex; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy; -import java.time.Duration; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class ConstantSpeculativeExecutionPolicyTest { - @Mock private DriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private Request request; - - @Before - public void setup() { - when(context.getConfig()).thenReturn(config); - when(config.getProfile(DriverExecutionProfile.DEFAULT_NAME)).thenReturn(defaultProfile); - } - - private void mockOptions(int maxExecutions, long constantDelayMillis) { - when(defaultProfile.getInt(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX)) - .thenReturn(maxExecutions); - when(defaultProfile.getDuration(DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY)) - .thenReturn(Duration.ofMillis(constantDelayMillis)); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_delay_negative() { - mockOptions(1, -10); - new ConstantSpeculativeExecutionPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_max_less_than_one() { - mockOptions(0, 10); - new ConstantSpeculativeExecutionPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } - - @Test - public void 
should_return_delay_until_max() { - mockOptions(3, 10); - SpeculativeExecutionPolicy policy = - new ConstantSpeculativeExecutionPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - - // Initial execution starts, schedule first speculative execution - assertThat(policy.nextExecution(null, null, request, 1)).isEqualTo(10); - // First speculative execution starts, schedule second one - assertThat(policy.nextExecution(null, null, request, 2)).isEqualTo(10); - // Second speculative execution starts, we're at 3 => stop - assertThat(policy.nextExecution(null, null, request, 3)).isNegative(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java deleted file mode 100644 index 9db93b37c91..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import org.junit.Test; - -public class UserDefinedTypeTest { - - private static final UserDefinedType ADDRESS_TYPE = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("test"), CqlIdentifier.fromInternal("address")) - // Not actually used in this test, but UDTs must have fields: - .withField(CqlIdentifier.fromInternal("street"), DataTypes.TEXT) - .frozen() - .build(); - private static final UserDefinedType ACCOUNT_TYPE = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("test"), CqlIdentifier.fromInternal("account")) - .withField(CqlIdentifier.fromInternal("ID"), DataTypes.TEXT) // case-sensitive - .withField(CqlIdentifier.fromInternal("name"), DataTypes.TEXT) - .withField(CqlIdentifier.fromInternal("address"), ADDRESS_TYPE) - .withField( - CqlIdentifier.fromInternal("frozen_list"), DataTypes.frozenListOf(DataTypes.TEXT)) - .withField( - CqlIdentifier.fromInternal("list_of_map"), - DataTypes.listOf(DataTypes.frozenMapOf(DataTypes.TEXT, DataTypes.INT))) - .build(); - - @Test - public void should_describe_as_cql() { - assertThat(ACCOUNT_TYPE.describe(false)) - .isEqualTo( - "CREATE TYPE \"test\".\"account\" ( \"ID\" text, \"name\" text, \"address\" frozen<\"test\".\"address\">, \"frozen_list\" frozen>, \"list_of_map\" list>> );"); - } - - @Test - public void should_describe_as_pretty_cql() { - assertThat(ACCOUNT_TYPE.describe(true)) - .isEqualTo( - "CREATE TYPE test.account (\n" - + " \"ID\" text,\n" - + " name text,\n" - + " address frozen,\n" - + " frozen_list frozen>,\n" - + " list_of_map list>>\n" - + ");"); - } - - @Test - public void should_evaluate_equality() { - assertThat(ACCOUNT_TYPE.newValue()).isEqualTo(ACCOUNT_TYPE.newValue()); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java deleted file mode 100644 index bddb8f92773..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.reflect; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeToken; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import org.junit.Test; - -public class GenericTypeTest { - - @Test - public void should_wrap_class() { - GenericType stringType = GenericType.of(String.class); - assertThat(stringType.__getToken()).isEqualTo(TypeToken.of(String.class)); - } - - @Test - public void should_capture_generic_type() { - GenericType> stringListType = new GenericType>() {}; - TypeToken> stringListToken = new TypeToken>() {}; - assertThat(stringListType.__getToken()).isEqualTo(stringListToken); - } - - @Test - public void should_wrap_classes_in_collection() { - GenericType> mapType = GenericType.mapOf(String.class, Integer.class); - assertThat(mapType.__getToken()).isEqualTo(new TypeToken>() {}); - } - - @Test - public void should_wrap_types_in_collection() { - GenericType>> mapType = - GenericType.mapOf(GenericType.of(String.class), GenericType.listOf(Integer.class)); - assertThat(mapType.__getToken()).isEqualTo(new TypeToken>>() {}); - } - - @Test - public void should_substitute_type_parameters() { - assertThat(optionalOf(GenericType.listOf(String.class)).__getToken()) - .isEqualTo(new TypeToken>>() {}); - assertThat(mapOf(String.class, Integer.class).__getToken()) - .isEqualTo(new TypeToken>() {}); - } - - @Test - public void should_report_supertype() { - assertThat(GenericType.of(Number.class).isSupertypeOf(GenericType.of(Integer.class))).isTrue(); - assertThat(GenericType.of(Integer.class).isSupertypeOf(GenericType.of(Number.class))).isFalse(); - } - - @Test - public void should_report_subtype() { - assertThat(GenericType.of(Number.class).isSubtypeOf(GenericType.of(Integer.class))).isFalse(); - assertThat(GenericType.of(Integer.class).isSubtypeOf(GenericType.of(Number.class))).isTrue(); - } - - @Test - public void 
should_wrap_primitive_type() { - assertThat(GenericType.of(Integer.TYPE).wrap()).isEqualTo(GenericType.of(Integer.class)); - GenericType stringType = GenericType.of(String.class); - assertThat(stringType.wrap()).isSameAs(stringType); - } - - @Test - public void should_unwrap_wrapper_type() { - assertThat(GenericType.of(Integer.class).unwrap()).isEqualTo(GenericType.of(Integer.TYPE)); - GenericType stringType = GenericType.of(String.class); - assertThat(stringType.unwrap()).isSameAs(stringType); - } - - @Test - public void should_return_raw_type() { - assertThat(GenericType.INTEGER.getRawType()).isEqualTo(Integer.class); - assertThat(GenericType.listOf(Integer.class).getRawType()).isEqualTo(List.class); - } - - @Test - public void should_return_super_type() { - GenericType> expectedType = iterableOf(GenericType.INTEGER); - assertThat(GenericType.listOf(Integer.class).getSupertype(Iterable.class)) - .isEqualTo(expectedType); - } - - @Test - public void should_return_sub_type() { - GenericType> superType = iterableOf(GenericType.INTEGER); - assertThat(superType.getSubtype(List.class)).isEqualTo(GenericType.listOf(GenericType.INTEGER)); - } - - @Test - public void should_return_type() { - assertThat(GenericType.INTEGER.getType()).isEqualTo(Integer.class); - } - - @Test - public void should_return_component_type() { - assertThat(GenericType.of(Integer[].class).getComponentType()).isEqualTo(GenericType.INTEGER); - } - - @Test - public void should_report_is_array() { - assertThat(GenericType.INTEGER.isArray()).isFalse(); - assertThat(GenericType.of(Integer[].class).isArray()).isTrue(); - } - - private GenericType> optionalOf(GenericType elementType) { - return new GenericType>() {}.where(new GenericTypeParameter() {}, elementType); - } - - private GenericType> iterableOf(GenericType elementType) { - return new GenericType>() {}.where(new GenericTypeParameter() {}, elementType); - } - - private GenericType> mapOf(Class keyClass, Class valueClass) { - return new 
GenericType>() {}.where(new GenericTypeParameter() {}, keyClass) - .where(new GenericTypeParameter() {}, valueClass); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java deleted file mode 100644 index c547f95e67c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java +++ /dev/null @@ -1,496 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.uuid; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.security.SecureRandom; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Random; -import java.util.Set; -import java.util.SplittableRandom; -import java.util.UUID; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ThreadLocalRandom; -import java.util.function.Supplier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class UuidsTest { - - @Test - public void should_generate_unique_random_uuids_Random() { - Set generated = serialGeneration(1_000_000, Uuids::random); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_shared_Random2() { - Random random = new Random(); - Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_across_threads_shared_Random() throws Exception { - Random random = new Random(); - Set generated = parallelGeneration(10, 10_000, () -> () -> Uuids.random(random)); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_unique_random_uuids_shared_SecureRandom() { - SecureRandom random = new SecureRandom(); - Set generated = serialGeneration(1_000_000, () -> 
Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_across_threads_shared_SecureRandom() - throws Exception { - SecureRandom random = new SecureRandom(); - Set generated = parallelGeneration(10, 10_000, () -> () -> Uuids.random(random)); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_unique_random_uuids_ThreadLocalRandom() { - ThreadLocalRandom random = ThreadLocalRandom.current(); - Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_across_threads_ThreadLocalRandom() - throws Exception { - Set generated = - parallelGeneration( - 10, - 10_000, - () -> { - ThreadLocalRandom random = ThreadLocalRandom.current(); - return () -> Uuids.random(random); - }); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_unique_random_uuids_Netty_ThreadLocalRandom() { - io.netty.util.internal.ThreadLocalRandom random = - io.netty.util.internal.ThreadLocalRandom.current(); - Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_across_threads_Netty_ThreadLocalRandom() - throws Exception { - Set generated = - parallelGeneration( - 10, - 10_000, - () -> { - io.netty.util.internal.ThreadLocalRandom random = - io.netty.util.internal.ThreadLocalRandom.current(); - return () -> Uuids.random(random); - }); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_unique_random_uuids_SplittableRandom() { - SplittableRandom random = new SplittableRandom(); - Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void 
should_generate_unique_random_uuids_across_threads_SplittableRandom() - throws Exception { - Set generated = - parallelGeneration( - 10, - 10_000, - () -> { - SplittableRandom random = new SplittableRandom(); - return () -> Uuids.random(random); - }); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - @UseDataProvider("byteArrayNames") - public void should_generate_name_based_uuid_from_namespace_and_byte_array( - UUID namespace, byte[] name) throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespace, name); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(3); - assertUuid(namespace, name, 3, actual); - } - - @DataProvider - public static Object[][] byteArrayNames() { - return new Object[][] { - {Uuids.NAMESPACE_DNS, new byte[] {}}, {Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}}, - }; - } - - @Test - @UseDataProvider("byteArrayNamesWithVersions") - public void should_generate_name_based_uuid_from_namespace_byte_array_and_version( - UUID namespace, byte[] name, int version) throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespace, name, version); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(version); - assertUuid(namespace, name, version, actual); - } - - @DataProvider - public static Object[][] byteArrayNamesWithVersions() { - return new Object[][] { - {Uuids.NAMESPACE_DNS, new byte[] {}, 3}, - {Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}, 3}, - {Uuids.NAMESPACE_OID, new byte[] {}, 5}, - {Uuids.NAMESPACE_X500, new byte[] {1, 2, 3, 4}, 5}, - }; - } - - @Test - @UseDataProvider("stringNames") - public void should_generate_name_based_uuid_from_namespace_and_string(UUID namespace, String name) - throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespace, name); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(3); - assertUuid(namespace, name, 3, actual); - } - - 
@DataProvider - public static Object[][] stringNames() { - return new Object[][] { - {Uuids.NAMESPACE_DNS, ""}, {Uuids.NAMESPACE_URL, "Hello world!"}, {Uuids.NAMESPACE_OID, "你好"}, - }; - } - - @Test - @UseDataProvider("stringNamesWithVersions") - public void should_generate_name_based_uuid_from_namespace_string_and_version( - UUID namespace, String name, int version) throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespace, name, version); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(version); - assertUuid(namespace, name, version, actual); - } - - @DataProvider - public static Object[][] stringNamesWithVersions() { - return new Object[][] { - {Uuids.NAMESPACE_DNS, "", 3}, - {Uuids.NAMESPACE_URL, "Hello world!", 3}, - {Uuids.NAMESPACE_OID, "你好", 3}, - {Uuids.NAMESPACE_DNS, "", 5}, - {Uuids.NAMESPACE_URL, "Hello world!", 5}, - {Uuids.NAMESPACE_OID, "你好", 5}, - }; - } - - @Test - @UseDataProvider("concatenatedData") - public void should_generate_name_based_uuid_from_concatenated_data(byte[] namespaceAndName) - throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespaceAndName); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(3); - assertUuid(namespaceAndName, 3, actual); - } - - @DataProvider - public static Object[][] concatenatedData() { - return new Object[][] { - {concat(Uuids.NAMESPACE_DNS, new byte[] {})}, - {concat(Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4})}, - }; - } - - @Test - @UseDataProvider("concatenatedDataWithVersions") - public void should_generate_name_based_uuid_from_concatenated_data_and_version( - byte[] namespaceAndName, int version) throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespaceAndName, version); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(version); - assertUuid(namespaceAndName, version, actual); - } - - @DataProvider - public static 
Object[][] concatenatedDataWithVersions() { - return new Object[][] { - {concat(Uuids.NAMESPACE_DNS, new byte[] {}), 3}, - {concat(Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}), 3}, - {concat(Uuids.NAMESPACE_DNS, new byte[] {}), 5}, - {concat(Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}), 5}, - }; - } - - @Test - public void should_throw_when_invalid_version() { - Throwable error = catchThrowable(() -> Uuids.nameBased(Uuids.NAMESPACE_URL, "irrelevant", 1)); - assertThat(error) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid name-based UUID version, expecting 3 or 5, got: 1"); - } - - @Test - public void should_throw_when_invalid_data() { - Throwable error = catchThrowable(() -> Uuids.nameBased(new byte[] {1}, 3)); - assertThat(error) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("namespaceAndName must be at least 16 bytes long"); - } - - @Test - public void should_generate_timestamp_within_10_ms() { - - // The Uuids class does some computation at class initialization, which may screw up our - // assumption below that Uuids.timeBased() takes less than 10ms, so force class loading now. 
- Uuids.timeBased(); - - long start = System.currentTimeMillis(); - UUID uuid = Uuids.timeBased(); - - assertThat(uuid.version()).isEqualTo(1); - assertThat(uuid.variant()).isEqualTo(2); - - long timestamp = Uuids.unixTimestamp(uuid); - - assertThat(timestamp) - .as("Generated timestamp should be within 10 ms") - .isBetween(start, start + 10); - } - - @Test - public void should_generate_unique_time_based_uuids() { - Set generated = serialGeneration(1_000_000, Uuids::timeBased); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_time_based_uuids_across_threads() throws Exception { - Set generated = parallelGeneration(10, 10_000, () -> Uuids::timeBased); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_ever_increasing_timestamps() { - int count = 1_000_000; - long previous = 0; - for (int i = 0; i < count; i++) { - long current = Uuids.timeBased().timestamp(); - assertThat(current).isGreaterThan(previous); - previous = current; - } - } - - @Test - public void should_generate_within_bounds_for_given_timestamp() { - - Random random = new Random(System.currentTimeMillis()); - - int timestampsCount = 10; - int uuidsPerTimestamp = 10; - - for (int i = 0; i < timestampsCount; i++) { - long timestamp = random.nextInt(); - for (int j = 0; j < uuidsPerTimestamp; j++) { - UUID uuid = new UUID(Uuids.makeMsb(Uuids.fromUnixTimestamp(timestamp)), random.nextLong()); - assertBetween(uuid, Uuids.startOf(timestamp), Uuids.endOf(timestamp)); - } - } - } - - // Compares using Cassandra's sorting algorithm (not the same as compareTo). 
- private static void assertBetween(UUID uuid, UUID lowerBound, UUID upperBound) { - ByteBuffer uuidBytes = TypeCodecs.UUID.encode(uuid, DefaultProtocolVersion.V3); - ByteBuffer lb = TypeCodecs.UUID.encode(lowerBound, DefaultProtocolVersion.V3); - ByteBuffer ub = TypeCodecs.UUID.encode(upperBound, DefaultProtocolVersion.V3); - assertThat(uuidBytes).isNotNull(); - assertThat(lb).isNotNull(); - assertThat(ub).isNotNull(); - assertThat(compareTimestampBytes(lb, uuidBytes)).isLessThanOrEqualTo(0); - assertThat(compareTimestampBytes(ub, uuidBytes)).isGreaterThanOrEqualTo(0); - } - - private static int compareTimestampBytes(ByteBuffer o1, ByteBuffer o2) { - int o1Pos = o1.position(); - int o2Pos = o2.position(); - - int d = (o1.get(o1Pos + 6) & 0xF) - (o2.get(o2Pos + 6) & 0xF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 7) & 0xFF) - (o2.get(o2Pos + 7) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 4) & 0xFF) - (o2.get(o2Pos + 4) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 5) & 0xFF) - (o2.get(o2Pos + 5) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos) & 0xFF) - (o2.get(o2Pos) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 1) & 0xFF) - (o2.get(o2Pos + 1) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 2) & 0xFF) - (o2.get(o2Pos + 2) & 0xFF); - if (d != 0) { - return d; - } - return (o1.get(o1Pos + 3) & 0xFF) - (o2.get(o2Pos + 3) & 0xFF); - } - - private static void assertUuid(UUID namespace, String name, int version, UUID actual) - throws NoSuchAlgorithmException { - assertUuid(namespace, name.getBytes(StandardCharsets.UTF_8), version, actual); - } - - private static void assertUuid(UUID namespace, byte[] name, int version, UUID actual) - throws NoSuchAlgorithmException { - byte[] data = digest(namespace, name, version); - assertThat(longToBytes(actual.getMostSignificantBits())) - .isEqualTo(Arrays.copyOfRange(data, 0, 8)); - assertThat(longToBytes(actual.getLeastSignificantBits())) 
- .isEqualTo(Arrays.copyOfRange(data, 8, 16)); - } - - private static void assertUuid(byte[] namespaceAndName, int version, UUID actual) - throws NoSuchAlgorithmException { - byte[] data = digest(namespaceAndName, version); - assertThat(longToBytes(actual.getMostSignificantBits())) - .isEqualTo(Arrays.copyOfRange(data, 0, 8)); - assertThat(longToBytes(actual.getLeastSignificantBits())) - .isEqualTo(Arrays.copyOfRange(data, 8, 16)); - } - - private static byte[] digest(UUID namespace, byte[] name, int version) - throws NoSuchAlgorithmException { - byte[] namespaceAndName = concat(namespace, name); - return digest(namespaceAndName, version); - } - - private static byte[] digest(byte[] namespaceAndName, int version) - throws NoSuchAlgorithmException { - MessageDigest result; - String algorithm = version == 3 ? "MD5" : "SHA-1"; - result = MessageDigest.getInstance(algorithm); - byte[] digest = result.digest(namespaceAndName); - digest[6] &= (byte) 0x0f; - digest[6] |= (byte) (version << 4); - digest[8] &= (byte) 0x3f; - digest[8] |= (byte) 0x80; - return digest; - } - - private static byte[] concat(UUID namespace, byte[] name) { - return ByteBuffer.allocate(16 + name.length) - .putLong(namespace.getMostSignificantBits()) - .putLong(namespace.getLeastSignificantBits()) - .put(name) - .array(); - } - - private static byte[] longToBytes(long x) { - return ByteBuffer.allocate(Long.BYTES).putLong(x).array(); - } - - private Set serialGeneration(int count, Supplier uuidSupplier) { - Set generated = new HashSet<>(count); - for (int i = 0; i < count; ++i) { - generated.add(uuidSupplier.get()); - } - return generated; - } - - public Set parallelGeneration( - int threadCount, int uuidsPerThread, Supplier> uuidSupplier) - throws InterruptedException { - Set generated = new ConcurrentSkipListSet<>(); - UuidGenerator[] generators = new UuidGenerator[threadCount]; - for (int i = 0; i < threadCount; i++) { - generators[i] = new UuidGenerator(uuidsPerThread, uuidSupplier, generated); 
- } - for (int i = 0; i < threadCount; i++) { - generators[i].start(); - } - for (int i = 0; i < threadCount; i++) { - generators[i].join(); - } - return generated; - } - - private static class UuidGenerator extends Thread { - - private final int toGenerate; - private final Set generated; - private final Supplier> uuidSupplier; - - UuidGenerator(int toGenerate, Supplier> uuidSupplier, Set generated) { - this.toGenerate = toGenerate; - this.generated = generated; - this.uuidSupplier = uuidSupplier; - } - - @Override - public void run() { - Supplier uuidSupplier = this.uuidSupplier.get(); - for (int i = 0; i < toGenerate; ++i) { - generated.add(uuidSupplier.get()); - } - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java b/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java deleted file mode 100644 index 4daf7e28eb6..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal; - -import static org.assertj.core.api.Assertions.fail; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; - -public abstract class SerializationHelper { - - public static byte[] serialize(T t) { - try { - ByteArrayOutputStream bytes = new ByteArrayOutputStream(); - ObjectOutputStream out = new ObjectOutputStream(bytes); - out.writeObject(t); - return bytes.toByteArray(); - } catch (Exception e) { - fail("Unexpected error", e); - throw new AssertionError(); // never reached - } - } - - // the calling code performs validations on the result, so this doesn't matter - @SuppressWarnings("TypeParameterUnusedInFormals") - public static T deserialize(byte[] bytes) { - try { - ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes)); - @SuppressWarnings("unchecked") - T t = (T) in.readObject(); - return t; - } catch (Exception e) { - fail("Unexpected error", e); - throw new AssertionError(); // never reached - } - } - - public static T serializeAndDeserialize(T t) { - return deserialize(serialize(t)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java deleted file mode 100644 index dff9877b62d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.DefaultAsyncResultSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class AsyncPagingIterableWrapperTest { - - @Mock private ColumnDefinitions columnDefinitions; - @Mock private Statement statement; - @Mock private CqlSession session; - @Mock private InternalDriverContext context; - - @Before - public void setup() { - 
MockitoAnnotations.initMocks(this); - - // One single column "i" of type int: - when(columnDefinitions.contains("i")).thenReturn(true); - ColumnDefinition iDefinition = mock(ColumnDefinition.class); - when(iDefinition.getType()).thenReturn(DataTypes.INT); - when(columnDefinitions.get("i")).thenReturn(iDefinition); - when(columnDefinitions.firstIndexOf("i")).thenReturn(0); - when(columnDefinitions.get(0)).thenReturn(iDefinition); - - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(context.getProtocolVersion()).thenReturn(DefaultProtocolVersion.DEFAULT); - } - - @Test - public void should_wrap_result_set() throws Exception { - // Given - // two pages of data: - ExecutionInfo executionInfo1 = mockExecutionInfo(); - DefaultAsyncResultSet resultSet1 = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo1, mockData(0, 5), session, context); - DefaultAsyncResultSet resultSet2 = - new DefaultAsyncResultSet( - columnDefinitions, mockExecutionInfo(), mockData(5, 10), session, context); - // chain them together: - ByteBuffer mockPagingState = ByteBuffer.allocate(0); - when(executionInfo1.getPagingState()).thenReturn(mockPagingState); - Statement mockNextStatement = mock(Statement.class); - when(((Statement) statement).copy(mockPagingState)).thenReturn(mockNextStatement); - when(session.executeAsync(mockNextStatement)) - .thenAnswer(invocation -> CompletableFuture.completedFuture(resultSet2)); - - // When - MappedAsyncPagingIterable iterable1 = resultSet1.map(row -> row.getInt("i")); - - // Then - for (int i = 0; i < 5; i++) { - assertThat(iterable1.one()).isEqualTo(i); - assertThat(iterable1.remaining()).isEqualTo(resultSet1.remaining()).isEqualTo(4 - i); - } - assertThat(iterable1.hasMorePages()).isTrue(); - - MappedAsyncPagingIterable iterable2 = - iterable1.fetchNextPage().toCompletableFuture().get(); - for (int i = 5; i < 10; i++) { - assertThat(iterable2.one()).isEqualTo(i); - 
assertThat(iterable2.remaining()).isEqualTo(resultSet2.remaining()).isEqualTo(9 - i); - } - assertThat(iterable2.hasMorePages()).isFalse(); - } - - /** Checks that consuming from the wrapper consumes from the source, and vice-versa. */ - @Test - public void should_share_iteration_progress_with_wrapped_result_set() { - // Given - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, mockExecutionInfo(), mockData(0, 10), session, context); - - // When - MappedAsyncPagingIterable iterable = resultSet.map(row -> row.getInt("i")); - - // Then - // Consume alternatively from the source and mapped iterable, and check that they stay in sync - for (int i = 0; i < 10; i++) { - Object element = (i % 2 == 0 ? resultSet : iterable).one(); - assertThat(element).isNotNull(); - assertThat(iterable.remaining()).isEqualTo(resultSet.remaining()).isEqualTo(9 - i); - } - assertThat(resultSet.hasMorePages()).isFalse(); - assertThat(iterable.hasMorePages()).isFalse(); - } - - private ExecutionInfo mockExecutionInfo() { - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getRequest()).thenAnswer(invocation -> statement); - return executionInfo; - } - - private Queue> mockData(int start, int end) { - Queue> data = new ArrayDeque<>(); - for (int i = start; i < end; i++) { - data.add(Lists.newArrayList(TypeCodecs.INT.encode(i, DefaultProtocolVersion.DEFAULT))); - } - return data; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java deleted file mode 100644 index 6c0d78d62dd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.function.Consumer; -import org.assertj.core.api.AbstractAssert; - -public class CompletionStageAssert - extends AbstractAssert, CompletionStage> { - - public CompletionStageAssert(CompletionStage actual) { - super(actual, CompletionStageAssert.class); - } - - public CompletionStageAssert isSuccess(Consumer valueAssertions) { - try { - V value = actual.toCompletableFuture().get(2, TimeUnit.SECONDS); - valueAssertions.accept(value); - } catch (TimeoutException e) { - fail("Future did not complete within the timeout"); - } catch (Throwable t) { - fail("Unexpected error while waiting on the future", t); - } - return this; - } - - public CompletionStageAssert isSuccess() { - return isSuccess(v -> {}); - } - - public CompletionStageAssert isFailed(Consumer failureAssertions) { - try { - actual.toCompletableFuture().get(2, TimeUnit.SECONDS); - fail("Expected completion 
stage to fail"); - } catch (TimeoutException e) { - fail("Future did not complete within the timeout"); - } catch (InterruptedException e) { - fail("Interrupted while waiting for future to fail"); - } catch (ExecutionException e) { - failureAssertions.accept(e.getCause()); - } - return this; - } - - public CompletionStageAssert isFailed() { - return isFailed(f -> {}); - } - - public CompletionStageAssert isCancelled() { - boolean cancelled = false; - try { - actual.toCompletableFuture().get(2, TimeUnit.SECONDS); - } catch (CancellationException e) { - cancelled = true; - } catch (Exception ignored) { - } - if (!cancelled) { - fail("Expected completion stage to be cancelled"); - } - return this; - } - - public CompletionStageAssert isNotCancelled() { - boolean cancelled = false; - try { - actual.toCompletableFuture().get(2, TimeUnit.SECONDS); - } catch (CancellationException e) { - cancelled = true; - } catch (Exception ignored) { - } - if (cancelled) { - fail("Expected completion stage not to be cancelled"); - } - return this; - } - - public CompletionStageAssert isDone() { - assertThat(actual.toCompletableFuture().isDone()) - .overridingErrorMessage("Expected completion stage to be done") - .isTrue(); - return this; - } - - public CompletionStageAssert isNotDone() { - assertThat(actual.toCompletableFuture().isDone()) - .overridingErrorMessage("Expected completion stage not to be done") - .isFalse(); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java deleted file mode 100644 index 72b875b8602..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.filter; -import static org.junit.Assume.assumeTrue; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Collections; -import java.util.Set; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.class) -public class ContactPointsTest { - - @Mock private Appender appender; - 
@Captor private ArgumentCaptor loggingEventCaptor; - private Logger logger; - private Level initialLogLevel; - - @Before - public void setup() { - logger = (Logger) LoggerFactory.getLogger(ContactPoints.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.INFO); - logger.addAppender(appender); - } - - @After - public void teardown() { - logger.detachAppender(appender); - logger.setLevel(initialLogLevel); - } - - @Test - public void should_parse_ipv4_address_and_port_in_configuration() { - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("127.0.0.1:9042"), true); - - assertThat(endPoints) - .containsExactly(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))); - } - - @Test - public void should_parse_ipv6_address_and_port_in_configuration() { - Set endPoints = - ContactPoints.merge( - Collections.emptySet(), ImmutableList.of("0:0:0:0:0:0:0:1:9042", "::2:9042"), true); - - assertThat(endPoints) - .containsExactly( - new DefaultEndPoint(new InetSocketAddress("::1", 9042)), - new DefaultEndPoint(new InetSocketAddress("::2", 9042))); - } - - @Test - public void should_parse_host_and_port_in_configuration_and_create_unresolved() { - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("localhost:9042"), false); - - assertThat(endPoints) - .containsExactly( - new DefaultEndPoint(InetSocketAddress.createUnresolved("localhost", 9042))); - } - - @Test - public void should_parse_host_and_port_and_resolve_all_a_records() throws UnknownHostException { - int localhostARecordsCount = InetAddress.getAllByName("localhost").length; - assumeTrue( - "This test assumes that localhost resolves to multiple A-records", - localhostARecordsCount >= 2); - - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("localhost:9042"), true); - - assertThat(endPoints).hasSize(localhostARecordsCount); - assertLog( - Level.INFO, - "Contact point localhost:9042 resolves to multiple addresses, 
will use them all"); - } - - @Test - public void should_ignore_malformed_host_and_port_and_warn() { - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("foobar"), true); - - assertThat(endPoints).isEmpty(); - assertLog(Level.WARN, "Ignoring invalid contact point foobar (expecting format host:port)"); - } - - @Test - public void should_ignore_malformed_port_and_warn() { - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("127.0.0.1:foobar"), true); - - assertThat(endPoints).isEmpty(); - assertLog( - Level.WARN, - "Ignoring invalid contact point 127.0.0.1:foobar (expecting port to be a number, got foobar)"); - } - - @Test - public void should_merge_programmatic_and_configuration() { - Set endPoints = - ContactPoints.merge( - ImmutableSet.of(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))), - ImmutableList.of("127.0.0.2:9042"), - true); - - assertThat(endPoints) - .containsOnly( - new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042)), - new DefaultEndPoint(new InetSocketAddress("127.0.0.2", 9042))); - } - - @Test - public void should_warn_if_duplicate_between_programmatic_and_configuration() { - Set endPoints = - ContactPoints.merge( - ImmutableSet.of(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))), - ImmutableList.of("127.0.0.1:9042"), - true); - - assertThat(endPoints) - .containsOnly(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))); - assertLog(Level.WARN, "Duplicate contact point /127.0.0.1:9042"); - } - - @Test - public void should_warn_if_duplicate_in_configuration() { - Set endPoints = - ContactPoints.merge( - Collections.emptySet(), ImmutableList.of("127.0.0.1:9042", "127.0.0.1:9042"), true); - - assertThat(endPoints) - .containsOnly(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))); - assertLog(Level.WARN, "Duplicate contact point /127.0.0.1:9042"); - } - - private void assertLog(Level level, String message) { - verify(appender, 
atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable logs = - filter(loggingEventCaptor.getAllValues()).with("level", level).get(); - assertThat(logs).hasSize(1); - assertThat(logs.iterator().next().getFormattedMessage()).contains(message); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistryTest.java deleted file mode 100644 index 1d7cc65d1f2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistryTest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V2; -import static com.datastax.oss.driver.api.core.ProtocolVersion.V3; -import static com.datastax.oss.driver.api.core.ProtocolVersion.V4; -import static com.datastax.oss.driver.api.core.ProtocolVersion.V5; -import static com.datastax.oss.driver.api.core.ProtocolVersion.V6; -import static com.datastax.oss.driver.internal.core.DefaultProtocolFeature.DATE_TYPE; -import static com.datastax.oss.driver.internal.core.DefaultProtocolFeature.SMALLINT_AND_TINYINT_TYPES; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Optional; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Note: some tests in this class depend on the set of supported protocol versions, they will need - * to be updated as new versions are added or become non-beta. 
- */ -public class DefaultProtocolVersionRegistryTest { - - private DefaultProtocolVersionRegistry registry = new DefaultProtocolVersionRegistry("test"); - - @Test - public void should_find_version_by_name() { - assertThat(registry.fromName("V4")).isEqualTo(ProtocolVersion.V4); - assertThat(registry.fromName("DSE_V1")).isEqualTo(DseProtocolVersion.DSE_V1); - } - - @Test - public void should_fail_to_find_version_by_name_different_case() { - assertThatThrownBy(() -> registry.fromName("v4")).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> registry.fromName("dse_v1")) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> registry.fromName("dDSE_v1")) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> registry.fromName("dse_v1")) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_downgrade_if_lower_version_available() { - Optional downgraded = registry.downgrade(V4); - downgraded.map(version -> assertThat(version).isEqualTo(V3)).orElseThrow(AssertionError::new); - } - - @Test - public void should_not_downgrade_if_no_lower_version() { - Optional downgraded = registry.downgrade(V3); - assertThat(downgraded.isPresent()).isFalse(); - } - - @Test - public void should_downgrade_from_dse_to_oss() { - assertThat(registry.downgrade(DseProtocolVersion.DSE_V1).get()).isEqualTo(ProtocolVersion.V5); - } - - @Test - public void should_pick_dse_v2_as_highest_common_when_all_nodes_are_dse_7() { - assertThat(registry.highestCommon(ImmutableList.of(mockDseNode("7.0"), mockDseNode("7.1")))) - .isEqualTo(DseProtocolVersion.DSE_V2); - } - - @Test - public void should_pick_dse_v2_as_highest_common_when_all_nodes_are_dse_6() { - assertThat(registry.highestCommon(ImmutableList.of(mockDseNode("6.0"), mockDseNode("6.1")))) - .isEqualTo(DseProtocolVersion.DSE_V2); - } - - @Test - public void should_pick_dse_v1_as_highest_common_when_all_nodes_are_dse_5_1_or_more() { - 
assertThat(registry.highestCommon(ImmutableList.of(mockDseNode("5.1"), mockDseNode("6.1")))) - .isEqualTo(DseProtocolVersion.DSE_V1); - } - - @Test - public void should_pick_oss_v4_as_highest_common_when_all_nodes_are_dse_5_or_more() { - assertThat( - registry.highestCommon( - ImmutableList.of(mockDseNode("5.0"), mockDseNode("5.1"), mockDseNode("6.1")))) - .isEqualTo(ProtocolVersion.V4); - } - - @Test - public void should_pick_oss_v3_as_highest_common_when_all_nodes_are_dse_4_7_or_more() { - assertThat( - registry.highestCommon( - ImmutableList.of(mockDseNode("4.7"), mockDseNode("5.1"), mockDseNode("6.1")))) - .isEqualTo(ProtocolVersion.V3); - } - - @Test(expected = UnsupportedProtocolVersionException.class) - public void should_fail_to_pick_highest_common_when_one_node_is_dse_4_6() { - registry.highestCommon( - ImmutableList.of(mockDseNode("4.6"), mockDseNode("5.1"), mockDseNode("6.1"))); - } - - @Test(expected = UnsupportedProtocolVersionException.class) - public void should_fail_to_pick_highest_common_when_one_node_is_2_0() { - registry.highestCommon( - ImmutableList.of(mockCassandraNode("3.0.0"), mockCassandraNode("2.0.9"))); - } - - @Test - public void should_pick_oss_v3_as_highest_common_when_one_node_is_cassandra_2_1() { - assertThat( - registry.highestCommon( - ImmutableList.of( - mockDseNode("5.1"), // oss v4 - mockDseNode("6.1"), // oss v4 - mockCassandraNode("2.1") // oss v3 - ))) - .isEqualTo(ProtocolVersion.V3); - } - - @Test - public void should_support_date_type_on_oss_v4_and_later() { - assertThat(registry.supports(V3, DATE_TYPE)).isFalse(); - assertThat(registry.supports(V4, DATE_TYPE)).isTrue(); - assertThat(registry.supports(V5, DATE_TYPE)).isTrue(); - assertThat(registry.supports(V6, DATE_TYPE)).isTrue(); - assertThat(registry.supports(DSE_V1, DATE_TYPE)).isTrue(); - assertThat(registry.supports(DSE_V2, DATE_TYPE)).isTrue(); - } - - @Test - public void should_support_smallint_and_tinyint_types_on_oss_v4_and_later() { - 
assertThat(registry.supports(V3, SMALLINT_AND_TINYINT_TYPES)).isFalse(); - assertThat(registry.supports(V4, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - assertThat(registry.supports(V5, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - assertThat(registry.supports(V6, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - assertThat(registry.supports(DSE_V1, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - assertThat(registry.supports(DSE_V2, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - } - - private Node mockCassandraNode(String rawVersion) { - Node node = Mockito.mock(Node.class); - if (rawVersion != null) { - Mockito.when(node.getCassandraVersion()).thenReturn(Version.parse(rawVersion)); - } - return node; - } - - private Node mockDseNode(String rawDseVersion) { - Node node = Mockito.mock(Node.class); - Version dseVersion = Version.parse(rawDseVersion); - Mockito.when(node.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, dseVersion)); - - Version cassandraVersion; - if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_7_0_0) >= 0) { - cassandraVersion = Version.parse("5.0"); - } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_6_0_0) >= 0) { - cassandraVersion = Version.parse("4.0"); - } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_5_1_0) >= 0) { - cassandraVersion = Version.parse("3.11"); - } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_5_0_0) >= 0) { - cassandraVersion = Version.parse("3.0"); - } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_4_7_0) >= 0) { - cassandraVersion = Version.parse("2.1"); - } else { - cassandraVersion = Version.parse("2.0"); - } - Mockito.when(node.getCassandraVersion()).thenReturn(cassandraVersion); - - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java deleted file mode 100644 index adbe26159db..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverOption; -import org.assertj.core.api.AbstractAssert; - -public class DriverConfigAssert extends AbstractAssert { - public DriverConfigAssert(DriverConfig actual) { - super(actual, DriverConfigAssert.class); - } - - public DriverConfigAssert hasIntOption(DriverOption option, int expected) { - assertThat(actual.getDefaultProfile().getInt(option)).isEqualTo(expected); - return this; - } - - public DriverConfigAssert hasIntOption(String profileName, DriverOption option, int expected) { - assertThat(actual.getProfile(profileName).getInt(option)).isEqualTo(expected); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/MockAsyncPagingIterable.java b/core/src/test/java/com/datastax/oss/driver/internal/core/MockAsyncPagingIterable.java deleted file mode 100644 index 731c558a81f..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/MockAsyncPagingIterable.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -public class MockAsyncPagingIterable - implements AsyncPagingIterable> { - - private final Queue currentPage; - private final MockAsyncPagingIterable nextPage; - - public MockAsyncPagingIterable(List elements, int fetchSize, boolean addEmptyLastPage) { - if (elements.size() <= fetchSize) { - currentPage = new ArrayDeque<>(elements); - nextPage = - addEmptyLastPage - ? 
new MockAsyncPagingIterable<>(Collections.emptyList(), fetchSize, false) - : null; - } else { - currentPage = new ArrayDeque<>(elements.subList(0, fetchSize)); - nextPage = - new MockAsyncPagingIterable<>( - elements.subList(fetchSize, elements.size()), fetchSize, addEmptyLastPage); - } - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public int remaining() { - return currentPage.size(); - } - - @Override - public boolean hasMorePages() { - return nextPage != null; - } - - @NonNull - @Override - public CompletionStage> fetchNextPage() - throws IllegalStateException { - Preconditions.checkState(nextPage != null); - return CompletableFuture.completedFuture(nextPage); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - throw new UnsupportedOperationException("irrelevant"); - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public boolean wasApplied() { - throw new UnsupportedOperationException("irrelevant"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/MockPagingIterable.java b/core/src/test/java/com/datastax/oss/driver/internal/core/MockPagingIterable.java deleted file mode 100644 index 885983ee98e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/MockPagingIterable.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.List; - -public class MockPagingIterable implements PagingIterable { - - private final Iterator iterator; - - public MockPagingIterable(Iterator iterator) { - this.iterator = iterator; - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - @Override - public boolean isFullyFetched() { - return !iterator.hasNext(); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - throw new UnsupportedOperationException("irrelevant"); - } - - @NonNull - @Override - public List getExecutionInfos() { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public int getAvailableWithoutFetching() { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public boolean wasApplied() { - throw new UnsupportedOperationException("irrelevant"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java deleted file mode 100644 index 15af3c61bff..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import io.netty.util.concurrent.Future; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.function.Consumer; -import org.assertj.core.api.AbstractAssert; - -public class NettyFutureAssert extends AbstractAssert, Future> { - - public NettyFutureAssert(Future actual) { - super(actual, NettyFutureAssert.class); - } - - public NettyFutureAssert isNotDone() { - assertThat(actual.isDone()).isFalse(); - return this; - } - - public NettyFutureAssert isSuccess(Consumer valueAssertions) { - try { - V value = actual.get(100, TimeUnit.MILLISECONDS); - valueAssertions.accept(value); - } catch (TimeoutException e) { - fail("Future did not complete within the timeout"); - } catch (Throwable t) { - fail("Unexpected error while waiting on the future", t); - } - return this; - } - - public NettyFutureAssert isSuccess() { - return isSuccess(v -> {}); - } - - public NettyFutureAssert isFailed(Consumer failureAssertions) { - try { - actual.get(100, TimeUnit.MILLISECONDS); - 
fail("Expected future to fail"); - } catch (TimeoutException e) { - fail("Future did not fail within the timeout"); - } catch (InterruptedException e) { - fail("Interrupted while waiting for future to fail"); - } catch (ExecutionException e) { - failureAssertions.accept(e.getCause()); - } - return this; - } - - public NettyFutureAssert isFailed() { - return isFailed(f -> {}); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java deleted file mode 100644 index 1e7cc62f8ac..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.cql.ResultSetTestBase; -import com.datastax.oss.driver.internal.core.cql.ResultSets; -import java.util.Iterator; -import org.junit.Test; - -public class PagingIterableWrapperTest extends ResultSetTestBase { - - @Test - public void should_wrap_result_set() { - // Given - AsyncResultSet page1 = mockPage(true, 0, 1, 2); - AsyncResultSet page2 = mockPage(true, 3, 4, 5); - AsyncResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - PagingIterable iterable = ResultSets.newInstance(page1).map(row -> row.getInt(0)); - - // Then - assertThat(iterable.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(iterable.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = iterable.iterator(); - - assertThat(iterator.next()).isEqualTo(0); - assertThat(iterator.next()).isEqualTo(1); - assertThat(iterator.next()).isEqualTo(2); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page2 - assertThat(iterable.getExecutionInfo()).isEqualTo(page2.getExecutionInfo()); - assertThat(iterable.getExecutionInfos()) - .containsExactly(page1.getExecutionInfo(), page2.getExecutionInfo()); - - assertThat(iterator.next()).isEqualTo(3); - assertThat(iterator.next()).isEqualTo(4); - assertThat(iterator.next()).isEqualTo(5); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page3 - assertThat(iterable.getExecutionInfo()).isEqualTo(page3.getExecutionInfo()); - assertThat(iterable.getExecutionInfos()) - 
.containsExactly( - page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo()); - - assertThat(iterator.next()).isEqualTo(6); - assertThat(iterator.next()).isEqualTo(7); - assertThat(iterator.next()).isEqualTo(8); - } - - /** Checks that consuming from the wrapper consumes from the source, and vice-versa. */ - @Test - public void should_share_iteration_progress_with_wrapped_result_set() { - // Given - AsyncResultSet page1 = mockPage(true, 0, 1, 2); - AsyncResultSet page2 = mockPage(true, 3, 4, 5); - AsyncResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - ResultSet resultSet = ResultSets.newInstance(page1); - PagingIterable iterable = resultSet.map(row -> row.getInt(0)); - - // Then - Iterator sourceIterator = resultSet.iterator(); - Iterator mappedIterator = iterable.iterator(); - - assertThat(mappedIterator.next()).isEqualTo(0); - assertNextRow(sourceIterator, 1); - assertThat(mappedIterator.next()).isEqualTo(2); - assertNextRow(sourceIterator, 3); - assertThat(mappedIterator.next()).isEqualTo(4); - assertNextRow(sourceIterator, 5); - assertThat(mappedIterator.next()).isEqualTo(6); - assertNextRow(sourceIterator, 7); - assertThat(mappedIterator.next()).isEqualTo(8); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java b/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java deleted file mode 100644 index ce028e66dbd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Supported; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Queue; - -public class TestResponses { - /** The response to the query run by each connection to check if the cluster name matches. 
*/ - public static Rows clusterNameResponse(String actualClusterName) { - ColumnSpec colSpec = - new ColumnSpec( - "system", - "local", - "cluster_name", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR)); - RowsMetadata metadata = new RowsMetadata(ImmutableList.of(colSpec), null, null, null); - Queue> data = Lists.newLinkedList(); - data.add(Lists.newArrayList(ByteBuffer.wrap(actualClusterName.getBytes(Charsets.UTF_8)))); - return new DefaultRows(metadata, data); - } - - public static Supported supportedResponse(String key, String value) { - Map> options = ImmutableMap.of(key, ImmutableList.of(value)); - return new Supported(options); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java deleted file mode 100644 index 2b871b3e0cc..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.net.InetAddress; -import java.net.InetSocketAddress; -import javax.naming.NamingException; -import javax.naming.directory.BasicAttributes; -import javax.naming.directory.InitialDirContext; -import org.junit.Test; - -public class Ec2MultiRegionAddressTranslatorTest { - - @Test - public void should_return_same_address_when_no_entry_found() throws Exception { - InitialDirContext mock = mock(InitialDirContext.class); - when(mock.getAttributes(anyString(), any(String[].class))).thenReturn(new BasicAttributes()); - Ec2MultiRegionAddressTranslator translator = new Ec2MultiRegionAddressTranslator(mock); - - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); - assertThat(translator.translate(address)).isEqualTo(address); - } - - @Test - public void should_return_same_address_when_exception_encountered() throws Exception { - InitialDirContext mock = mock(InitialDirContext.class); - when(mock.getAttributes(anyString(), any(String[].class))) - .thenThrow(new NamingException("Problem resolving address (not really).")); - Ec2MultiRegionAddressTranslator translator = new Ec2MultiRegionAddressTranslator(mock); - - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); - assertThat(translator.translate(address)).isEqualTo(address); - } - - @Test - public void should_return_new_address_when_match_found() throws Exception { - InetSocketAddress expectedAddress = new InetSocketAddress("54.32.55.66", 9042); - - InitialDirContext mock = mock(InitialDirContext.class); - when(mock.getAttributes("5.2.0.192.in-addr.arpa", new String[] {"PTR"})) - .thenReturn(new 
BasicAttributes("PTR", expectedAddress.getHostName())); - Ec2MultiRegionAddressTranslator translator = new Ec2MultiRegionAddressTranslator(mock); - - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); - assertThat(translator.translate(address)).isEqualTo(expectedAddress); - } - - @Test - public void should_close_context_when_closed() throws Exception { - InitialDirContext mock = mock(InitialDirContext.class); - Ec2MultiRegionAddressTranslator translator = new Ec2MultiRegionAddressTranslator(mock); - - // ensure close has not been called to this point. - verify(mock, times(0)).close(); - translator.close(); - // ensure close is closed. - verify(mock).close(); - } - - @Test - public void should_build_reversed_domain_name_for_ip_v4() throws Exception { - InetAddress address = InetAddress.getByName("192.0.2.5"); - assertThat(Ec2MultiRegionAddressTranslator.reverse(address)) - .isEqualTo("5.2.0.192.in-addr.arpa"); - } - - @Test - public void should_build_reversed_domain_name_for_ip_v6() throws Exception { - InetAddress address = InetAddress.getByName("2001:db8::567:89ab"); - assertThat(Ec2MultiRegionAddressTranslator.reverse(address)) - .isEqualTo("b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslatorTest.java deleted file mode 100644 index 3bb9c4bc291..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslatorTest.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.MockedDriverContextFactory; -import java.net.InetSocketAddress; -import org.junit.Test; - -public class FixedHostNameAddressTranslatorTest { - - @Test - public void should_translate_address() { - DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); - when(defaultProfile.getString(ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME)).thenReturn("myaddress"); - DefaultDriverContext defaultDriverContext = - MockedDriverContextFactory.defaultDriverContext(defaultProfile); - - FixedHostNameAddressTranslator translator = - new FixedHostNameAddressTranslator(defaultDriverContext); - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 6061); - - assertThat(translator.translate(address)).isEqualTo(new InetSocketAddress("myaddress", 6061)); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTest.java deleted file mode 100644 index bd505f5dd44..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTest.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - -import java.net.InetSocketAddress; -import org.junit.Test; - -public class SubnetAddressTest { - @Test - public void should_return_return_true_on_overlapping_with_another_subnet_address() { - SubnetAddress subnetAddress1 = - new SubnetAddress("100.64.0.0/15", mock(InetSocketAddress.class)); - SubnetAddress subnetAddress2 = - new SubnetAddress("100.65.0.0/16", mock(InetSocketAddress.class)); - assertThat(subnetAddress1.isOverlapping(subnetAddress2)).isTrue(); - } - - @Test - public void should_return_return_false_on_not_overlapping_with_another_subnet_address() { - SubnetAddress subnetAddress1 = - new SubnetAddress("100.64.0.0/15", mock(InetSocketAddress.class)); - SubnetAddress subnetAddress2 = - new SubnetAddress("100.66.0.0/15", mock(InetSocketAddress.class)); - assertThat(subnetAddress1.isOverlapping(subnetAddress2)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslatorTest.java deleted file mode 100644 index 420170654dc..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslatorTest.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.MockedDriverContextFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.net.InetSocketAddress; -import java.util.Map; -import org.junit.Test; - -@SuppressWarnings("resource") -public class SubnetAddressTranslatorTest { - - @Test - public void should_translate_to_correct_subnet_address_ipv4() { - Map subnetAddresses = - ImmutableMap.of( - "\"100.64.0.0/15\"", "cassandra.datacenter1.com:19042", - "100.66.0.\"0/15\"", "cassandra.datacenter2.com:19042"); - DefaultDriverContext context = context(subnetAddresses); - SubnetAddressTranslator translator = new SubnetAddressTranslator(context); - InetSocketAddress address = new InetSocketAddress("100.64.0.1", 9042); - assertThat(translator.translate(address)) - .isEqualTo(InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)); - } - - @Test - 
public void should_translate_to_correct_subnet_address_ipv6() { - Map subnetAddresses = - ImmutableMap.of( - "\"::ffff:6440:0/111\"", "cassandra.datacenter1.com:19042", - "\"::ffff:6442:0/111\"", "cassandra.datacenter2.com:19042"); - DefaultDriverContext context = context(subnetAddresses); - SubnetAddressTranslator translator = new SubnetAddressTranslator(context); - InetSocketAddress address = new InetSocketAddress("::ffff:6440:1", 9042); - assertThat(translator.translate(address)) - .isEqualTo(InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)); - } - - @Test - public void should_translate_to_default_address() { - DefaultDriverContext context = context(ImmutableMap.of()); - when(context - .getConfig() - .getDefaultProfile() - .getString(ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, null)) - .thenReturn("cassandra.com:19042"); - SubnetAddressTranslator translator = new SubnetAddressTranslator(context); - InetSocketAddress address = new InetSocketAddress("100.68.0.1", 9042); - assertThat(translator.translate(address)) - .isEqualTo(InetSocketAddress.createUnresolved("cassandra.com", 19042)); - } - - @Test - public void should_pass_through_not_matched_address() { - DefaultDriverContext context = context(ImmutableMap.of()); - SubnetAddressTranslator translator = new SubnetAddressTranslator(context); - InetSocketAddress address = new InetSocketAddress("100.68.0.1", 9042); - assertThat(translator.translate(address)).isEqualTo(address); - } - - @Test - public void should_fail_on_intersecting_subnets_ipv4() { - Map subnetAddresses = - ImmutableMap.of( - "\"100.64.0.0/15\"", "cassandra.datacenter1.com:19042", - "100.65.0.\"0/16\"", "cassandra.datacenter2.com:19042"); - DefaultDriverContext context = context(subnetAddresses); - assertThatIllegalArgumentException() - .isThrownBy(() -> new SubnetAddressTranslator(context)) - .withMessage( - "Configured subnets are overlapping: " - + String.format( - "SubnetAddress[subnet=[100, 64, 0, 0], address=%s], ", - 
InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)) - + String.format( - "SubnetAddress[subnet=[100, 65, 0, 0], address=%s]", - InetSocketAddress.createUnresolved("cassandra.datacenter2.com", 19042))); - } - - @Test - public void should_fail_on_intersecting_subnets_ipv6() { - Map subnetAddresses = - ImmutableMap.of( - "\"::ffff:6440:0/111\"", "cassandra.datacenter1.com:19042", - "\"::ffff:6441:0/112\"", "cassandra.datacenter2.com:19042"); - DefaultDriverContext context = context(subnetAddresses); - assertThatIllegalArgumentException() - .isThrownBy(() -> new SubnetAddressTranslator(context)) - .withMessage( - "Configured subnets are overlapping: " - + String.format( - "SubnetAddress[subnet=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 64, 0, 0], address=%s], ", - InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)) - + String.format( - "SubnetAddress[subnet=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 65, 0, 0], address=%s]", - InetSocketAddress.createUnresolved("cassandra.datacenter2.com", 19042))); - } - - @Test - public void should_fail_on_subnet_address_without_port() { - Map subnetAddresses = - ImmutableMap.of("\"100.64.0.0/15\"", "cassandra.datacenter1.com"); - DefaultDriverContext context = context(subnetAddresses); - assertThatIllegalArgumentException() - .isThrownBy(() -> new SubnetAddressTranslator(context)) - .withMessage("Invalid address cassandra.datacenter1.com (expecting format host:port)"); - } - - @Test - public void should_fail_on_default_address_without_port() { - DefaultDriverContext context = context(ImmutableMap.of()); - when(context - .getConfig() - .getDefaultProfile() - .getString(ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, null)) - .thenReturn("cassandra.com"); - assertThatIllegalArgumentException() - .isThrownBy(() -> new SubnetAddressTranslator(context)) - .withMessage("Invalid address cassandra.com (expecting format host:port)"); - } - - private static DefaultDriverContext context(Map subnetAddresses) { - 
DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - when(profile.getStringMap(ADDRESS_TRANSLATOR_SUBNET_ADDRESSES)).thenReturn(subnetAddresses); - return MockedDriverContextFactory.defaultDriverContext(profile); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetTest.java deleted file mode 100644 index f8ba8929e9e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetTest.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.assertj.core.api.Assertions.assertThatNoException; - -import java.net.UnknownHostException; -import org.junit.Test; - -public class SubnetTest { - @Test - public void should_parse_to_correct_ipv4_subnet() throws UnknownHostException { - Subnet subnet = Subnet.parse("100.64.0.0/15"); - assertThat(subnet.getSubnet()).containsExactly(100, 64, 0, 0); - assertThat(subnet.getNetworkMask()).containsExactly(255, 254, 0, 0); - assertThat(subnet.getUpper()).containsExactly(100, 65, 255, 255); - assertThat(subnet.getLower()).containsExactly(100, 64, 0, 0); - } - - @Test - public void should_parse_to_correct_ipv6_subnet() throws UnknownHostException { - Subnet subnet = Subnet.parse("2001:db8:85a3::8a2e:370:0/111"); - assertThat(subnet.getSubnet()) - .containsExactly(32, 1, 13, 184, 133, 163, 0, 0, 0, 0, 138, 46, 3, 112, 0, 0); - assertThat(subnet.getNetworkMask()) - .containsExactly( - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 0, 0); - assertThat(subnet.getUpper()) - .containsExactly(32, 1, 13, 184, 133, 163, 0, 0, 0, 0, 138, 46, 3, 113, 255, 255); - assertThat(subnet.getLower()) - .containsExactly(32, 1, 13, 184, 133, 163, 0, 0, 0, 0, 138, 46, 3, 112, 0, 0); - } - - @Test - public void should_parse_to_correct_ipv6_subnet_ipv4_convertible() throws UnknownHostException { - Subnet subnet = Subnet.parse("::ffff:6440:0/111"); - assertThat(subnet.getSubnet()) - .containsExactly(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 64, 0, 0); - assertThat(subnet.getNetworkMask()) - .containsExactly( - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 0, 0); - assertThat(subnet.getUpper()) - .containsExactly(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 65, 255, 255); - assertThat(subnet.getLower()) - 
.containsExactly(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 64, 0, 0); - } - - @Test - public void should_fail_on_invalid_cidr_format() { - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("invalid")) - .withMessage("Invalid subnet: invalid"); - } - - @Test - public void should_parse_bounding_prefix_lengths_correctly() { - assertThatNoException().isThrownBy(() -> Subnet.parse("0.0.0.0/0")); - assertThatNoException().isThrownBy(() -> Subnet.parse("100.64.0.0/32")); - } - - @Test - public void should_fail_on_invalid_prefix_length() { - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("100.64.0.0/-1")) - .withMessage("Prefix length -1 must be within [0; 32]"); - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("100.64.0.0/33")) - .withMessage("Prefix length 33 must be within [0; 32]"); - } - - @Test - public void should_fail_on_not_prefix_block_subnet_ipv4() { - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("100.65.0.0/15")) - .withMessage("Subnet 100.65.0.0/15 must be represented as a network prefix block"); - } - - @Test - public void should_fail_on_not_prefix_block_subnet_ipv6() { - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("::ffff:6441:0/111")) - .withMessage("Subnet ::ffff:6441:0/111 must be represented as a network prefix block"); - } - - @Test - public void should_return_true_on_containing_address() throws UnknownHostException { - Subnet subnet = Subnet.parse("100.64.0.0/15"); - assertThat(subnet.contains(new byte[] {100, 64, 0, 0})).isTrue(); - assertThat(subnet.contains(new byte[] {100, 65, (byte) 255, (byte) 255})).isTrue(); - assertThat(subnet.contains(new byte[] {100, 65, 100, 100})).isTrue(); - } - - @Test - public void should_return_false_on_not_containing_address() throws UnknownHostException { - Subnet subnet = Subnet.parse("100.64.0.0/15"); - assertThat(subnet.contains(new byte[] {100, 63, (byte) 255, (byte) 255})).isFalse(); - 
assertThat(subnet.contains(new byte[] {100, 66, 0, 0})).isFalse(); - // IPv6 cannot be contained by IPv4 subnet. - assertThat(subnet.contains(new byte[16])).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java deleted file mode 100644 index a1eab41b998..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.result.Void; -import io.netty.util.concurrent.Future; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; - -public class ChannelFactoryAvailableIdsTest extends ChannelFactoryTestBase { - - @Mock private ResponseCallback responseCallback; - - @Before - @Override - public void setup() throws InterruptedException { - super.setup(); - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn("V4"); - when(protocolVersionRegistry.fromName("V4")).thenReturn(DefaultProtocolVersion.V4); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS)).thenReturn(128); - - when(responseCallback.isLastResponse(any(Frame.class))).thenReturn(true); - } - - @Test - public void should_report_available_ids() { - // Given - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.builder().build(), NoopNodeMetricUpdater.INSTANCE); - completeSimpleChannelInit(); - - // Then - assertThatStage(channelFuture) - .isSuccess( - channel -> { - 
assertThat(channel.getAvailableIds()).isEqualTo(128); - - // Write a request, should decrease the count - assertThat(channel.preAcquireId()).isTrue(); - Future writeFuture = - channel.write(new Query("test"), false, Frame.NO_PAYLOAD, responseCallback); - assertThat(writeFuture) - .isSuccess( - v -> { - assertThat(channel.getAvailableIds()).isEqualTo(127); - - // Complete the request, should increase again - writeInboundFrame(readOutboundFrame(), Void.INSTANCE); - verify(responseCallback, timeout(500)).onResponse(any(Frame.class)); - assertThat(channel.getAvailableIds()).isEqualTo(128); - }); - }); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java deleted file mode 100644 index d9793247c9c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.protocol.internal.response.Ready; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class ChannelFactoryClusterNameTest extends ChannelFactoryTestBase { - - @Test - public void should_set_cluster_name_from_first_connection() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - writeInboundFrame( - readOutboundFrame(), TestResponses.supportedResponse("mock_key", "mock_value")); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture).isSuccess(); - assertThat(factory.getClusterName()).isEqualTo("mockClusterName"); - } - - @Test - public void should_check_cluster_name_for_next_connections() throws Throwable { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, 
DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - // open a first connection that will define the cluster name - writeInboundFrame( - readOutboundFrame(), TestResponses.supportedResponse("mock_key", "mock_value")); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); - assertThatStage(channelFuture).isSuccess(); - // open a second connection that returns the same cluster name - channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture).isSuccess(); - - // When - // open a third connection that returns a different cluster name - channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("wrongClusterName")); - - // Then - assertThatStage(channelFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(ClusterNameMismatchException.class) - .hasMessageContaining( - "reports cluster name 'wrongClusterName' that doesn't match " - + "our cluster name 'mockClusterName'.")); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java deleted file mode 100644 index b9738a140c0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Ready; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Optional; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class ChannelFactoryProtocolNegotiationTest extends ChannelFactoryTestBase { - - @Test - public void should_succeed_if_version_specified_and_supported_by_server() { - // Given - 
when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn("V4"); - when(protocolVersionRegistry.fromName("V4")).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - completeSimpleChannelInit(); - - // Then - assertThatStage(channelFuture) - .isSuccess(channel -> assertThat(channel.getClusterName()).isEqualTo("mockClusterName")); - assertThat(factory.protocolVersion).isEqualTo(DefaultProtocolVersion.V4); - } - - @Test - @UseDataProvider("unsupportedProtocolCodes") - public void should_fail_if_version_specified_and_not_supported_by_server(int errorCode) { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn("V4"); - when(protocolVersionRegistry.fromName("V4")).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); - // Server does not support v4 - writeInboundFrame( - requestFrame, new Error(errorCode, "Invalid or unsupported protocol version")); - - // Then - assertThatStage(channelFuture) - .isFailed( - e -> { - assertThat(e) - .isInstanceOf(UnsupportedProtocolVersionException.class) - .hasMessageContaining("Host does not support protocol version 
V4"); - assertThat(((UnsupportedProtocolVersionException) e).getAttemptedVersions()) - .containsExactly(DefaultProtocolVersion.V4); - }); - } - - @Test - public void should_fail_if_version_specified_and_considered_beta_by_server() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn("V5"); - when(protocolVersionRegistry.fromName("V5")).thenReturn(DefaultProtocolVersion.V5); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V5.getCode()); - // Server considers v5 beta, e.g. 
C* 3.10 or 3.11 - writeInboundFrame( - requestFrame, - new Error( - ProtocolConstants.ErrorCode.PROTOCOL_ERROR, - "Beta version of the protocol used (5/v5-beta), but USE_BETA flag is unset")); - - // Then - assertThatStage(channelFuture) - .isFailed( - e -> { - assertThat(e) - .isInstanceOf(UnsupportedProtocolVersionException.class) - .hasMessageContaining("Host does not support protocol version V5"); - assertThat(((UnsupportedProtocolVersionException) e).getAttemptedVersions()) - .containsExactly(DefaultProtocolVersion.V5); - }); - } - - @Test - public void should_succeed_if_version_not_specified_and_server_supports_latest_supported() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); - writeInboundFrame(requestFrame, new Ready()); - - requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture) - .isSuccess(channel -> assertThat(channel.getClusterName()).isEqualTo("mockClusterName")); - assertThat(factory.protocolVersion).isEqualTo(DefaultProtocolVersion.V4); - } - - @Test - @UseDataProvider("unsupportedProtocolCodes") - public void should_negotiate_if_version_not_specified_and_server_supports_legacy(int errorCode) { - // Given - 
when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - when(protocolVersionRegistry.downgrade(DefaultProtocolVersion.V4)) - .thenReturn(Optional.of(DefaultProtocolVersion.V3)); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); - // Server does not support v4 - writeInboundFrame( - requestFrame, new Error(errorCode, "Invalid or unsupported protocol version")); - - // Then - // Factory should initialize a new connection, that retries with the lower version - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V3.getCode()); - writeInboundFrame(requestFrame, new Ready()); - - requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("mockClusterName")); - assertThatStage(channelFuture) - .isSuccess(channel -> assertThat(channel.getClusterName()).isEqualTo("mockClusterName")); - assertThat(factory.protocolVersion).isEqualTo(DefaultProtocolVersion.V3); - } - - @Test - @UseDataProvider("unsupportedProtocolCodes") - public void should_fail_if_negotiation_finds_no_matching_version(int errorCode) { - // Given - 
when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - when(protocolVersionRegistry.downgrade(DefaultProtocolVersion.V4)) - .thenReturn(Optional.of(DefaultProtocolVersion.V3)); - when(protocolVersionRegistry.downgrade(DefaultProtocolVersion.V3)).thenReturn(Optional.empty()); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); - // Server does not support v4 - writeInboundFrame( - requestFrame, new Error(errorCode, "Invalid or unsupported protocol version")); - - // Client retries with v3 - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V3.getCode()); - // Server does not support v3 - writeInboundFrame( - requestFrame, new Error(errorCode, "Invalid or unsupported protocol version")); - - // Then - assertThatStage(channelFuture) - .isFailed( - e -> { - assertThat(e) - .isInstanceOf(UnsupportedProtocolVersionException.class) - .hasMessageContaining( - "Protocol negotiation failed: could not find a common version " - + "(attempted: [V4, V3])"); - assertThat(((UnsupportedProtocolVersionException) e).getAttemptedVersions()) - .containsExactly(DefaultProtocolVersion.V4, DefaultProtocolVersion.V3); - }); - } - - /** - * 
Depending on the Cassandra version, an "unsupported protocol" response can use different error - * codes, so we test all of them. - */ - @DataProvider - public static Object[][] unsupportedProtocolCodes() { - return new Object[][] { - new Object[] {ProtocolConstants.ErrorCode.PROTOCOL_ERROR}, - // C* 2.1 reports a server error instead of protocol error, see CASSANDRA-9451. - new Object[] {ProtocolConstants.ErrorCode.SERVER_ERROR} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactorySupportedOptionsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactorySupportedOptionsTest.java deleted file mode 100644 index 559e11e0bc2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactorySupportedOptionsTest.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.protocol.internal.response.Ready; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class ChannelFactorySupportedOptionsTest extends ChannelFactoryTestBase { - - @Test - public void should_query_supported_options_on_first_channel() throws Throwable { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture1 = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - writeInboundFrame( - readOutboundFrame(), TestResponses.supportedResponse("mock_key", "mock_value")); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture1).isSuccess(); - DriverChannel channel1 = channelFuture1.toCompletableFuture().get(); - assertThat(channel1.getOptions()).containsKey("mock_key"); - assertThat(channel1.getOptions().get("mock_key")).containsOnly("mock_value"); - - // When - CompletionStage channelFuture2 = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), 
TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture2).isSuccess(); - DriverChannel channel2 = channelFuture2.toCompletableFuture().get(); - assertThat(channel2.getOptions()).isNull(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java deleted file mode 100644 index b25a1e9ad71..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodec; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.request.Startup; -import com.datastax.oss.protocol.internal.response.Ready; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import io.netty.bootstrap.ServerBootstrap; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.channel.local.LocalChannel; -import 
io.netty.channel.local.LocalServerChannel; -import java.time.Duration; -import java.util.Collections; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.Exchanger; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.junit.After; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.stubbing.Answer; - -/** - * Sets up the infrastructure for channel factory tests. - * - *

Because the factory manages channel creation itself, {@link - * io.netty.channel.embedded.EmbeddedChannel} is not suitable. Instead, we launch an embedded server - * and connect to it with the local transport. - * - *

The current implementation assumes that only one connection will be tested at a time, but - * support for multiple simultaneous connections could easily be added: store multiple instances of - * requestFrameExchanger and serverResponseChannel, and add a parameter to readOutboundFrame and - * writeInboundFrame (for instance the position of the connection in creation order) to specify - * which instance to use. - */ -@RunWith(DataProviderRunner.class) -public abstract class ChannelFactoryTestBase { - static final EndPoint SERVER_ADDRESS = - new LocalEndPoint(ChannelFactoryTestBase.class.getSimpleName() + "-server"); - - private static final int TIMEOUT_MILLIS = 500; - - DefaultEventLoopGroup serverGroup; - DefaultEventLoopGroup clientGroup; - - @Mock InternalDriverContext context; - @Mock DriverConfig driverConfig; - @Mock DriverExecutionProfile defaultProfile; - @Mock NettyOptions nettyOptions; - @Mock ProtocolVersionRegistry protocolVersionRegistry; - @Mock EventBus eventBus; - @Mock Compressor compressor; - - // The server's I/O thread will store the last received request here, and block until the test - // thread retrieves it. This assumes readOutboundFrame() is called for each actual request, else - // the test will hang forever. 
- private final Exchanger requestFrameExchanger = new Exchanger<>(); - - // The channel that accepts incoming connections on the server - private LocalServerChannel serverAcceptChannel; - // The channel to send responses to the last open connection - private volatile LocalChannel serverResponseChannel; - - @Before - public void setup() throws InterruptedException { - MockitoAnnotations.initMocks(this); - - serverGroup = new DefaultEventLoopGroup(1); - clientGroup = new DefaultEventLoopGroup(1); - - when(context.getConfig()).thenReturn(driverConfig); - when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.isDefined(DefaultDriverOption.AUTH_PROVIDER_CLASS)).thenReturn(false); - when(defaultProfile.getDuration(DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT)) - .thenReturn(Duration.ofMillis(TIMEOUT_MILLIS)); - when(defaultProfile.getDuration(DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT)) - .thenReturn(Duration.ofMillis(TIMEOUT_MILLIS)); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS)).thenReturn(1); - when(defaultProfile.getDuration(DefaultDriverOption.HEARTBEAT_INTERVAL)) - .thenReturn(Duration.ofSeconds(30)); - when(defaultProfile.getDuration(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT)) - .thenReturn(Duration.ofSeconds(5)); - - when(context.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.ioEventLoopGroup()).thenReturn(clientGroup); - when(nettyOptions.channelClass()).thenAnswer((Answer) i -> LocalChannel.class); - when(nettyOptions.allocator()).thenReturn(ByteBufAllocator.DEFAULT); - when(context.getFrameCodec()) - .thenReturn( - FrameCodec.defaultClient( - new ByteBufPrimitiveCodec(ByteBufAllocator.DEFAULT), Compressor.none())); - when(context.getSslHandlerFactory()).thenReturn(Optional.empty()); - when(context.getEventBus()).thenReturn(eventBus); - 
when(context.getWriteCoalescer()).thenReturn(new PassThroughWriteCoalescer(null)); - when(context.getCompressor()).thenReturn(compressor); - - // Start local server - ServerBootstrap serverBootstrap = - new ServerBootstrap() - .group(serverGroup) - .channel(LocalServerChannel.class) - .localAddress(SERVER_ADDRESS.resolve()) - .childHandler(new ServerInitializer()); - ChannelFuture channelFuture = serverBootstrap.bind().sync(); - serverAcceptChannel = (LocalServerChannel) channelFuture.sync().channel(); - } - - // Sets up the pipeline for our local server - private class ServerInitializer extends ChannelInitializer { - @Override - protected void initChannel(LocalChannel ch) throws Exception { - // Install a single handler that stores received requests, so that the test can check what - // the client sent - ch.pipeline() - .addLast( - new ChannelInboundHandlerAdapter() { - @Override - @SuppressWarnings("unchecked") - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - super.channelRead(ctx, msg); - requestFrameExchanger.exchange((Frame) msg); - } - }); - - // Store the channel so that the test can send responses back to the client - serverResponseChannel = ch; - } - } - - protected Frame readOutboundFrame() { - try { - return requestFrameExchanger.exchange(null, TIMEOUT_MILLIS, MILLISECONDS); - } catch (InterruptedException e) { - fail("unexpected interruption while waiting for outbound frame", e); - } catch (TimeoutException e) { - fail("Timed out reading outbound frame"); - } - return null; // never reached - } - - protected void writeInboundFrame(Frame requestFrame, Message response) { - writeInboundFrame(requestFrame, response, requestFrame.protocolVersion); - } - - private void writeInboundFrame(Frame requestFrame, Message response, int protocolVersion) { - serverResponseChannel.writeAndFlush( - Frame.forResponse( - protocolVersion, - requestFrame.streamId, - null, - Frame.NO_PAYLOAD, - Collections.emptyList(), - response)); - } - 
- /** - * Simulate the sequence of roundtrips to initialize a simple channel without authentication or - * keyspace (avoids repeating it in subclasses). - */ - protected void completeSimpleChannelInit() { - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - writeInboundFrame(requestFrame, new Ready()); - - requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("mockClusterName")); - } - - ChannelFactory newChannelFactory() { - return new TestChannelFactory(context); - } - - // A simplified channel factory to use in the tests. - // It only installs high-level handlers on the pipeline, not the frame codecs. So we'll receive - // Frame objects on the server side, which is simpler to test. - private static class TestChannelFactory extends ChannelFactory { - - private TestChannelFactory(InternalDriverContext internalDriverContext) { - super(internalDriverContext); - } - - @Override - ChannelInitializer initializer( - EndPoint endPoint, - ProtocolVersion protocolVersion, - DriverChannelOptions options, - NodeMetricUpdater nodeMetricUpdater, - CompletableFuture resultFuture) { - return new ChannelInitializer() { - @Override - protected void initChannel(Channel channel) throws Exception { - try { - DriverExecutionProfile defaultProfile = context.getConfig().getDefaultProfile(); - - long setKeyspaceTimeoutMillis = - defaultProfile - .getDuration(DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT) - .toMillis(); - int maxRequestsPerConnection = - defaultProfile.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS); - - InFlightHandler inFlightHandler = - new InFlightHandler( - protocolVersion, - new StreamIdGenerator(maxRequestsPerConnection), - Integer.MAX_VALUE, - 
setKeyspaceTimeoutMillis, - channel.newPromise(), - null, - "test"); - - HeartbeatHandler heartbeatHandler = new HeartbeatHandler(defaultProfile); - ProtocolInitHandler initHandler = - new ProtocolInitHandler( - context, - protocolVersion, - getClusterName(), - endPoint, - options, - heartbeatHandler, - productType == null); - channel - .pipeline() - .addLast(ChannelFactory.INFLIGHT_HANDLER_NAME, inFlightHandler) - .addLast(ChannelFactory.INIT_HANDLER_NAME, initHandler); - } catch (Throwable t) { - resultFuture.completeExceptionally(t); - } - } - }; - } - } - - @After - public void tearDown() throws InterruptedException { - serverAcceptChannel.close(); - - serverGroup - .shutdownGracefully(TIMEOUT_MILLIS, TIMEOUT_MILLIS * 2, TimeUnit.MILLISECONDS) - .sync(); - clientGroup - .shutdownGracefully(TIMEOUT_MILLIS, TIMEOUT_MILLIS * 2, TimeUnit.MILLISECONDS) - .sync(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java deleted file mode 100644 index 5feb85a457b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import io.netty.channel.embedded.EmbeddedChannel; -import java.util.Collections; -import org.junit.Before; - -/** - * Infrastructure for channel handler test. - * - *

It relies on an embedded channel where the tested handler is installed. Then the test can - * simulate incoming/outgoing messages, and check that the handler propagates the adequate messages - * upstream/downstream. - */ -public class ChannelHandlerTestBase { - protected EmbeddedChannel channel; - - @Before - public void setup() { - channel = new EmbeddedChannel(); - } - - /** Reads a request frame that we expect the tested handler to have sent inbound. */ - protected Frame readInboundFrame() { - channel.runPendingTasks(); - Object o = channel.readInbound(); - assertThat(o).isInstanceOf(Frame.class); - return ((Frame) o); - } - - /** Reads a request frame that we expect the tested handler to have sent outbound. */ - protected Frame readOutboundFrame() { - channel.runPendingTasks(); - Object o = channel.readOutbound(); - assertThat(o).isInstanceOf(Frame.class); - return ((Frame) o); - } - - protected void assertNoOutboundFrame() { - channel.runPendingTasks(); - Object o = channel.readOutbound(); - assertThat(o).isNull(); - } - - /** Writes a response frame for the tested handler to read. */ - protected void writeInboundFrame(Frame responseFrame) { - channel.writeInbound(responseFrame); - } - - /** Writes a response frame that matches the given request, with the given response message. */ - protected void writeInboundFrame(Frame requestFrame, Message response) { - channel.writeInbound(buildInboundFrame(requestFrame, response)); - } - - /** Builds a response frame matching a request frame. 
*/ - protected Frame buildInboundFrame(Frame requestFrame, Message response) { - return Frame.forResponse( - requestFrame.protocolVersion, - requestFrame.streamId, - null, - requestFrame.customPayload, - Collections.emptyList(), - response); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java deleted file mode 100644 index 6024ed26a5c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandlerContext; -import java.net.InetSocketAddress; -import org.junit.Before; -import org.junit.Test; - -public class ConnectInitHandlerTest extends ChannelHandlerTestBase { - - private TestHandler handler; - - @Before - @Override - public void setup() { - super.setup(); - handler = new TestHandler(); - channel.pipeline().addLast(handler); - } - - @Test - public void should_call_onRealConnect_when_connection_succeeds() { - assertThat(handler.hasConnected).isFalse(); - - // When - channel.connect(new InetSocketAddress("localhost", 9042)); - - // Then - assertThat(handler.hasConnected).isTrue(); - } - - @Test - public void should_not_complete_connect_future_before_triggered_by_handler() { - // When - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // Then - assertThat(connectFuture.isDone()).isFalse(); - } - - @Test - public void should_complete_connect_future_when_handler_completes() { - // Given - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // When - handler.setConnectSuccess(); - - // Then - assertThat(connectFuture.isSuccess()).isTrue(); - } - - @Test - public void should_remove_handler_from_pipeline_when_handler_completes() { - // Given - channel.connect(new InetSocketAddress("localhost", 9042)); - - // When - handler.setConnectSuccess(); - - // Then - assertThat(channel.pipeline().get(TestHandler.class)).isNull(); - } - - @Test - public void should_fail_connect_future_when_handler_fails() { - // Given - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - Exception exception = new Exception("test"); - - // When - handler.setConnectFailure(exception); - - // Then - assertThat(connectFuture).isFailed(e -> 
assertThat(e).isEqualTo(exception)); - } - - /** - * Well-behaved implementations should not call setConnect* multiple times in a row, but check - * that we handle it gracefully if they do. - */ - @Test - public void should_ignore_subsequent_calls_if_handler_already_failed() { - // Given - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - Exception exception = new Exception("test"); - - // When - handler.setConnectFailure(exception); - handler.setConnectFailure(new Exception("test2")); - handler.setConnectSuccess(); - - // Then - assertThat(connectFuture).isFailed(e -> assertThat(e).isEqualTo(exception)); - } - - static class TestHandler extends ConnectInitHandler { - boolean hasConnected; - - @Override - protected void onRealConnect(ChannelHandlerContext ctx) { - hasConnected = true; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java deleted file mode 100644 index e0660b9609e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.result.Void; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; -import io.netty.util.concurrent.Future; -import java.util.AbstractMap; -import java.util.ArrayDeque; -import java.util.Map; -import java.util.Queue; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DriverChannelTest extends ChannelHandlerTestBase { - public static final int SET_KEYSPACE_TIMEOUT_MILLIS = 100; - - private DriverChannel driverChannel; - private MockWriteCoalescer writeCoalescer; - - @Mock private StreamIdGenerator streamIds; - - @Before - @Override - public void setup() { - super.setup(); - MockitoAnnotations.initMocks(this); - channel - .pipeline() - .addLast( - new InFlightHandler( - DefaultProtocolVersion.V3, - streamIds, - Integer.MAX_VALUE, - SET_KEYSPACE_TIMEOUT_MILLIS, - channel.newPromise(), - null, - "test")); - writeCoalescer = new MockWriteCoalescer(); - driverChannel = - new DriverChannel( - new EmbeddedEndPoint(), channel, writeCoalescer, DefaultProtocolVersion.V3); - } - - /** - * Ensures that the potential delay introduced by the write coalescer does not mess with the - * graceful shutdown sequence: any write submitted before {@link DriverChannel#close()} is - * guaranteed to complete. 
- */ - @Test - public void should_wait_for_coalesced_writes_when_closing_gracefully() { - // Given - MockResponseCallback responseCallback = new MockResponseCallback(); - driverChannel.write(new Query("test"), false, Frame.NO_PAYLOAD, responseCallback); - // nothing written yet because the coalescer hasn't flushed - assertNoOutboundFrame(); - - // When - Future closeFuture = driverChannel.close(); - - // Then - // not closed yet because there is still a pending write - assertThat(closeFuture).isNotDone(); - assertNoOutboundFrame(); - - // When - // the coalescer finally runs - writeCoalescer.triggerFlush(); - - // Then - // the pending write goes through - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame).isNotNull(); - // not closed yet because there is now a pending response - assertThat(closeFuture).isNotDone(); - - // When - // the pending response arrives - writeInboundFrame(requestFrame, Void.INSTANCE); - assertThat(responseCallback.getLastResponse().message).isEqualTo(Void.INSTANCE); - - // Then - assertThat(closeFuture).isSuccess(); - } - - /** - * Ensures that the potential delay introduced by the write coalescer does not mess with the - * forceful shutdown sequence: any write submitted before {@link DriverChannel#forceClose()} - * should get the "Channel was force-closed" error, whether it had been flushed or not. 
- */ - @Test - public void should_wait_for_coalesced_writes_when_closing_forcefully() { - // Given - MockResponseCallback responseCallback = new MockResponseCallback(); - driverChannel.write(new Query("test"), false, Frame.NO_PAYLOAD, responseCallback); - // nothing written yet because the coalescer hasn't flushed - assertNoOutboundFrame(); - - // When - Future closeFuture = driverChannel.forceClose(); - - // Then - // not closed yet because there is still a pending write - assertThat(closeFuture).isNotDone(); - assertNoOutboundFrame(); - - // When - // the coalescer finally runs - writeCoalescer.triggerFlush(); - // and the pending write goes through - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame).isNotNull(); - - // Then - assertThat(closeFuture).isSuccess(); - assertThat(responseCallback.getFailure()) - .isInstanceOf(ClosedConnectionException.class) - .hasMessageContaining("Channel was force-closed"); - } - - // Simple implementation that holds all the writes, and flushes them when it's explicitly - // triggered. 
- private class MockWriteCoalescer implements WriteCoalescer { - private Queue> messages = new ArrayDeque<>(); - - @Override - public ChannelFuture writeAndFlush(Channel channel, Object message) { - assertThat(channel).isEqualTo(DriverChannelTest.this.channel); - ChannelPromise writePromise = channel.newPromise(); - messages.offer(new AbstractMap.SimpleEntry<>(message, writePromise)); - return writePromise; - } - - void triggerFlush() { - for (Map.Entry entry : messages) { - channel.writeAndFlush(entry.getKey(), entry.getValue()); - } - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java deleted file mode 100644 index 5e463299a66..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.SocketAddress; - -/** Endpoint implementation for unit tests that use an embedded Netty channel. */ -public class EmbeddedEndPoint implements EndPoint { - - @NonNull - @Override - public SocketAddress resolve() { - throw new UnsupportedOperationException("This should not get called from unit tests"); - } - - @NonNull - @Override - public String asMetricPrefix() { - throw new UnsupportedOperationException("This should not get called from unit tests"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java deleted file mode 100644 index 35049e99af1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java +++ /dev/null @@ -1,660 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.internal.core.protocol.FrameDecodingException; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.event.StatusChangeEvent; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import com.datastax.oss.protocol.internal.response.result.Void; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class InFlightHandlerTest extends ChannelHandlerTestBase { - private static final Query QUERY = new Query("select * from foo"); - private static final int SET_KEYSPACE_TIMEOUT_MILLIS = 100; - private static final int MAX_ORPHAN_IDS = 10; - - @Mock private StreamIdGenerator streamIds; - - @Before - @Override - public void setup() { - super.setup(); - MockitoAnnotations.initMocks(this); - 
when(streamIds.preAcquire()).thenReturn(true); - } - - @Test - public void should_fail_if_connection_busy() throws Throwable { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(-1); - - // When - ChannelFuture writeFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage( - QUERY, false, Frame.NO_PAYLOAD, new MockResponseCallback())); - - // Then - assertThat(writeFuture) - .isFailed(e -> assertThat(e).isInstanceOf(BusyConnectionException.class)); - } - - @Test - public void should_assign_streamid_and_send_frame() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - - // When - ChannelFuture writeFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - - // Then - assertThat(writeFuture).isSuccess(); - verify(streamIds).acquire(); - - Frame frame = readOutboundFrame(); - assertThat(frame.streamId).isEqualTo(42); - assertThat(frame.message).isEqualTo(QUERY); - } - - @Test - public void should_notify_callback_of_response() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - Frame requestFrame = readOutboundFrame(); - - // When - Frame responseFrame = buildInboundFrame(requestFrame, Void.INSTANCE); - writeInboundFrame(responseFrame); - - // Then - assertThat(responseCallback.getLastResponse()).isSameAs(responseFrame); - verify(streamIds).release(42); - } - - @Test - public void should_notify_response_promise_when_decoding_fails() throws Throwable { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, 
Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // When - RuntimeException mockCause = new RuntimeException("test"); - channel.pipeline().fireExceptionCaught(new FrameDecodingException(42, mockCause)); - - // Then - assertThat(responseCallback.getFailure()).isSameAs(mockCause); - verify(streamIds).release(42); - } - - @Test - public void should_release_stream_id_when_orphaned_callback_receives_response() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - Frame requestFrame = readOutboundFrame(); - - // When - channel.writeAndFlush(responseCallback); // means cancellation (see DriverChannel#cancel) - Frame responseFrame = buildInboundFrame(requestFrame, Void.INSTANCE); - writeInboundFrame(responseFrame); - - // Then - verify(streamIds).release(42); - // The response is not propagated, because we assume a callback that cancelled managed its own - // termination - assertThat(responseCallback.getLastResponse()).isNull(); - } - - @Test - public void should_delay_graceful_close_and_complete_when_last_pending_completes() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // When - channel.write(DriverChannel.GRACEFUL_CLOSE_MESSAGE); - - // Then - // not closed yet because there is one pending request - assertThat(channel.closeFuture()).isNotDone(); - - // When - // completing pending request - Frame requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, Void.INSTANCE); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void 
should_delay_graceful_close_and_complete_when_last_pending_cancelled() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // When - channel.write(DriverChannel.GRACEFUL_CLOSE_MESSAGE); - - // Then - // not closed yet because there is one pending request - assertThat(channel.closeFuture()).isNotDone(); - - // When - // cancelling pending request - channel.write(responseCallback); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_graceful_close_immediately_if_no_pending() { - // Given - addToPipeline(); - - // When - channel.write(DriverChannel.GRACEFUL_CLOSE_MESSAGE); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_refuse_new_writes_during_graceful_close() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // When - channel.write(DriverChannel.GRACEFUL_CLOSE_MESSAGE); - - // Then - // not closed yet because there is one pending request - assertThat(channel.closeFuture()).isNotDone(); - // should not allow other write - ChannelFuture otherWriteFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - assertThat(otherWriteFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Channel is closing")); - } - - @Test - public void should_close_gracefully_if_orphan_ids_above_max_and_pending_request() { - // Given - addToPipeline(); - // Generate n orphan ids by writing and cancelling the requests: - 
for (int i = 0; i < MAX_ORPHAN_IDS; i++) { - when(streamIds.acquire()).thenReturn(i); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - } - // Generate another request that is pending and not cancelled: - when(streamIds.acquire()).thenReturn(MAX_ORPHAN_IDS); - MockResponseCallback pendingResponseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage( - QUERY, false, Frame.NO_PAYLOAD, pendingResponseCallback)) - .awaitUninterruptibly(); - - // When - // Generate the n+1th orphan id that makes us go above the threshold - when(streamIds.acquire()).thenReturn(MAX_ORPHAN_IDS + 1); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - - // Then - // Channel should be closing gracefully. 
There's no way to observe that from the outside, so - // write another request and check that it's rejected: - assertThat(channel.closeFuture()).isNotDone(); - ChannelFuture otherWriteFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - assertThat(otherWriteFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Channel is closing")); - - // When - // Cancel the last pending request - channel.writeAndFlush(pendingResponseCallback).awaitUninterruptibly(); - - // Then - // The graceful shutdown completes - assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_close_gracefully_if_orphan_ids_above_max_and_multiple_pending_requests() { - // Given - addToPipeline(); - // Generate n orphan ids by writing and cancelling the requests. - for (int i = 0; i < MAX_ORPHAN_IDS; i++) { - when(streamIds.acquire()).thenReturn(i); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - } - // Generate 3 additional requests that are pending and not cancelled. - List pendingResponseCallbacks = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - when(streamIds.acquire()).thenReturn(MAX_ORPHAN_IDS + i); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - pendingResponseCallbacks.add(responseCallback); - } - - // When - // Generate the n+1th orphan id that makes us go above the threshold by canceling one if the - // pending requests. 
- channel.writeAndFlush(pendingResponseCallbacks.remove(0)).awaitUninterruptibly(); - - // Then - // Channel should be closing gracefully but there's no way to observe that from the outside - // besides writing another request and check that it's rejected. - assertThat(channel.closeFuture()).isNotDone(); - ChannelFuture otherWriteFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage( - QUERY, false, Frame.NO_PAYLOAD, new MockResponseCallback())); - assertThat(otherWriteFuture).isFailed(); - assertThat(otherWriteFuture.cause()) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Channel is closing"); - - // When - // Cancel the remaining pending requests causing the n+ith orphan ids above the threshold. - for (MockResponseCallback pendingResponseCallback : pendingResponseCallbacks) { - ChannelFuture future = channel.writeAndFlush(pendingResponseCallback).awaitUninterruptibly(); - - // Then - // The future should succeed even though the channel has started closing gracefully. - assertThat(future).isSuccess(); - } - - // Then - // The graceful shutdown completes. 
- assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_close_immediately_if_orphan_ids_above_max_and_no_pending_requests() { - // Given - addToPipeline(); - // Generate n orphan ids by writing and cancelling the requests: - for (int i = 0; i < MAX_ORPHAN_IDS; i++) { - when(streamIds.acquire()).thenReturn(i); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - } - - // When - // Generate the n+1th orphan id that makes us go above the threshold - when(streamIds.acquire()).thenReturn(MAX_ORPHAN_IDS); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - - // Then - // Channel should close immediately since no active pending requests. 
- assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_fail_all_pending_when_force_closed() throws Throwable { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42, 43); - MockResponseCallback responseCallback1 = new MockResponseCallback(); - MockResponseCallback responseCallback2 = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback1)) - .awaitUninterruptibly(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback2)) - .awaitUninterruptibly(); - - // When - channel.write(DriverChannel.FORCEFUL_CLOSE_MESSAGE); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - for (MockResponseCallback callback : ImmutableList.of(responseCallback1, responseCallback2)) { - assertThat(callback.getFailure()) - .isInstanceOf(ClosedConnectionException.class) - .hasMessageContaining("Channel was force-closed"); - } - } - - @Test - public void should_fail_all_pending_and_close_on_unexpected_inbound_exception() throws Throwable { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42, 43); - MockResponseCallback responseCallback1 = new MockResponseCallback(); - MockResponseCallback responseCallback2 = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback1)) - .awaitUninterruptibly(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback2)) - .awaitUninterruptibly(); - - // When - RuntimeException mockException = new RuntimeException("test"); - channel.pipeline().fireExceptionCaught(mockException); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - for (MockResponseCallback callback : ImmutableList.of(responseCallback1, responseCallback2)) { - Throwable failure = callback.getFailure(); - 
assertThat(failure).isInstanceOf(ClosedConnectionException.class); - assertThat(failure.getCause()).isSameAs(mockException); - } - } - - @Test - public void should_fail_all_pending_if_connection_lost() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42, 43); - MockResponseCallback responseCallback1 = new MockResponseCallback(); - MockResponseCallback responseCallback2 = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback1)) - .awaitUninterruptibly(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback2)) - .awaitUninterruptibly(); - - // When - channel.pipeline().fireChannelInactive(); - - // Then - for (MockResponseCallback callback : ImmutableList.of(responseCallback1, responseCallback2)) { - assertThat(callback.getFailure()) - .isInstanceOf(ClosedConnectionException.class) - .hasMessageContaining("Lost connection to remote peer"); - } - } - - @Test - public void should_hold_stream_id_for_multi_response_callback() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = - new MockResponseCallback(frame -> frame.message instanceof Error); - - // When - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // Then - // notify callback of stream id - assertThat(responseCallback.streamId).isEqualTo(42); - - Frame requestFrame = readOutboundFrame(); - for (int i = 0; i < 5; i++) { - // When - // completing pending request - Frame responseFrame = buildInboundFrame(requestFrame, Void.INSTANCE); - writeInboundFrame(responseFrame); - - // Then - assertThat(responseCallback.getLastResponse()).isSameAs(responseFrame); - // Stream id not released, callback can receive more responses - verify(streamIds, never()).release(42); - } - - // When - // a terminal 
response comes in - Frame responseFrame = buildInboundFrame(requestFrame, new Error(0, "test")); - writeInboundFrame(responseFrame); - - // Then - verify(streamIds).release(42); - assertThat(responseCallback.getLastResponse()).isSameAs(responseFrame); - - // When - // more responses come in - writeInboundFrame(requestFrame, Void.INSTANCE); - - // Then - // the callback does not get them anymore (this could only be responses to a new request that - // reused the id) - assertThat(responseCallback.getLastResponse()).isNull(); - } - - @Test - public void - should_release_stream_id_when_orphaned_multi_response_callback_receives_last_response() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = - new MockResponseCallback(frame -> frame.message instanceof Error); - - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - Frame requestFrame = readOutboundFrame(); - for (int i = 0; i < 5; i++) { - Frame responseFrame = buildInboundFrame(requestFrame, Void.INSTANCE); - writeInboundFrame(responseFrame); - assertThat(responseCallback.getLastResponse()).isSameAs(responseFrame); - verify(streamIds, never()).release(42); - } - - // When - // cancelled mid-flight - channel.writeAndFlush(responseCallback); - - // Then - // subsequent non-final responses are not propagated (we assume the callback completed itself - // already), but do not release the stream id - writeInboundFrame(requestFrame, Void.INSTANCE); - assertThat(responseCallback.getLastResponse()).isNull(); - verify(streamIds, never()).release(42); - - // When - // the terminal response arrives - writeInboundFrame(requestFrame, new Error(0, "test")); - - // Then - // still not propagated but the id is released - assertThat(responseCallback.getLastResponse()).isNull(); - verify(streamIds).release(42); - } - - @Test - public void should_set_keyspace() { - // Given - 
addToPipeline(); - ChannelPromise setKeyspacePromise = channel.newPromise(); - DriverChannel.SetKeyspaceEvent setKeyspaceEvent = - new DriverChannel.SetKeyspaceEvent(CqlIdentifier.fromCql("ks"), setKeyspacePromise); - - // When - channel.pipeline().fireUserEventTriggered(setKeyspaceEvent); - Frame requestFrame = readOutboundFrame(); - - // Then - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, new SetKeyspace("ks")); - assertThat(setKeyspacePromise).isSuccess(); - } - - @Test - public void should_fail_to_set_keyspace_if_query_times_out() throws InterruptedException { - // Given - addToPipeline(); - ChannelPromise setKeyspacePromise = channel.newPromise(); - DriverChannel.SetKeyspaceEvent setKeyspaceEvent = - new DriverChannel.SetKeyspaceEvent(CqlIdentifier.fromCql("ks"), setKeyspacePromise); - - // When - channel.pipeline().fireUserEventTriggered(setKeyspaceEvent); - TimeUnit.MILLISECONDS.sleep(SET_KEYSPACE_TIMEOUT_MILLIS * 2); - channel.runPendingTasks(); - - // Then - assertThat(setKeyspacePromise).isFailed(); - } - - @Test - public void should_notify_callback_of_events() { - // Given - EventCallback eventCallback = mock(EventCallback.class); - addToPipelineWithEventCallback(eventCallback); - - // When - StatusChangeEvent event = - new StatusChangeEvent( - ProtocolConstants.StatusChangeType.UP, new InetSocketAddress("127.0.0.1", 9042)); - Frame eventFrame = - Frame.forResponse( - DefaultProtocolVersion.V3.getCode(), - -1, - null, - Collections.emptyMap(), - Collections.emptyList(), - event); - writeInboundFrame(eventFrame); - - // Then - ArgumentCaptor captor = ArgumentCaptor.forClass(StatusChangeEvent.class); - verify(eventCallback).onEvent(captor.capture()); - assertThat(captor.getValue()).isSameAs(event); - } - - private void addToPipeline() { - addToPipelineWithEventCallback(null); - } - - private void addToPipelineWithEventCallback(EventCallback eventCallback) { - channel - .pipeline() - .addLast( - new 
InFlightHandler( - DefaultProtocolVersion.V3, - streamIds, - MAX_ORPHAN_IDS, - SET_KEYSPACE_TIMEOUT_MILLIS, - channel.newPromise(), - eventCallback, - "test")); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java deleted file mode 100644 index c90731eece9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.channel.local.LocalAddress; -import java.net.SocketAddress; - -/** Endpoint implementation for unit tests that use the local Netty transport. 
*/ -public class LocalEndPoint implements EndPoint { - - private final LocalAddress localAddress; - - public LocalEndPoint(String id) { - this.localAddress = new LocalAddress(id); - } - - @NonNull - @Override - public SocketAddress resolve() { - return localAddress; - } - - @NonNull - @Override - public String asMetricPrefix() { - throw new UnsupportedOperationException("This should not get called from unit tests"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java deleted file mode 100644 index 6015203ed38..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.auth.SyncAuthenticator; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; - -/** - * Dummy authenticator for our tests. - * - *

The initial response is hard-coded. When the server asks it to evaluate a challenge, it always - * replies with the same token. When authentication succeeds, the success token is stored for later - * inspection. - */ -public class MockAuthenticator implements SyncAuthenticator { - static final String INITIAL_RESPONSE = "0xcafebabe"; - - volatile String successToken; - - @Override - public ByteBuffer initialResponseSync() { - return Bytes.fromHexString(INITIAL_RESPONSE); - } - - @Override - public ByteBuffer evaluateChallengeSync(ByteBuffer challenge) { - return challenge; - } - - @Override - public void onAuthenticationSuccessSync(ByteBuffer token) { - successToken = Bytes.toHexString(token); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java deleted file mode 100644 index 43768131108..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.MultimapBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.mockito.ArgumentCaptor; -import org.mockito.InOrder; -import org.mockito.internal.util.MockUtil; -import org.mockito.stubbing.OngoingStubbing; - -/** - * Helper class to set up and verify a sequence of invocations on a ChannelFactory mock. - * - *

Use the builder at the beginning of the test to stub expected calls. Then call the verify - * methods throughout the test to check that each call has been performed. - * - *

This class handles asynchronous calls to the thread factory, but it must be used from a single - * thread (see {@link #waitForCalls(Node, int)}). - */ -public class MockChannelFactoryHelper { - - private static final int CONNECT_TIMEOUT_MILLIS = 500; - - public static Builder builder(ChannelFactory channelFactory) { - return new Builder(channelFactory); - } - - private final ChannelFactory channelFactory; - private final InOrder inOrder; - // If waitForCalls sees more invocations than expected, the difference is stored here - private final Map previous = new HashMap<>(); - - public MockChannelFactoryHelper(ChannelFactory channelFactory) { - this.channelFactory = channelFactory; - this.inOrder = inOrder(channelFactory); - } - - public void waitForCall(Node node) { - waitForCalls(node, 1); - } - - /** - * Waits for a given number of calls to {@code ChannelFactory.connect()}. - * - *

Because we test asynchronous, non-blocking code, there might already be more calls than - * expected when this method is called. If so, the extra calls are stored and stored and will be - * taken into account next time. - */ - public void waitForCalls(Node node, int expected) { - int fromLastTime = previous.getOrDefault(node, 0); - if (fromLastTime >= expected) { - previous.put(node, fromLastTime - expected); - return; - } - expected -= fromLastTime; - - // Because we test asynchronous, non-blocking code, there might have been already more - // invocations than expected. Use `atLeast` and a captor to find out. - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - inOrder - .verify(channelFactory, timeout(CONNECT_TIMEOUT_MILLIS).atLeast(expected)) - .connect(eq(node), optionsCaptor.capture()); - int actual = optionsCaptor.getAllValues().size(); - - int extras = actual - expected; - if (extras > 0) { - previous.compute(node, (k, v) -> (v == null) ? extras : v + extras); - } - } - - public void verifyNoMoreCalls() { - inOrder - .verify(channelFactory, timeout(CONNECT_TIMEOUT_MILLIS).times(0)) - .connect(any(Node.class), any(DriverChannelOptions.class)); - - Set counts = Sets.newHashSet(previous.values()); - if (!counts.isEmpty()) { - assertThat(counts).containsExactly(0); - } - } - - public static class Builder { - private final ChannelFactory channelFactory; - private final ListMultimap invocations = - MultimapBuilder.hashKeys().arrayListValues().build(); - - public Builder(ChannelFactory channelFactory) { - assertThat(MockUtil.isMock(channelFactory)).as("expected a mock").isTrue(); - verifyZeroInteractions(channelFactory); - this.channelFactory = channelFactory; - } - - public Builder success(Node node, DriverChannel channel) { - invocations.put(node, channel); - return this; - } - - public Builder failure(Node node, String error) { - invocations.put(node, new Exception(error)); - return this; - } - - public Builder failure(Node 
node, Throwable error) { - invocations.put(node, error); - return this; - } - - public Builder pending(Node node, CompletableFuture future) { - invocations.put(node, future); - return this; - } - - public MockChannelFactoryHelper build() { - stub(); - return new MockChannelFactoryHelper(channelFactory); - } - - private void stub() { - for (Node node : invocations.keySet()) { - Deque> results = new ArrayDeque<>(); - for (Object object : invocations.get(node)) { - if (object instanceof DriverChannel) { - results.add(CompletableFuture.completedFuture(((DriverChannel) object))); - } else if (object instanceof Throwable) { - results.add(CompletableFutures.failedFuture(((Throwable) object))); - } else if (object instanceof CompletableFuture) { - @SuppressWarnings("unchecked") - CompletionStage future = (CompletionStage) object; - results.add(future); - } else { - fail("unexpected type: " + object.getClass()); - } - } - if (results.size() > 0) { - CompletionStage first = results.poll(); - OngoingStubbing> ongoingStubbing = - when(channelFactory.connect(eq(node), any(DriverChannelOptions.class))) - .thenReturn(first); - for (CompletionStage result : results) { - ongoingStubbing.thenReturn(result); - } - } - } - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java deleted file mode 100644 index 8774ee3e298..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.protocol.internal.Frame; -import java.util.ArrayDeque; -import java.util.Queue; -import java.util.function.Predicate; - -class MockResponseCallback implements ResponseCallback { - private final Queue responses = new ArrayDeque<>(); - private final Predicate isLastResponse; - - volatile int streamId = -1; - - MockResponseCallback() { - this(f -> true); - } - - MockResponseCallback(Predicate isLastResponse) { - this.isLastResponse = isLastResponse; - } - - @Override - public void onResponse(Frame responseFrame) { - responses.offer(responseFrame); - } - - @Override - public void onFailure(Throwable error) { - responses.offer(error); - } - - @Override - public boolean isLastResponse(Frame responseFrame) { - return isLastResponse.test(responseFrame); - } - - @Override - public void onStreamIdAssigned(int streamId) { - this.streamId = streamId; - } - - Frame getLastResponse() { - return (Frame) responses.poll(); - } - - Throwable getFailure() { - return (Throwable) responses.poll(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java deleted file mode 100644 index 2fd12fc9f94..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java +++ /dev/null @@ -1,652 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import 
com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.AuthResponse; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.Register; -import com.datastax.oss.protocol.internal.request.Startup; -import com.datastax.oss.protocol.internal.response.AuthChallenge; -import com.datastax.oss.protocol.internal.response.AuthSuccess; -import com.datastax.oss.protocol.internal.response.Authenticate; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Ready; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.channel.ChannelFuture; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.ConcurrentModificationException; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.LoggerFactory; - -public class ProtocolInitHandlerTest extends ChannelHandlerTestBase { - - private static final long QUERY_TIMEOUT_MILLIS = 100L; - // The handled only uses this to call the auth provider and for exception 
messages, so the actual - // value doesn't matter: - private static final EndPoint END_POINT = TestNodeFactory.newEndPoint(1); - - @Mock private InternalDriverContext internalDriverContext; - @Mock private DriverConfig driverConfig; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private Appender appender; - - private ProtocolVersionRegistry protocolVersionRegistry = - new DefaultProtocolVersionRegistry("test"); - private HeartbeatHandler heartbeatHandler; - - @Before - @Override - public void setup() { - super.setup(); - MockitoAnnotations.initMocks(this); - when(internalDriverContext.getConfig()).thenReturn(driverConfig); - when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getDuration(DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT)) - .thenReturn(Duration.ofMillis(QUERY_TIMEOUT_MILLIS)); - when(defaultProfile.getDuration(DefaultDriverOption.HEARTBEAT_INTERVAL)) - .thenReturn(Duration.ofSeconds(30)); - when(internalDriverContext.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - - channel - .pipeline() - .addLast( - ChannelFactory.INFLIGHT_HANDLER_NAME, - new InFlightHandler( - DefaultProtocolVersion.V4, - new StreamIdGenerator(100), - Integer.MAX_VALUE, - 100, - channel.newPromise(), - null, - "test")); - - heartbeatHandler = new HeartbeatHandler(defaultProfile); - } - - @Test - public void should_initialize() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // It should send a STARTUP message - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - // Simulate a READY response - 
writeInboundFrame(buildInboundFrame(requestFrame, new Ready())); - - // Simulate the cluster name check - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - - // Init should complete - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_query_supported_options() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - true)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // It should send an OPTIONS message - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - assertThat(connectFuture).isNotDone(); - - // Simulate the SUPPORTED response - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - Map> supportedOptions = channel.attr(DriverChannel.OPTIONS_KEY).get(); - assertThat(supportedOptions).containsKey("mock_key"); - assertThat(supportedOptions.get("mock_key")).containsOnly("mock_value"); - - // It should send a STARTUP message - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - // Simulate a READY response - writeInboundFrame(buildInboundFrame(requestFrame, new Ready())); - - // Simulate the cluster name check - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - - // Init should complete - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_add_heartbeat_handler_to_pipeline_on_success() { - ProtocolInitHandler protocolInitHandler = - new 
ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false); - - channel.pipeline().addLast(ChannelFactory.INIT_HANDLER_NAME, protocolInitHandler); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // heartbeat should initially not be in pipeline - assertThat(channel.pipeline().get(ChannelFactory.HEARTBEAT_HANDLER_NAME)).isNull(); - - // It should send a STARTUP message - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - // Simulate a READY response - writeInboundFrame(buildInboundFrame(requestFrame, new Ready())); - - // Simulate the cluster name check - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - - // Init should complete - assertThat(connectFuture).isSuccess(); - - // should have added heartbeat handler to pipeline. - assertThat(channel.pipeline().get(ChannelFactory.HEARTBEAT_HANDLER_NAME)) - .isEqualTo(heartbeatHandler); - // should have removed itself from pipeline. 
- assertThat(channel.pipeline().last()).isNotEqualTo(protocolInitHandler); - } - - @Test - public void should_fail_to_initialize_if_init_query_times_out() throws InterruptedException { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - readOutboundFrame(); - - // Simulate a pause longer than the timeout - TimeUnit.MILLISECONDS.sleep(QUERY_TIMEOUT_MILLIS * 2); - channel.runPendingTasks(); - - assertThat(connectFuture).isFailed(); - } - - @Test - public void should_initialize_with_authentication() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - String serverAuthenticator = "mockServerAuthenticator"; - AuthProvider authProvider = mock(AuthProvider.class); - MockAuthenticator authenticator = new MockAuthenticator(); - when(authProvider.newAuthenticator(END_POINT, serverAuthenticator)).thenReturn(authenticator); - when(internalDriverContext.getAuthProvider()).thenReturn(Optional.of(authProvider)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - // Simulate a response that says that the server requires authentication - writeInboundFrame(requestFrame, new Authenticate(serverAuthenticator)); - - // The connection should have created an authenticator from the auth provider - verify(authProvider).newAuthenticator(END_POINT, serverAuthenticator); - - // And sent an auth response - requestFrame = 
readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(AuthResponse.class); - AuthResponse authResponse = (AuthResponse) requestFrame.message; - assertThat(Bytes.toHexString(authResponse.token)).isEqualTo(MockAuthenticator.INITIAL_RESPONSE); - assertThat(connectFuture).isNotDone(); - - // As long as the server sends an auth challenge, the client should reply with another - // auth_response - String mockToken = "0xabcd"; - for (int i = 0; i < 5; i++) { - writeInboundFrame(requestFrame, new AuthChallenge(Bytes.fromHexString(mockToken))); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(AuthResponse.class); - authResponse = (AuthResponse) requestFrame.message; - // Our mock impl happens to send back the same token - assertThat(Bytes.toHexString(authResponse.token)).isEqualTo(mockToken); - assertThat(connectFuture).isNotDone(); - } - - // When the server finally sends back a success message, should proceed to the cluster name - // check and succeed - writeInboundFrame(requestFrame, new AuthSuccess(Bytes.fromHexString(mockToken))); - assertThat(authenticator.successToken).isEqualTo(mockToken); - - requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_invoke_auth_provider_when_server_does_not_send_challenge() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - AuthProvider authProvider = mock(AuthProvider.class); - when(internalDriverContext.getAuthProvider()).thenReturn(Optional.of(authProvider)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - Frame requestFrame = readOutboundFrame(); - 
assertThat(requestFrame.message).isInstanceOf(Startup.class); - - // Simulate a READY response, the provider should be notified - writeInboundFrame(buildInboundFrame(requestFrame, new Ready())); - verify(authProvider).onMissingChallenge(END_POINT); - - // Since our mock does nothing, init should proceed normally - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_fail_to_initialize_if_server_sends_auth_error() throws Throwable { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - String serverAuthenticator = "mockServerAuthenticator"; - AuthProvider authProvider = mock(AuthProvider.class); - MockAuthenticator authenticator = new MockAuthenticator(); - when(authProvider.newAuthenticator(END_POINT, serverAuthenticator)).thenReturn(authenticator); - when(internalDriverContext.getAuthProvider()).thenReturn(Optional.of(authProvider)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - writeInboundFrame(requestFrame, new Authenticate("mockServerAuthenticator")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(AuthResponse.class); - assertThat(connectFuture).isNotDone(); - - writeInboundFrame( - requestFrame, new Error(ProtocolConstants.ErrorCode.AUTH_ERROR, "mock error")); - - assertThat(connectFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(AuthenticationException.class) - .hasMessage( - String.format( - "Authentication error on 
node %s: server replied with 'mock error' to AuthResponse request", - END_POINT))); - } - - @Test - public void should_check_cluster_name_if_provided() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - "expectedClusterName", - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - Frame requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, new Ready()); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - Query query = (Query) requestFrame.message; - assertThat(query.query).isEqualTo("SELECT cluster_name FROM system.local"); - assertThat(connectFuture).isNotDone(); - - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("expectedClusterName")); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_fail_to_initialize_if_cluster_name_does_not_match() throws Throwable { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - "expectedClusterName", - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame( - readOutboundFrame(), TestResponses.clusterNameResponse("differentClusterName")); - - assertThat(connectFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(ClusterNameMismatchException.class) - .hasMessageContaining( - String.format( - "Node %s reports cluster name 'differentClusterName' that doesn't match our cluster name 'expectedClusterName'.", - END_POINT))); - } - - @Test - public void should_initialize_with_keyspace() { - DriverChannelOptions 
options = - DriverChannelOptions.builder().withKeyspace(CqlIdentifier.fromCql("ks")).build(); - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - options, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("someClusterName")); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - assertThat(((Query) requestFrame.message).query).isEqualTo("USE \"ks\""); - writeInboundFrame(requestFrame, new SetKeyspace("ks")); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_initialize_with_events() { - List eventTypes = ImmutableList.of("foo", "bar"); - EventCallback eventCallback = mock(EventCallback.class); - DriverChannelOptions driverChannelOptions = - DriverChannelOptions.builder().withEvents(eventTypes, eventCallback).build(); - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - driverChannelOptions, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("someClusterName")); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Register.class); - assertThat(((Register) requestFrame.message).eventTypes).containsExactly("foo", "bar"); - writeInboundFrame(requestFrame, new Ready()); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_initialize_with_keyspace_and_events() { - List eventTypes 
= ImmutableList.of("foo", "bar"); - EventCallback eventCallback = mock(EventCallback.class); - DriverChannelOptions driverChannelOptions = - DriverChannelOptions.builder() - .withKeyspace(CqlIdentifier.fromCql("ks")) - .withEvents(eventTypes, eventCallback) - .build(); - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - driverChannelOptions, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("someClusterName")); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - assertThat(((Query) requestFrame.message).query).isEqualTo("USE \"ks\""); - writeInboundFrame(requestFrame, new SetKeyspace("ks")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Register.class); - assertThat(((Register) requestFrame.message).eventTypes).containsExactly("foo", "bar"); - writeInboundFrame(requestFrame, new Ready()); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_fail_to_initialize_if_keyspace_is_invalid() { - DriverChannelOptions driverChannelOptions = - DriverChannelOptions.builder().withKeyspace(CqlIdentifier.fromCql("ks")).build(); - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - driverChannelOptions, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("someClusterName")); - - Frame requestFrame = 
readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - assertThat(((Query) requestFrame.message).query).isEqualTo("USE \"ks\""); - writeInboundFrame( - requestFrame, new Error(ProtocolConstants.ErrorCode.INVALID, "invalid keyspace")); - - assertThat(connectFuture) - .isFailed( - error -> - assertThat(error) - .isInstanceOf(InvalidKeyspaceException.class) - .hasMessage("invalid keyspace")); - } - - /** - * This covers a corner case where {@code abortAllInFlight} was recursing into itself, causing a - * {@link ConcurrentModificationException}. This was recoverable but caused Netty to generate a - * warning log. - * - * @see JAVA-2838 - */ - @Test - public void should_fail_pending_requests_only_once_if_init_fails() { - Logger logger = - (Logger) LoggerFactory.getLogger("io.netty.channel.AbstractChannelHandlerContext"); - Level levelBefore = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - - channel - .pipeline() - .addLast( - "init", - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - channel.pipeline().fireExceptionCaught(new IOException("Mock I/O exception")); - assertThat(connectFuture).isFailed(); - - verify(appender, never()).doAppend(any(ILoggingEvent.class)); - - logger.detachAppender(appender); - logger.setLevel(levelBefore); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java deleted file mode 100644 index 83802884c45..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.junit.Test; - -public class StreamIdGeneratorTest { - @Test - public void should_have_all_available_upon_creation() { - StreamIdGenerator generator = new StreamIdGenerator(8); - assertThat(generator.getAvailableIds()).isEqualTo(8); - } - - @Test - public void should_return_available_ids_in_sequence() { - StreamIdGenerator generator = new StreamIdGenerator(8); - for (int i = 0; i < 8; i++) { - assertThat(generator.preAcquire()).isTrue(); - assertThat(generator.acquire()).isEqualTo(i); - assertThat(generator.getAvailableIds()).isEqualTo(7 - i); - } - } - - @Test - public void should_return_minus_one_when_no_id_available() { - StreamIdGenerator generator = new StreamIdGenerator(8); - for (int i = 0; i < 8; i++) { - assertThat(generator.preAcquire()).isTrue(); - // also validating that ids are held as soon as preAcquire() is called, even if acquire() has - // not been invoked yet - } - assertThat(generator.getAvailableIds()).isEqualTo(0); - assertThat(generator.preAcquire()).isFalse(); - } - - @Test - public void should_return_previously_released_ids() { - StreamIdGenerator generator = new StreamIdGenerator(8); - 
for (int i = 0; i < 8; i++) { - assertThat(generator.preAcquire()).isTrue(); - assertThat(generator.acquire()).isEqualTo(i); - } - generator.release(7); - generator.release(2); - assertThat(generator.getAvailableIds()).isEqualTo(2); - assertThat(generator.preAcquire()).isTrue(); - assertThat(generator.acquire()).isEqualTo(2); - assertThat(generator.preAcquire()).isTrue(); - assertThat(generator.acquire()).isEqualTo(7); - assertThat(generator.preAcquire()).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockOptions.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockOptions.java deleted file mode 100644 index cee57abbfdf..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockOptions.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config; - -import com.datastax.oss.driver.api.core.config.DriverOption; -import edu.umd.cs.findbugs.annotations.NonNull; - -public enum MockOptions implements DriverOption { - INT1("int1"), - INT2("int2"), - AUTH_PROVIDER("auth_provider"), - SUBNET_ADDRESSES("subnet_addresses"), - ; - - private final String path; - - MockOptions(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockTypedOptions.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockTypedOptions.java deleted file mode 100644 index ecad298aa37..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockTypedOptions.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config; - -import com.datastax.oss.driver.api.core.config.TypedDriverOption; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; - -public class MockTypedOptions { - public static final TypedDriverOption INT1 = - new TypedDriverOption<>(MockOptions.INT1, GenericType.INTEGER); - public static final TypedDriverOption INT2 = - new TypedDriverOption<>(MockOptions.INT2, GenericType.INTEGER); -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactoryTest.java deleted file mode 100644 index a0db82d298e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactoryTest.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.cloud; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; -import static com.github.tomakehurst.wiremock.client.WireMock.any; -import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; -import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; -import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; -import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.internal.core.ssl.SniSslEngineFactory; -import com.fasterxml.jackson.core.JsonParseException; -import com.github.tomakehurst.wiremock.common.JettySettings; -import com.github.tomakehurst.wiremock.core.Options; -import com.github.tomakehurst.wiremock.http.AdminRequestHandler; -import com.github.tomakehurst.wiremock.http.HttpServer; -import com.github.tomakehurst.wiremock.http.HttpServerFactory; -import com.github.tomakehurst.wiremock.http.StubRequestHandler; -import com.github.tomakehurst.wiremock.jetty9.JettyHttpServer; -import com.github.tomakehurst.wiremock.junit.WireMockRule; -import com.google.common.base.Joiner; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import org.eclipse.jetty.io.NetworkTrafficListener; -import org.eclipse.jetty.server.ConnectionFactory; -import org.eclipse.jetty.server.ServerConnector; -import org.eclipse.jetty.server.SslConnectionFactory; -import org.eclipse.jetty.util.ssl.SslContextFactory; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - 
-@RunWith(MockitoJUnitRunner.class) -public class CloudConfigFactoryTest { - - private static final String BUNDLE_PATH = "/config/cloud/creds.zip"; - - @Rule - public WireMockRule wireMockRule = - new WireMockRule( - wireMockConfig() - .httpsPort(30443) - .dynamicPort() - .httpServerFactory(new HttpsServerFactory()) - .needClientAuth(true) - .keystorePath(path("/config/cloud/identity.jks").toString()) - .keystorePassword("fakePasswordForTests") - .trustStorePath(path("/config/cloud/trustStore.jks").toString()) - .trustStorePassword("fakePasswordForTests2")); - - public CloudConfigFactoryTest() throws URISyntaxException {} - - @Test - public void should_load_config_from_local_filesystem() throws Exception { - // given - URL configFile = getClass().getResource(BUNDLE_PATH); - mockProxyMetadataService(jsonMetadata()); - // when - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - CloudConfig cloudConfig = cloudConfigFactory.createCloudConfig(configFile); - // then - assertCloudConfig(cloudConfig); - } - - @Test - public void should_load_config_from_external_location() throws Exception { - // given - mockHttpSecureBundle(secureBundle()); - mockProxyMetadataService(jsonMetadata()); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - CloudConfig cloudConfig = cloudConfigFactory.createCloudConfig(configFile); - // then - assertCloudConfig(cloudConfig); - } - - @Test - public void should_throw_when_bundle_not_found() throws Exception { - // given - stubFor(any(urlEqualTo(BUNDLE_PATH)).willReturn(aResponse().withStatus(404))); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); - assertThat(t) - .isInstanceOf(FileNotFoundException.class) - 
.hasMessageContaining(configFile.toExternalForm()); - } - - @Test - public void should_throw_when_bundle_not_readable() throws Exception { - // given - mockHttpSecureBundle("not a zip file".getBytes(StandardCharsets.UTF_8)); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); - assertThat(t) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Invalid bundle: missing file config.json"); - } - - @Test - public void should_throw_when_metadata_not_found() throws Exception { - // given - mockHttpSecureBundle(secureBundle()); - stubFor(any(urlPathEqualTo("/metadata")).willReturn(aResponse().withStatus(404))); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); - assertThat(t).isInstanceOf(FileNotFoundException.class).hasMessageContaining("metadata"); - } - - @Test - public void should_throw_when_metadata_not_readable() throws Exception { - // given - mockHttpSecureBundle(secureBundle()); - mockProxyMetadataService("not a valid json payload"); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); - assertThat(t).isInstanceOf(JsonParseException.class).hasMessageContaining("Unrecognized token"); - } - - private void mockHttpSecureBundle(byte[] body) { - stubFor( - any(urlEqualTo(BUNDLE_PATH)) - .willReturn( - aResponse() - .withStatus(200) - .withHeader("Content-Type", "application/octet-stream") - .withBody(body))); - } - - private void mockProxyMetadataService(String 
jsonMetadata) { - stubFor( - any(urlPathEqualTo("/metadata")) - .willReturn( - aResponse() - .withStatus(200) - .withHeader("Content-Type", "application/json") - .withBody(jsonMetadata))); - } - - private byte[] secureBundle() throws IOException, URISyntaxException { - return Files.readAllBytes(path(BUNDLE_PATH)); - } - - private String jsonMetadata() throws IOException, URISyntaxException { - return Joiner.on('\n') - .join(Files.readAllLines(path("/config/cloud/metadata.json"), StandardCharsets.UTF_8)); - } - - private Path path(String resource) throws URISyntaxException { - return Paths.get(getClass().getResource(resource).toURI()); - } - - private void assertCloudConfig(CloudConfig config) { - InetSocketAddress expectedProxyAddress = InetSocketAddress.createUnresolved("localhost", 30002); - assertThat(config.getLocalDatacenter()).isEqualTo("dc1"); - assertThat(config.getProxyAddress()).isEqualTo(expectedProxyAddress); - assertThat(config.getEndPoints()).extracting("proxyAddress").containsOnly(expectedProxyAddress); - assertThat(config.getEndPoints()) - .extracting("serverName") - .containsExactly( - "4ac06655-f861-49f9-881e-3fee22e69b94", - "2af7c253-3394-4a0d-bfac-f1ad81b5154d", - "b17b6e2a-3f48-4d6a-81c1-20a0a1f3192a"); - assertThat(config.getSslEngineFactory()).isNotNull().isInstanceOf(SniSslEngineFactory.class); - } - - static { - javax.net.ssl.HttpsURLConnection.setDefaultHostnameVerifier( - (hostname, sslSession) -> hostname.equals("localhost")); - } - - // see https://github.com/tomakehurst/wiremock/issues/874 - private static class HttpsServerFactory implements HttpServerFactory { - @Override - public HttpServer buildHttpServer( - Options options, - AdminRequestHandler adminRequestHandler, - StubRequestHandler stubRequestHandler) { - return new JettyHttpServer(options, adminRequestHandler, stubRequestHandler) { - @Override - protected ServerConnector createServerConnector( - String bindAddress, - JettySettings jettySettings, - int port, - 
NetworkTrafficListener listener, - ConnectionFactory... connectionFactories) { - if (port == options.httpsSettings().port()) { - SslConnectionFactory sslConnectionFactory = - (SslConnectionFactory) connectionFactories[0]; - SslContextFactory sslContextFactory = sslConnectionFactory.getSslContextFactory(); - sslContextFactory.setKeyStorePassword(options.httpsSettings().keyStorePassword()); - connectionFactories = - new ConnectionFactory[] {sslConnectionFactory, connectionFactories[1]}; - } - return super.createServerConnector( - bindAddress, jettySettings, port, listener, connectionFactories); - } - }; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigReloadTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigReloadTest.java deleted file mode 100644 index 1d327a08101..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigReloadTest.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.composite; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class CompositeDriverConfigReloadTest { - - @Mock private DriverConfigLoader primaryLoader; - @Mock private DriverConfigLoader fallbackLoader; - private DriverConfigLoader compositeLoader; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - compositeLoader = DriverConfigLoader.compose(primaryLoader, fallbackLoader); - } - - @Test - @UseDataProvider("reloadabilities") - public void should_be_reloadable_if_either_child_is( - boolean primaryIsReloadable, - boolean fallbackIsReloadable, - boolean compositeShouldBeReloadable) { - when(primaryLoader.supportsReloading()).thenReturn(primaryIsReloadable); - when(fallbackLoader.supportsReloading()).thenReturn(fallbackIsReloadable); - assertThat(compositeLoader.supportsReloading()).isEqualTo(compositeShouldBeReloadable); - } - - @Test - @UseDataProvider("reloadabilities") - public void should_delegate_reloading_to_reloadable_children( - boolean primaryIsReloadable, - boolean fallbackIsReloadable, - boolean 
compositeShouldBeReloadable) { - when(primaryLoader.supportsReloading()).thenReturn(primaryIsReloadable); - when(primaryLoader.reload()) - .thenReturn( - primaryIsReloadable - ? CompletableFuture.completedFuture(true) - : CompletableFutures.failedFuture(new UnsupportedOperationException())); - - when(fallbackLoader.supportsReloading()).thenReturn(fallbackIsReloadable); - when(fallbackLoader.reload()) - .thenReturn( - fallbackIsReloadable - ? CompletableFuture.completedFuture(true) - : CompletableFutures.failedFuture(new UnsupportedOperationException())); - - CompletionStage reloadFuture = compositeLoader.reload(); - - if (compositeShouldBeReloadable) { - assertThat(reloadFuture).isCompletedWithValue(true); - } else { - assertThat(reloadFuture).isCompletedExceptionally(); - Throwable t = catchThrowable(() -> reloadFuture.toCompletableFuture().get()); - assertThat(t).hasRootCauseInstanceOf(UnsupportedOperationException.class); - } - verify(primaryLoader, primaryIsReloadable ? times(1) : never()).reload(); - verify(fallbackLoader, fallbackIsReloadable ? times(1) : never()).reload(); - } - - @DataProvider - public static Object[][] reloadabilities() { - return new Object[][] { - // primaryIsReloadable, fallbackIsReloadable, compositeShouldBeReloadable - new Object[] {true, true, true}, - new Object[] {true, false, true}, - new Object[] {false, true, true}, - new Object[] {false, false, false}, - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigTest.java deleted file mode 100644 index e5d5ffcdf83..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.composite; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.entry; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.api.core.config.TypedDriverOption; -import org.junit.Before; -import org.junit.Test; - -public class CompositeDriverConfigTest { - - private OptionsMap primaryMap; - private OptionsMap fallbackMap; - private DriverConfig compositeConfig; - private DriverExecutionProfile compositeDefaultProfile; - - @Before - public void setup() { - primaryMap = new OptionsMap(); - // We need at least one option so that the default profile exists. Do it now to avoid having to - // do it in every test. We use an option that we won't reuse in the tests so that there are no - // unwanted interactions. 
- primaryMap.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES, 1); - - fallbackMap = new OptionsMap(); - fallbackMap.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES, 1); - - DriverConfigLoader compositeLoader = - DriverConfigLoader.compose( - DriverConfigLoader.fromMap(primaryMap), DriverConfigLoader.fromMap(fallbackMap)); - compositeConfig = compositeLoader.getInitialConfig(); - compositeDefaultProfile = compositeConfig.getDefaultProfile(); - } - - @Test - public void should_use_value_from_primary_config() { - primaryMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - - assertThat(compositeDefaultProfile.isDefined(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isTrue(); - assertThat(compositeDefaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(1); - assertThat(compositeDefaultProfile.entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - } - - @Test - public void should_ignore_value_from_fallback_config_if_defined_in_both() { - primaryMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - fallbackMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2); - - assertThat(compositeDefaultProfile.isDefined(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isTrue(); - assertThat(compositeDefaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(1); - assertThat(compositeDefaultProfile.entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - } - - @Test - public void should_use_value_from_fallback_config_if_not_defined_in_primary() { - fallbackMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - - assertThat(compositeDefaultProfile.isDefined(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isTrue(); - 
assertThat(compositeDefaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(1); - assertThat(compositeDefaultProfile.entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - } - - @Test - public void should_merge_profiles() { - primaryMap.put("onlyInPrimary", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - primaryMap.put("inBoth", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2); - fallbackMap.put("inBoth", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 3); - fallbackMap.put("onlyInFallback", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 4); - - assertThat(compositeConfig.getProfiles()) - .containsKeys( - DriverExecutionProfile.DEFAULT_NAME, - "onlyInPrimary", - "inBoth", - "inBoth", - "onlyInFallback"); - - assertThat( - compositeConfig - .getProfile("onlyInPrimary") - .getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(1); - assertThat( - compositeConfig - .getProfile("inBoth") - .getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(2); - assertThat( - compositeConfig - .getProfile("onlyInFallback") - .getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(4); - - assertThat(compositeConfig.getProfile("onlyInPrimary").entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - - assertThat(compositeConfig.getProfile("inBoth").entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 2), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - - assertThat(compositeConfig.getProfile("onlyInFallback").entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 4), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoaderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoaderTest.java deleted file mode 100644 index 93f6b274826..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoaderTest.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.map; - -import static com.typesafe.config.ConfigFactory.defaultReference; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.api.core.config.TypedDriverOption; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.datastax.oss.driver.internal.core.config.MockTypedOptions; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigFactory; -import java.util.Optional; -import org.junit.Test; - -public class MapBasedDriverConfigLoaderTest { - - @Test - public void should_reflect_changes_in_real_time() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 1); - - DriverConfigLoader loader = DriverConfigLoader.fromMap(source); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(1); - - source.put(MockTypedOptions.INT1, 2); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(2); - } - - /** - * Checks that, if we ask to pre-fill the default profile, then we get the same set of options as - * the built-in reference.conf. 
- */ - @Test - public void should_fill_default_profile_like_reference_file() { - OptionsMap optionsMap = OptionsMap.driverDefaults(); - DriverExecutionProfile mapBasedConfig = - DriverConfigLoader.fromMap(optionsMap).getInitialConfig().getDefaultProfile(); - DriverExecutionProfile fileBasedConfig = - new DefaultDriverConfigLoader( - () -> { - // Only load reference.conf since we are focusing on driver defaults - ConfigFactory.invalidateCaches(); - return defaultReference().getConfig(DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - }) - .getInitialConfig() - .getDefaultProfile(); - - // Make sure we're not missing any options. -1 is for CONFIG_RELOAD_INTERVAL, which is not - // defined by OptionsMap because it is irrelevant for the map-based config. - assertThat(mapBasedConfig.entrySet()).hasSize(fileBasedConfig.entrySet().size() - 1); - - for (TypedDriverOption option : TypedDriverOption.builtInValues()) { - if (option.getRawOption() == DefaultDriverOption.CONFIG_RELOAD_INTERVAL) { - continue; - } - Optional fileBasedValue = get(fileBasedConfig, option); - Optional mapBasedValue = get(mapBasedConfig, option); - assertThat(mapBasedValue) - .as("Wrong value for %s in OptionsMap", option.getRawOption()) - .isEqualTo(fileBasedValue); - } - } - - private Optional get(DriverExecutionProfile config, TypedDriverOption typedOption) { - DriverOption option = typedOption.getRawOption(); - GenericType type = typedOption.getExpectedType(); - Object value = null; - if (config.isDefined(option)) { - // This is ugly, we have no other way than enumerating all possible types. - // This kind of bridging code between OptionsMap and DriverConfig is unlikely to exist - // anywhere outside of this test. 
- if (type.equals(GenericType.listOf(String.class))) { - value = config.getStringList(option); - } else if (type.equals(GenericType.STRING)) { - value = config.getString(option); - } else if (type.equals(GenericType.DURATION)) { - value = config.getDuration(option); - } else if (type.equals(GenericType.INTEGER)) { - value = config.getInt(option); - } else if (type.equals(GenericType.BOOLEAN)) { - value = config.getBoolean(option); - } else if (type.equals(GenericType.LONG)) { - try { - value = config.getLong(option); - } catch (ConfigException.WrongType e) { - value = config.getBytes(option); - } - } else if (type.equals(GenericType.mapOf(GenericType.STRING, GenericType.STRING))) { - value = config.getStringMap(option); - } else { - fail("Unexpected type " + type); - } - } - return Optional.ofNullable(value); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigTest.java deleted file mode 100644 index 1ebd5fb48ba..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigTest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.map; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.datastax.oss.driver.internal.core.config.MockTypedOptions; -import org.junit.Test; - -public class MapBasedDriverConfigTest { - - @Test - public void should_load_minimal_config_with_no_profiles() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - - assertThat(config).hasIntOption(MockOptions.INT1, 42); - } - - @Test - public void should_inherit_option_in_profile() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - // need to add an unrelated option to create the profile - source.put("profile1", MockTypedOptions.INT2, 1); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - - assertThat(config) - .hasIntOption(MockOptions.INT1, 42) - .hasIntOption("profile1", MockOptions.INT1, 42); - } - - @Test - public void should_override_option_in_profile() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - source.put("profile1", MockTypedOptions.INT1, 43); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - - assertThat(config) - .hasIntOption(MockOptions.INT1, 42) - .hasIntOption("profile1", MockOptions.INT1, 43); - } - - @Test - public void should_create_derived_profile_with_new_option() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - 
DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.withInt(MockOptions.INT2, 43); - - assertThat(base.isDefined(MockOptions.INT2)).isFalse(); - assertThat(derived.isDefined(MockOptions.INT2)).isTrue(); - assertThat(derived.getInt(MockOptions.INT2)).isEqualTo(43); - } - - @Test - public void should_create_derived_profile_overriding_option() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.withInt(MockOptions.INT1, 43); - - assertThat(base.getInt(MockOptions.INT1)).isEqualTo(42); - assertThat(derived.getInt(MockOptions.INT1)).isEqualTo(43); - } - - @Test - public void should_create_derived_profile_unsetting_option() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - source.put(MockTypedOptions.INT2, 43); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.without(MockOptions.INT2); - - assertThat(base.getInt(MockOptions.INT2)).isEqualTo(43); - assertThat(derived.isDefined(MockOptions.INT2)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java deleted file mode 100644 index 16b8f0b3aa6..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java +++ /dev/null @@ -1,314 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.util.concurrent.ScheduledTaskCapturingEventLoop; -import com.datastax.oss.driver.internal.core.util.concurrent.ScheduledTaskCapturingEventLoop.CapturedTask; -import com.typesafe.config.ConfigFactory; 
-import io.netty.channel.EventLoopGroup; -import java.io.File; -import java.time.Duration; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DefaultDriverConfigLoaderTest { - - @Mock private InternalDriverContext context; - @Mock private NettyOptions nettyOptions; - @Mock private EventLoopGroup adminEventExecutorGroup; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - private ScheduledTaskCapturingEventLoop adminExecutor; - private EventBus eventBus; - private AtomicReference configSource; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(context.getSessionName()).thenReturn("test"); - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventExecutorGroup); - - adminExecutor = new ScheduledTaskCapturingEventLoop(adminEventExecutorGroup); - when(adminEventExecutorGroup.next()).thenReturn(adminExecutor); - - eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - - // The already loaded config in the context. - // In real life, it's the object managed by the loader, but in this test it's simpler to mock - // it. 
- when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getDuration(DefaultDriverOption.CONFIG_RELOAD_INTERVAL)) - .thenReturn(Duration.ofSeconds(12)); - - configSource = new AtomicReference<>("int1 = 42"); - } - - @Test - public void should_build_initial_config() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - } - - @Test - public void should_schedule_reloading_task() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - - loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - CapturedTask task = adminExecutor.nextTask(); - assertThat(task.getInitialDelay(TimeUnit.SECONDS)).isEqualTo(12); - assertThat(task.getPeriod(TimeUnit.SECONDS)).isEqualTo(12); - } - - @Test - public void should_detect_config_change_from_periodic_reload() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - - loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - CapturedTask task = adminExecutor.nextTask(); - - configSource.set("int1 = 43"); - - task.run(); - - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 43); - verify(eventBus).fire(ConfigChangeEvent.INSTANCE); - } - - @Test - public void should_detect_config_change_from_manual_reload() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - - 
loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - configSource.set("int1 = 43"); - - CompletionStage reloaded = loader.reload(); - adminExecutor.waitForNonScheduledTasks(); - - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 43); - verify(eventBus).fire(ConfigChangeEvent.INSTANCE); - assertThatStage(reloaded).isSuccess(changed -> assertThat(changed).isTrue()); - } - - @Test - public void should_not_notify_from_periodic_reload_if_config_has_not_changed() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - - loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - CapturedTask task = adminExecutor.nextTask(); - - // no change to the config source - - task.run(); - - verify(eventBus, never()).fire(ConfigChangeEvent.INSTANCE); - } - - @Test - public void should_not_notify_from_manual_reload_if_config_has_not_changed() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - - loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - CompletionStage reloaded = loader.reload(); - adminExecutor.waitForNonScheduledTasks(); - - verify(eventBus, never()).fire(ConfigChangeEvent.INSTANCE); - assertThatStage(reloaded).isSuccess(changed -> assertThat(changed).isFalse()); - } - - @Test - public void should_load_from_other_classpath_resource() { - DriverConfigLoader loader = DriverConfigLoader.fromClasspath("config/customApplication"); - DriverExecutionProfile config = loader.getInitialConfig().getDefaultProfile(); - // From customApplication.conf: - assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - 
.isEqualTo(Duration.ofSeconds(5)); - // From customApplication.json: - assertThat(config.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)).isEqualTo(2000); - // From customApplication.properties: - assertThat(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo(DefaultConsistencyLevel.ONE.name()); - // From reference.conf: - assertThat(config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .isEqualTo(DefaultConsistencyLevel.SERIAL.name()); - } - - @Test - public void should_load_from_file() { - File file = new File("src/test/resources/config/customApplication.conf"); - assertThat(file).exists(); - DriverConfigLoader loader = DriverConfigLoader.fromFile(file); - DriverExecutionProfile config = loader.getInitialConfig().getDefaultProfile(); - // From customApplication.conf: - assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofSeconds(5)); - // From reference.conf: - assertThat(config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .isEqualTo(DefaultConsistencyLevel.SERIAL.name()); - } - - @Test - public void should_load_from_file_with_system_property() { - File file = new File("src/test/resources/config/customApplication.conf"); - assertThat(file).exists(); - System.setProperty("config.file", file.getAbsolutePath()); - try { - DriverConfigLoader loader = new DefaultDriverConfigLoader(); - DriverExecutionProfile config = loader.getInitialConfig().getDefaultProfile(); - // From customApplication.conf: - assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofSeconds(5)); - // From reference.conf: - assertThat(config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .isEqualTo(DefaultConsistencyLevel.SERIAL.name()); - } finally { - System.clearProperty("config.file"); - } - } - - @Test - public void should_return_failed_future_if_reloading_not_supported() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> 
ConfigFactory.parseString(configSource.get()), false); - assertThat(loader.supportsReloading()).isFalse(); - CompletionStage stage = loader.reload(); - assertThatStage(stage) - .isFailed( - t -> - assertThat(t) - .isInstanceOf(UnsupportedOperationException.class) - .hasMessage( - "This instance of DefaultDriverConfigLoader does not support reloading")); - } - - /** Test for JAVA-2846. */ - @Test - public void should_load_setting_from_system_property_when_application_conf_is_also_provided() { - System.setProperty("datastax-java-driver.basic.request.timeout", "1 millisecond"); - try { - assertThat( - new DefaultDriverConfigLoader() - .getInitialConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(1)); - } finally { - System.clearProperty("datastax-java-driver.basic.request.timeout"); - } - } - - /** Test for JAVA-2846. */ - @Test - public void - should_load_and_resolve_setting_from_system_property_when_application_conf_is_also_provided() { - System.setProperty( - "datastax-java-driver.advanced.connection.init-query-timeout", "1234 milliseconds"); - try { - assertThat( - new DefaultDriverConfigLoader() - .getInitialConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(1234)); - } finally { - System.clearProperty("datastax-java-driver.advanced.connection.init-query-timeout"); - } - } - - /** Test for JAVA-2846. 
*/ - @Test - public void - should_load_setting_from_system_property_when_application_conf_is_also_provided_for_custom_classloader() { - System.setProperty("datastax-java-driver.basic.request.timeout", "1 millisecond"); - try { - assertThat( - new DefaultDriverConfigLoader(Thread.currentThread().getContextClassLoader()) - .getInitialConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(1)); - } finally { - System.clearProperty("datastax-java-driver.basic.request.timeout"); - } - } - - @Test - public void should_create_from_string() { - DriverExecutionProfile config = - DriverConfigLoader.fromString( - "datastax-java-driver.basic { session-name = my-app\nrequest.timeout = 1 millisecond }") - .getInitialConfig() - .getDefaultProfile(); - - assertThat(config.getString(DefaultDriverOption.SESSION_NAME)).isEqualTo("my-app"); - assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(1)); - // Any option not in the string should be pulled from reference.conf - assertThat(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).isEqualTo("LOCAL_ONE"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java deleted file mode 100644 index 4f2edf98246..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.typesafe.config.ConfigFactory; -import org.junit.Test; - -public class DefaultProgrammaticDriverConfigLoaderBuilderTest { - - private static final String FALLBACK_CONFIG = - "int1 = 1\nint2 = 2\nprofiles.profile1 { int1 = 11 }"; - - @Test - public void should_override_option() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .withInt(MockOptions.INT1, 2) - .withInt(MockOptions.INT1, 3) - .withInt(MockOptions.INT1, 4) - .withInt(MockOptions.INT2, 3) - .withInt(MockOptions.INT2, 4) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(4); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT2)).isEqualTo(4); - } - - @Test - public void should_override_option_in_default_profile() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> 
ConfigFactory.parseString(FALLBACK_CONFIG), "") - .withInt(MockOptions.INT1, 3) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(3); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT2)).isEqualTo(2); - } - - @Test - public void should_override_option_in_existing_profile() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .startProfile("profile1") - .withInt(MockOptions.INT1, 3) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(1); - assertThat(config.getProfile("profile1").getInt(MockOptions.INT1)).isEqualTo(3); - } - - @Test - public void should_override_option_in_new_profile() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .startProfile("profile2") - .withInt(MockOptions.INT1, 3) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(1); - assertThat(config.getProfile("profile1").getInt(MockOptions.INT1)).isEqualTo(11); - assertThat(config.getProfile("profile2").getInt(MockOptions.INT1)).isEqualTo(3); - assertThat(config.getProfile("profile2").getInt(MockOptions.INT2)).isEqualTo(2); - } - - @Test - public void should_go_back_to_default_profile_when_profile_ends() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .startProfile("profile2") - .withInt(MockOptions.INT1, 3) - .endProfile() - .withInt(MockOptions.INT1, 4) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(4); - } - - @Test - public void 
should_handle_multiple_programmatic_profiles() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .startProfile("profile2") - .withInt(MockOptions.INT1, 3) - .startProfile("profile3") - .withInt(MockOptions.INT1, 4) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getProfile("profile2").getInt(MockOptions.INT1)).isEqualTo(3); - assertThat(config.getProfile("profile3").getInt(MockOptions.INT1)).isEqualTo(4); - } - - @Test - public void should_honor_root_path() { - String rootPath = "test-root"; - String propertyKey = rootPath + "." + DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(); - try { - System.setProperty(propertyKey, "42"); - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - DefaultProgrammaticDriverConfigLoaderBuilder.DEFAULT_FALLBACK_SUPPLIER, rootPath) - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 1234) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(42); - assertThat(config.getDefaultProfile().getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)) - .isEqualTo(1234); - } finally { - System.clearProperty(propertyKey); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypeSafeDriverConfigOverrideDefaultsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypeSafeDriverConfigOverrideDefaultsTest.java deleted file mode 100644 index 2f2f0a9b3c1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypeSafeDriverConfigOverrideDefaultsTest.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import java.time.Duration; -import java.util.Map; -import org.junit.Test; - -/** Focuses on {@link TypesafeDriverConfig#overrideDefaults(Map)}. 
*/ -public class TypeSafeDriverConfigOverrideDefaultsTest { - - @Test - public void should_replace_if_value_comes_from_reference() { - // Given - TypesafeDriverConfig config = config(""); - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - - // When - config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - - // Then - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_QUORUM"); - } - - @Test - public void should_replace_multiple_times() { - // Given - TypesafeDriverConfig config = config(""); - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - - // When - config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - config.overrideDefaults(ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "TWO")); - - // Then - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("TWO"); - } - - @Test - public void should_not_replace_if_overridden_from_application() { - // Given - TypesafeDriverConfig config = - config("datastax-java-driver.basic.request.consistency = LOCAL_ONE"); - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - - // When - config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - - // Then - // not replaced because it was set explictly in application.conf - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - } - - @Test - public void should_handle_reloads() { - // Given - TypesafeDriverConfig config = config(""); - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - - // When - 
config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - reload(config, ""); - - // Then - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_QUORUM"); - - // When - reload(config, "datastax-java-driver.basic.request.consistency = ONE"); - - // Then - // overridden default not used anymore if the reload detected a user change - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("ONE"); - } - - @Test - public void should_ignore_non_existent_option() { - // Given - TypesafeDriverConfig config = config(""); - DriverOption nonExistent = () -> "non existent"; - - // When - config.overrideDefaults(ImmutableMap.of(nonExistent, "IRRELEVANT")); - - // Then - assertThat(config.getDefaultProfile().isDefined(nonExistent)).isFalse(); - } - - @Test - public void should_handle_profiles() { - // Given - TypesafeDriverConfig config = - config( - "datastax-java-driver.profiles.profile1.basic.request.consistency = TWO\n" - + "datastax-java-driver.profiles.profile2.basic.request.timeout = 5 seconds"); - DriverExecutionProfile profile1 = config.getProfile("profile1"); - DriverExecutionProfile profile2 = config.getProfile("profile2"); - DriverExecutionProfile derivedProfile21 = - profile2.withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(10)); - DriverExecutionProfile derivedProfile22 = - profile2.withString(DefaultDriverOption.REQUEST_CONSISTENCY, "QUORUM"); - assertThat(profile1.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).isEqualTo("TWO"); - assertThat(profile2.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); // inherited from default profile in reference.conf - assertThat(derivedProfile21.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); // inherited from default profile in reference.conf - 
assertThat(derivedProfile22.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("QUORUM"); // overridden programmatically - - // When - config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - - // Then - // Unaffected because it was set manually in application.conf: - assertThat(profile1.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).isEqualTo("TWO"); - // Affected because it was using the default from reference.conf: - assertThat(profile2.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_QUORUM"); - // Same: - assertThat(derivedProfile21.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_QUORUM"); - // Unaffected because it was overridden programmatically: - assertThat(derivedProfile22.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("QUORUM"); - } - - // Builds a config based on reference.conf + the given application.conf overrides - private TypesafeDriverConfig config(String application) { - return new TypesafeDriverConfig(rawConfig(application)); - } - - private boolean reload(TypesafeDriverConfig config, String newApplication) { - return config.reload(rawConfig(newApplication)); - } - - private Config rawConfig(String application) { - ConfigFactory.invalidateCaches(); - return ConfigFactory.parseString(application) - .withFallback(ConfigFactory.defaultReference()) - .resolve() - .getConfig(DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java deleted file mode 100644 index 4a78c3ccb03..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.entry; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import java.util.HashMap; -import java.util.Map; -import org.junit.Test; - -public class TypesafeDriverConfigTest { - - @Test - public void should_load_minimal_config_with_no_profiles() { - TypesafeDriverConfig config = parse("int1 = 42"); - assertThat(config).hasIntOption(MockOptions.INT1, 42); - } - - @Test - public void should_load_config_with_no_profiles_and_optional_values() { - TypesafeDriverConfig config = parse("int1 = 42\n int2 = 43"); - assertThat(config).hasIntOption(MockOptions.INT1, 42); - assertThat(config).hasIntOption(MockOptions.INT2, 43); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_profile_uses_default_name() { - parse("int1 = 42\n profiles { default { int1 = 43 } }"); - } - - @Test - public void should_inherit_option_in_profile() { - TypesafeDriverConfig config = parse("int1 = 42\n 
profiles { profile1 { } }"); - assertThat(config) - .hasIntOption(MockOptions.INT1, 42) - .hasIntOption("profile1", MockOptions.INT1, 42); - } - - @Test - public void should_override_option_in_profile() { - TypesafeDriverConfig config = parse("int1 = 42\n profiles { profile1 { int1 = 43 } }"); - assertThat(config) - .hasIntOption(MockOptions.INT1, 42) - .hasIntOption("profile1", MockOptions.INT1, 43); - } - - @Test - public void should_create_derived_profile_with_new_option() { - TypesafeDriverConfig config = parse("int1 = 42"); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.withInt(MockOptions.INT2, 43); - - assertThat(base.isDefined(MockOptions.INT2)).isFalse(); - assertThat(derived.isDefined(MockOptions.INT2)).isTrue(); - assertThat(derived.getInt(MockOptions.INT2)).isEqualTo(43); - } - - @Test - public void should_create_derived_profile_overriding_option() { - TypesafeDriverConfig config = parse("int1 = 42"); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.withInt(MockOptions.INT1, 43); - - assertThat(base.getInt(MockOptions.INT1)).isEqualTo(42); - assertThat(derived.getInt(MockOptions.INT1)).isEqualTo(43); - } - - @Test - public void should_create_derived_profile_unsetting_option() { - TypesafeDriverConfig config = parse("int1 = 42\n int2 = 43"); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.without(MockOptions.INT2); - - assertThat(base.getInt(MockOptions.INT2)).isEqualTo(43); - assertThat(derived.isDefined(MockOptions.INT2)).isFalse(); - } - - @Test - public void should_fetch_string_map() { - TypesafeDriverConfig config = - parse( - "int1 = 42 \n auth_provider { auth_thing_one= one \n auth_thing_two = two \n auth_thing_three = three}"); - DriverExecutionProfile base = config.getDefaultProfile(); - Map map = base.getStringMap(MockOptions.AUTH_PROVIDER); - assertThat(map.entrySet().size()).isEqualTo(3); 
- assertThat(map.get("auth_thing_one")).isEqualTo("one"); - assertThat(map.get("auth_thing_two")).isEqualTo("two"); - assertThat(map.get("auth_thing_three")).isEqualTo("three"); - } - - @Test - public void should_fetch_string_map_with_forward_slash_in_keys() { - TypesafeDriverConfig config = - parse( - "subnet_addresses { 100.64.0.0/15 = \"cassandra.datacenter1.com:9042\" \n \"100.66.0.0/15\" = \"cassandra.datacenter2.com\" \n \"::ffff:6440:0/111\" = \"cassandra.datacenter3.com:19042\" }"); - DriverExecutionProfile base = config.getDefaultProfile(); - Map map = base.getStringMap(MockOptions.SUBNET_ADDRESSES); - assertThat(map.entrySet().size()).isEqualTo(3); - assertThat(map.get("100.64.0.\"0/15\"")).isEqualTo("cassandra.datacenter1.com:9042"); - assertThat(map.get("\"100.66.0.0/15\"")).isEqualTo("cassandra.datacenter2.com"); - assertThat(map.get("\"::ffff:6440:0/111\"")).isEqualTo("cassandra.datacenter3.com:19042"); - } - - @Test - public void should_create_derived_profile_with_string_map() { - TypesafeDriverConfig config = parse("int1 = 42"); - Map authThingMap = new HashMap<>(); - authThingMap.put("auth_thing_one", "one"); - authThingMap.put("auth_thing_two", "two"); - authThingMap.put("auth_thing_three", "three"); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile mapBase = base.withStringMap(MockOptions.AUTH_PROVIDER, authThingMap); - Map fetchedMap = mapBase.getStringMap(MockOptions.AUTH_PROVIDER); - assertThat(fetchedMap).isEqualTo(authThingMap); - } - - @Test - public void should_reload() { - TypesafeDriverConfig config = parse("int1 = 42\n profiles { profile1 { int1 = 43 } }"); - - config.reload(ConfigFactory.parseString("int1 = 44\n profiles { profile1 { int1 = 45 } }")); - assertThat(config) - .hasIntOption(MockOptions.INT1, 44) - .hasIntOption("profile1", MockOptions.INT1, 45); - } - - @Test - public void should_update_derived_profiles_after_reloading() { - TypesafeDriverConfig config = parse("int1 = 42\n profiles { 
profile1 { int1 = 43 } }"); - - DriverExecutionProfile derivedFromDefault = - config.getDefaultProfile().withInt(MockOptions.INT2, 50); - DriverExecutionProfile derivedFromProfile1 = - config.getProfile("profile1").withInt(MockOptions.INT2, 51); - - config.reload(ConfigFactory.parseString("int1 = 44\n profiles { profile1 { int1 = 45 } }")); - - assertThat(derivedFromDefault.getInt(MockOptions.INT1)).isEqualTo(44); - assertThat(derivedFromDefault.getInt(MockOptions.INT2)).isEqualTo(50); - - assertThat(derivedFromProfile1.getInt(MockOptions.INT1)).isEqualTo(45); - assertThat(derivedFromProfile1.getInt(MockOptions.INT2)).isEqualTo(51); - } - - @Test - public void should_enumerate_options() { - TypesafeDriverConfig config = - parse( - "int1 = 42 \n" - + "auth_provider { auth_thing_one= one \n auth_thing_two = two \n auth_thing_three = three}\n" - + "profiles { profile1 { int1 = 45 } }"); - - assertThat(config.getDefaultProfile().entrySet()) - .containsExactly( - entry("auth_provider.auth_thing_one", "one"), - entry("auth_provider.auth_thing_three", "three"), - entry("auth_provider.auth_thing_two", "two"), - entry("int1", 42)); - - assertThat(config.getProfile("profile1").entrySet()) - .containsExactly( - entry("auth_provider.auth_thing_one", "one"), - entry("auth_provider.auth_thing_three", "three"), - entry("auth_provider.auth_thing_two", "two"), - entry("int1", 45)); - } - - @Test - public void should_update_default_profile_on_reload() { - TypesafeDriverConfig config = parse("int1 = 42\n profiles { profile1 { int1 = 43 } }"); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(42); - config.reload(ConfigFactory.parseString("int1 = 44\n profiles { profile1 { int1 = 45 } }")); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(44); - } - - private TypesafeDriverConfig parse(String configString) { - Config config = ConfigFactory.parseString(configString); - return new TypesafeDriverConfig(config); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java deleted file mode 100644 index 9a973c1b0e4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.connection; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.context.DriverContext; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ExponentialReconnectionPolicyTest { - - @Mock private DriverContext driverContext; - @Mock private DriverConfig driverConfig; - @Mock private DriverExecutionProfile profile; - private final long baseDelay = 1000L; - private final long maxDelay = 60000L; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(driverConfig.getDefaultProfile()).thenReturn(profile); - when(driverContext.getConfig()).thenReturn(driverConfig); - when(profile.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY)) - .thenReturn(Duration.of(baseDelay, ChronoUnit.MILLIS)); - when(profile.getDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY)) - .thenReturn(Duration.of(maxDelay, ChronoUnit.MILLIS)); - } - - @Test - public void should_generate_exponential_delay_with_jitter() throws Exception { - ExponentialReconnectionPolicy policy = new ExponentialReconnectionPolicy(driverContext); - ReconnectionPolicy.ReconnectionSchedule schedule = policy.newControlConnectionSchedule(false); - // generate a number of delays and make sure they are all within the base/max values range - // limit the loop to 53 as the bit shift and min/max calculations will cause long overflows - // past that - for (int i = 0; i < 54; ++i) { - // compute the min and max delays based on attempt count (i) 
and prevent long overflows - long exponentialDelay = Math.min(baseDelay * (1L << i), maxDelay); - // min will be 85% of the pure exponential delay (with a floor of baseDelay) - long minJitterDelay = Math.max(baseDelay, (exponentialDelay * 85) / 100); - // max will be 115% of the pure exponential delay (with a ceiling of maxDelay) - long maxJitterDelay = Math.min(maxDelay, (exponentialDelay * 115) / 100); - long delay = schedule.nextDelay().toMillis(); - assertThat(delay).isBetween(minJitterDelay, maxJitterDelay); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContextTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContextTest.java deleted file mode 100644 index 6d4585cb4d7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContextTest.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.protocol.Lz4Compressor; -import com.datastax.oss.driver.internal.core.protocol.SnappyCompressor; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.NoopCompressor; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import io.netty.buffer.ByteBuf; -import java.util.Optional; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DefaultDriverContextTest { - - private DefaultDriverContext buildMockedContext(Optional compressionOption) { - - DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn(compressionOption.orElse("none")); - return MockedDriverContextFactory.defaultDriverContext(defaultProfile); - } - - private void doCreateCompressorTest(Optional configVal, Class expectedClz) { - - DefaultDriverContext ctx = buildMockedContext(configVal); - Compressor compressor = ctx.getCompressor(); - assertThat(compressor).isNotNull(); - assertThat(compressor).isInstanceOf(expectedClz); - } - - @Test - @DataProvider({"lz4", "lZ4", "Lz4", "LZ4"}) - public void should_create_lz4_compressor(String name) { - - doCreateCompressorTest(Optional.of(name), Lz4Compressor.class); - } - - @Test - @DataProvider({"snappy", "SNAPPY", "sNaPpY", "SNapPy"}) - public void should_create_snappy_compressor(String name) { - - doCreateCompressorTest(Optional.of(name), SnappyCompressor.class); - } - - @Test - public void 
should_create_noop_compressor_if_undefined() { - - doCreateCompressorTest(Optional.empty(), NoopCompressor.class); - } - - @Test - @DataProvider({"none", "NONE", "NoNe", "nONe"}) - public void should_create_noop_compressor_if_defined_as_none(String name) { - - doCreateCompressorTest(Optional.of(name), NoopCompressor.class); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/MockedDriverContextFactory.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/MockedDriverContextFactory.java deleted file mode 100644 index a8b25193f54..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/MockedDriverContextFactory.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -public class MockedDriverContextFactory { - - public static DefaultDriverContext defaultDriverContext() { - return defaultDriverContext(MockedDriverContextFactory.defaultProfile("datacenter1")); - } - - public static DefaultDriverContext defaultDriverContext( - DriverExecutionProfile defaultProfile, DriverExecutionProfile... 
profiles) { - - /* Setup machinery to connect the input DriverExecutionProfile to the config loader */ - final DriverConfig driverConfig = mock(DriverConfig.class); - final DriverConfigLoader configLoader = mock(DriverConfigLoader.class); - when(configLoader.getInitialConfig()).thenReturn(driverConfig); - when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); - when(driverConfig.getProfile(defaultProfile.getName())).thenReturn(defaultProfile); - - for (DriverExecutionProfile profile : profiles) { - when(driverConfig.getProfile(profile.getName())).thenReturn(profile); - } - - ProgrammaticArguments args = - ProgrammaticArguments.builder() - .withNodeStateListener(mock(NodeStateListener.class)) - .withSchemaChangeListener(mock(SchemaChangeListener.class)) - .withRequestTracker(mock(RequestTracker.class)) - .withLocalDatacenters(Maps.newHashMap()) - .withNodeDistanceEvaluators(Maps.newHashMap()) - .build(); - - return new DefaultDriverContext(configLoader, args) { - @NonNull - @Override - public Map getLoadBalancingPolicies() { - ImmutableMap.Builder map = ImmutableMap.builder(); - map.put( - defaultProfile.getName(), - mockLoadBalancingPolicy( - this, - defaultProfile.getName(), - defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER))); - for (DriverExecutionProfile profile : profiles) { - map.put( - profile.getName(), - mockLoadBalancingPolicy( - this, - profile.getName(), - profile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER))); - } - return map.build(); - } - - @NonNull - @Override - public ConsistencyLevelRegistry getConsistencyLevelRegistry() { - return mock(ConsistencyLevelRegistry.class); - } - }; - } - - public static DriverExecutionProfile defaultProfile(String localDc) { - return createProfile(DriverExecutionProfile.DEFAULT_NAME, localDc); - } - - public static DriverExecutionProfile createProfile(String name, String localDc) { - DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); 
- when(defaultProfile.getName()).thenReturn(name); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - when(defaultProfile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(Duration.ofMinutes(5)); - when(defaultProfile.isDefined(DefaultDriverOption.METRICS_FACTORY_CLASS)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.METRICS_FACTORY_CLASS)) - .thenReturn("DefaultMetricsFactory"); - when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(localDc); - return defaultProfile; - } - - public static void allowRemoteDcConnectivity( - DriverExecutionProfile profile, - int maxNodesPerRemoteDc, - boolean allowRemoteSatisfyLocalDc, - List preferredRemoteDcs) { - when(profile.getInt(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(maxNodesPerRemoteDc); - when(profile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(allowRemoteSatisfyLocalDc); - when(profile.getStringList(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS)) - .thenReturn(preferredRemoteDcs); - } - - private static LoadBalancingPolicy mockLoadBalancingPolicy( - DefaultDriverContext driverContext, String profile, String localDc) { - LoadBalancingPolicy loadBalancingPolicy = - new DefaultLoadBalancingPolicy(driverContext, profile) { - @NonNull - @Override - protected Optional discoverLocalDc(@NonNull Map nodes) { - return Optional.ofNullable(localDc); - } - - @NonNull - @Override - protected NodeDistanceEvaluator createNodeDistanceEvaluator( - @Nullable String localDc, @NonNull Map nodes) { - return mock(NodeDistanceEvaluator.class); - } - }; - loadBalancingPolicy.init( - Collections.emptyMap(), mock(LoadBalancingPolicy.DistanceReporter.class)); - return loadBalancingPolicy; - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java deleted file mode 100644 index d12e50b7e8e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.request.Startup; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class StartupOptionsBuilderTest { - - private DefaultDriverContext buildMockedContext(String compression) { - - DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn(compression); - when(defaultProfile.getName()).thenReturn(DriverExecutionProfile.DEFAULT_NAME); - return MockedDriverContextFactory.defaultDriverContext(defaultProfile); - } - - private void assertDefaultStartupOptions(Startup startup) { - - assertThat(startup.options).containsEntry(Startup.CQL_VERSION_KEY, "3.0.0"); - assertThat(startup.options) - .containsEntry( - StartupOptionsBuilder.DRIVER_NAME_KEY, Session.OSS_DRIVER_COORDINATES.getName()); - assertThat(startup.options).containsKey(StartupOptionsBuilder.DRIVER_VERSION_KEY); - Version version = Version.parse(startup.options.get(StartupOptionsBuilder.DRIVER_VERSION_KEY)); - assertThat(version).isEqualByComparingTo(Session.OSS_DRIVER_COORDINATES.getVersion()); - } - - @Test - public void should_build_startup_options_with_no_compression_if_undefined() 
{ - - DefaultDriverContext ctx = MockedDriverContextFactory.defaultDriverContext(); - Startup startup = new Startup(ctx.getStartupOptions()); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_build_startup_options_with_no_compression_if_defined_as_none() { - - DefaultDriverContext ctx = buildMockedContext("none"); - Startup startup = new Startup(ctx.getStartupOptions()); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - @DataProvider({"lz4", "snappy"}) - public void should_build_startup_options(String compression) { - - DefaultDriverContext ctx = buildMockedContext(compression); - Startup startup = new Startup(ctx.getStartupOptions()); - // assert the compression option is present - assertThat(startup.options).containsEntry(Startup.COMPRESSION_KEY, compression); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_fail_to_build_startup_options_with_invalid_compression() { - - assertThatIllegalArgumentException() - .isThrownBy( - () -> { - DefaultDriverContext ctx = buildMockedContext("foobar"); - new Startup(ctx.getStartupOptions()); - }); - } - - @Test - public void should_include_all_local_dcs_in_startup_message() { - - DefaultDriverContext ctx = - MockedDriverContextFactory.defaultDriverContext( - MockedDriverContextFactory.defaultProfile("us-west-2"), - MockedDriverContextFactory.createProfile("oltp", "us-east-2"), - MockedDriverContextFactory.createProfile("olap", "eu-central-1")); - Startup startup = new Startup(ctx.getStartupOptions()); - assertThat(startup.options) - .containsEntry( - StartupOptionsBuilder.DRIVER_BAGGAGE, - "{\"default\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"us-west-2\"}}," - + "\"oltp\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"us-east-2\"}}," - + "\"olap\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"eu-central-1\"}}}"); - } 
- - @Test - public void should_include_all_lbp_details_in_startup_message() { - - DriverExecutionProfile defaultProfile = MockedDriverContextFactory.defaultProfile("dc1"); - DriverExecutionProfile oltpProfile = MockedDriverContextFactory.createProfile("oltp", "dc1"); - MockedDriverContextFactory.allowRemoteDcConnectivity( - oltpProfile, 2, true, ImmutableList.of("dc2", "dc3")); - DefaultDriverContext ctx = - MockedDriverContextFactory.defaultDriverContext(defaultProfile, oltpProfile); - - Startup startup = new Startup(ctx.getStartupOptions()); - - assertThat(startup.options) - .containsEntry( - StartupOptionsBuilder.DRIVER_BAGGAGE, - "{\"default\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"dc1\"}}," - + "\"oltp\":{\"DefaultLoadBalancingPolicy\":{" - + "\"localDc\":\"dc1\"," - + "\"preferredRemoteDcs\":[\"dc2\",\"dc3\"]," - + "\"allowDcFailoverForLocalCl\":true," - + "\"maxNodesPerRemoteDc\":2}}}"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java deleted file mode 100644 index 61533a8e8e9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context.bus; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.context.EventBus; -import java.util.HashMap; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; - -public class EventBusTest { - - private EventBus bus; - private Map results; - private ChildEvent event = new ChildEvent(); - - @Before - public void setup() { - bus = new EventBus("test"); - results = new HashMap<>(); - } - - @Test - public void should_notify_registered_listeners() { - // Given - bus.register(ChildEvent.class, (e) -> results.put("listener1", e)); - bus.register(ChildEvent.class, (e) -> results.put("listener2", e)); - - // When - bus.fire(event); - - // Then - assertThat(results) - .hasSize(2) - .containsEntry("listener1", event) - .containsEntry("listener2", event); - } - - @Test - public void should_unregister_listener() { - // Given - Object key1 = bus.register(ChildEvent.class, (e) -> results.put("listener1", e)); - bus.register(ChildEvent.class, (e) -> results.put("listener2", e)); - bus.unregister(key1, ChildEvent.class); - - // When - bus.fire(event); - - // Then - assertThat(results).hasSize(1).containsEntry("listener2", event); - } - - @Test - public void should_use_exact_class() { - // Given - bus.register(ChildEvent.class, (e) -> results.put("listener1", e)); - bus.register(ParentEvent.class, (e) -> results.put("listener2", e)); - - // When - bus.fire(event); - - // Then - assertThat(results).hasSize(1).containsEntry("listener1", event); - - // When - results.clear(); - ParentEvent parentEvent = new ParentEvent(); - bus.fire(parentEvent); - - // Then - assertThat(results).hasSize(1).containsEntry("listener2", parentEvent); - } - - private static class ParentEvent {} - - private static class ChildEvent extends ParentEvent {} -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java deleted file mode 100644 index cb83b523ebe..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.control; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.DriverChannelOptions; -import com.datastax.oss.driver.internal.core.channel.EventCallback; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.event.SchemaChangeEvent; -import com.datastax.oss.protocol.internal.response.event.StatusChangeEvent; -import com.datastax.oss.protocol.internal.response.event.TopologyChangeEvent; -import java.util.concurrent.CompletableFuture; -import org.junit.Test; -import org.mockito.ArgumentCaptor; - -public class ControlConnectionEventsTest extends ControlConnectionTestBase { - - @Test - public void should_register_for_all_events_if_topology_requested() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - - // When - controlConnection.init(true, false, false); - - // Then - await() - .untilAsserted( - () -> { - DriverChannelOptions channelOptions = optionsCaptor.getValue(); - assertThat(channelOptions.eventTypes) - .containsExactly( - ProtocolConstants.EventType.SCHEMA_CHANGE, - ProtocolConstants.EventType.STATUS_CHANGE, - ProtocolConstants.EventType.TOPOLOGY_CHANGE); - assertThat(channelOptions.eventCallback).isEqualTo(controlConnection); - }); - } - - @Test - public void 
should_register_for_schema_events_only_if_topology_not_requested() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - - // When - controlConnection.init(false, false, false); - - // Then - await() - .untilAsserted( - () -> { - DriverChannelOptions channelOptions = optionsCaptor.getValue(); - assertThat(channelOptions.eventTypes) - .containsExactly(ProtocolConstants.EventType.SCHEMA_CHANGE); - assertThat(channelOptions.eventCallback).isEqualTo(controlConnection); - }); - } - - @Test - public void should_process_status_change_events() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - controlConnection.init(true, false, false); - await().until(() -> optionsCaptor.getValue() != null); - EventCallback callback = optionsCaptor.getValue().eventCallback; - StatusChangeEvent event = - new StatusChangeEvent(ProtocolConstants.StatusChangeType.UP, ADDRESS1); - - // When - callback.onEvent(event); - - // Then - verify(eventBus).fire(TopologyEvent.suggestUp(ADDRESS1)); - } - - @Test - public void should_process_topology_change_events() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - controlConnection.init(true, false, false); - await().until(() -> optionsCaptor.getValue() != null); - EventCallback callback = optionsCaptor.getValue().eventCallback; - TopologyChangeEvent event = - new 
TopologyChangeEvent(ProtocolConstants.TopologyChangeType.NEW_NODE, ADDRESS1); - - // When - callback.onEvent(event); - - // Then - verify(eventBus).fire(TopologyEvent.suggestAdded(ADDRESS1)); - } - - @Test - public void should_process_schema_change_events() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - controlConnection.init(false, false, false); - await().until(() -> optionsCaptor.getValue() != null); - EventCallback callback = optionsCaptor.getValue().eventCallback; - SchemaChangeEvent event = - new SchemaChangeEvent( - ProtocolConstants.SchemaChangeType.CREATED, - ProtocolConstants.SchemaChangeTarget.FUNCTION, - "ks", - "fn", - ImmutableList.of("text", "text")); - - // When - callback.onEvent(event); - - // Then - verify(metadataManager).refreshSchema("ks", false, false); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java deleted file mode 100644 index 526efefa2fe..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java +++ /dev/null @@ -1,564 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.control; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import com.datastax.oss.driver.internal.core.metadata.DistanceEvent; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class ControlConnectionTest extends ControlConnectionTestBase { - - @Test - public void should_close_successfully_if_it_was_never_init() { - // When - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - - // Then - assertThatStage(closeFuture).isSuccess(); - } - - @Test 
- public void should_init_with_first_contact_point_if_reachable() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory).success(node1, channel1).build(); - - // When - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - // Then - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_always_return_same_init_future() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory).success(node1, channel1).build(); - - // When - CompletionStage initFuture1 = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - CompletionStage initFuture2 = controlConnection.init(false, false, false); - - // Then - assertThatStage(initFuture1).isEqualTo(initFuture2); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_init_with_second_contact_point_if_first_one_fails() { - // Given - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node1, "mock failure") - .success(node2, channel2) - .build(); - - // When - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - - // Then - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.controlConnectionFailed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - // each attempt tries all nodes, so there is 
no reconnection - verify(reconnectionPolicy, never()).newNodeSchedule(any(Node.class)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_fail_to_init_if_all_contact_points_fail() { - // Given - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node1, "mock failure") - .failure(node2, "mock failure") - .build(); - - // When - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - - // Then - assertThatStage(initFuture).isFailed(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.controlConnectionFailed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.controlConnectionFailed(node2)); - // no reconnections at init - verify(reconnectionPolicy, never()).newNodeSchedule(any(Node.class)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_if_channel_goes_down() throws Exception { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .failure(node1, "mock failure") - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // When - channel1.close(); - - // Then - // a reconnection was started - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - 
verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - verify(metadataManager, VERIFY_TIMEOUT).refreshNodes(); - verify(loadBalancingPolicyWrapper, VERIFY_TIMEOUT).init(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_if_node_becomes_ignored() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // When - mockQueryPlan(node2); - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node1)); - - // Then - // an immediate reconnection was started - factoryHelper.waitForCall(node2); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(reconnectionSchedule, never()).nextDelay(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - verify(metadataManager, VERIFY_TIMEOUT).refreshNodes(); - verify(loadBalancingPolicyWrapper, VERIFY_TIMEOUT).init(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_if_node_is_removed() { - should_reconnect_if_event(NodeStateEvent.removed(node1)); - } - - @Test - public void should_reconnect_if_node_is_forced_down() { - should_reconnect_if_event(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node1)); - } - - private void 
should_reconnect_if_event(NodeStateEvent event) { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // When - mockQueryPlan(node2); - eventBus.fire(event); - - // Then - // an immediate reconnection was started - factoryHelper.waitForCall(node2); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(reconnectionSchedule, never()).nextDelay(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - verify(metadataManager, VERIFY_TIMEOUT).refreshNodes(); - verify(loadBalancingPolicyWrapper, VERIFY_TIMEOUT).init(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_if_node_became_ignored_during_reconnection_attempt() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - DriverChannel channel3 = newMockDriverChannel(3); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node1, channel1) - // reconnection - .pending(node2, channel2Future) - .success(node1, channel3) - .build(); - - CompletionStage initFuture = 
controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - mockQueryPlan(node2, node1); - // channel1 goes down, triggering a reconnection - channel1.close(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - // the reconnection to node2 is in progress - factoryHelper.waitForCall(node2); - - // When - // node2 becomes ignored - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - // the reconnection to node2 completes - channel2Future.complete(channel2); - - // Then - // The channel should get closed and we should try the next node - verify(channel2, VERIFY_TIMEOUT).forceClose(); - factoryHelper.waitForCall(node1); - } - - @Test - public void should_reconnect_if_node_was_removed_during_reconnection_attempt() { - should_reconnect_if_event_during_reconnection_attempt(NodeStateEvent.removed(node2)); - } - - @Test - public void should_reconnect_if_node_was_forced_down_during_reconnection_attempt() { - should_reconnect_if_event_during_reconnection_attempt( - NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); - } - - private void should_reconnect_if_event_during_reconnection_attempt(NodeStateEvent event) { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - DriverChannel channel3 = newMockDriverChannel(3); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node1, channel1) - // reconnection - .pending(node2, channel2Future) - .success(node1, channel3) - .build(); - - CompletionStage 
initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture).isSuccess(); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - mockQueryPlan(node2, node1); - // channel1 goes down, triggering a reconnection - channel1.close(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - // the reconnection to node2 is in progress - factoryHelper.waitForCall(node2); - - // When - // node2 goes into the new state - eventBus.fire(event); - // the reconnection to node2 completes - channel2Future.complete(channel2); - - // Then - // The channel should get closed and we should try the next node - verify(channel2, VERIFY_TIMEOUT).forceClose(); - factoryHelper.waitForCall(node1); - } - - @Test - public void should_force_reconnection_if_pending() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .failure(node1, "mock failure") - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // the channel fails and a reconnection is scheduled for later - channel1.close(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - - // When - controlConnection.reconnectNow(); - 
factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - - // Then - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_force_reconnection_even_if_connected() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .failure(node1, "mock failure") - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // When - controlConnection.reconnectNow(); - - // Then - factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(channel1, VERIFY_TIMEOUT).forceClose(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_not_force_reconnection_if_not_init() throws InterruptedException { - // When - controlConnection.reconnectNow(); - TimeUnit.MILLISECONDS.sleep(500); - - // Then - verify(reconnectionSchedule, never()).nextDelay(); - } - - @Test - public void should_not_force_reconnection_if_closed() throws InterruptedException { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory).success(node1, channel1).build(); - CompletionStage 
initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture).isSuccess(); - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - assertThatStage(closeFuture).isSuccess(); - - // When - controlConnection.reconnectNow(); - TimeUnit.MILLISECONDS.sleep(500); - - // Then - verify(reconnectionSchedule, never()).nextDelay(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_close_channel_when_closing() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory).success(node1, channel1).build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture).isSuccess(); - - // When - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - - // Then - assertThatStage(closeFuture).isSuccess(); - verify(channel1, VERIFY_TIMEOUT).forceClose(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_close_channel_if_closed_during_reconnection() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .failure(node1, "mock failure") - .pending(node2, channel2Future) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // the channel fails and a reconnection is scheduled - channel1.close(); - 
verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCall(node1); - // channel2 starts initializing (but the future is not completed yet) - factoryHelper.waitForCall(node2); - - // When - // the control connection gets closed before channel2 initialization is complete - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - assertThatStage(closeFuture).isSuccess(); - channel2Future.complete(channel2); - - // Then - verify(channel2, VERIFY_TIMEOUT).forceClose(); - // no event because the control connection never "owned" the channel - verify(eventBus, never()).fire(ChannelEvent.channelOpened(node2)); - verify(eventBus, never()).fire(ChannelEvent.channelClosed(node2)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_handle_channel_failure_if_closed_during_reconnection() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel1Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .pending(node1, channel1Future) - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // the channel fails and a reconnection is scheduled - channel1.close(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - // channel1 starts initializing (but the future is not completed yet) - factoryHelper.waitForCall(node1); - - 
// When - // the control connection gets closed before channel1 initialization fails - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - assertThatStage(closeFuture).isSuccess(); - channel1Future.completeExceptionally(new Exception("mock failure")); - - // Then - // should never try channel2 because the reconnection has detected that it can stop after the - // first failure - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java deleted file mode 100644 index c52199465a8..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.control; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.DriverChannelOptions; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import io.netty.channel.Channel; -import io.netty.channel.DefaultChannelPromise; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.channel.EventLoop; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.Exchanger; -import java.util.concurrent.TimeUnit; -import 
org.junit.After; -import org.junit.Before; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.verification.VerificationWithTimeout; - -abstract class ControlConnectionTestBase { - protected static final InetSocketAddress ADDRESS1 = new InetSocketAddress("127.0.0.1", 9042); - - /** How long we wait when verifying mocks for async invocations */ - protected static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(500); - - @Mock protected InternalDriverContext context; - @Mock protected DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - @Mock protected ReconnectionPolicy reconnectionPolicy; - @Mock protected ReconnectionPolicy.ReconnectionSchedule reconnectionSchedule; - @Mock protected NettyOptions nettyOptions; - protected DefaultEventLoopGroup adminEventLoopGroup; - protected EventBus eventBus; - @Mock protected ChannelFactory channelFactory; - protected Exchanger> channelFactoryFuture; - @Mock protected LoadBalancingPolicyWrapper loadBalancingPolicyWrapper; - @Mock protected MetadataManager metadataManager; - @Mock protected MetricsFactory metricsFactory; - - protected DefaultNode node1; - protected DefaultNode node2; - - protected ControlConnection controlConnection; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - adminEventLoopGroup = new DefaultEventLoopGroup(1); - - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - when(context.getChannelFactory()).thenReturn(channelFactory); - - channelFactoryFuture = new Exchanger<>(); - when(channelFactory.connect(any(Node.class), any(DriverChannelOptions.class))) - .thenAnswer( - invocation -> { - CompletableFuture channelFuture = new CompletableFuture<>(); - channelFactoryFuture.exchange(channelFuture, 100, TimeUnit.MILLISECONDS); - return 
channelFuture; - }); - - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getBoolean(DefaultDriverOption.RECONNECT_ON_INIT)).thenReturn(false); - - when(context.getReconnectionPolicy()).thenReturn(reconnectionPolicy); - // Child classes only cover "runtime" reconnections when the driver is already initialized - when(reconnectionPolicy.newControlConnectionSchedule(false)).thenReturn(reconnectionSchedule); - // By default, set a large reconnection delay. Tests that care about reconnection will override - // it. - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - - when(context.getLoadBalancingPolicyWrapper()).thenReturn(loadBalancingPolicyWrapper); - - when(context.getMetricsFactory()).thenReturn(metricsFactory); - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - mockQueryPlan(node1, node2); - - when(metadataManager.refreshNodes()).thenReturn(CompletableFuture.completedFuture(null)); - when(metadataManager.refreshSchema(anyString(), anyBoolean(), anyBoolean())) - .thenReturn(CompletableFuture.completedFuture(null)); - when(context.getMetadataManager()).thenReturn(metadataManager); - - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getBoolean(DefaultDriverOption.CONNECTION_WARN_INIT_ERROR)) - .thenReturn(false); - - controlConnection = new ControlConnection(context); - } - - protected void mockQueryPlan(Node... 
nodes) { - when(loadBalancingPolicyWrapper.newQueryPlan()) - .thenAnswer( - i -> { - ConcurrentLinkedQueue queryPlan = new ConcurrentLinkedQueue<>(); - for (Node node : nodes) { - queryPlan.offer(node); - } - return queryPlan; - }); - } - - @After - public void teardown() { - adminEventLoopGroup.shutdownGracefully(100, 200, TimeUnit.MILLISECONDS); - } - - protected DriverChannel newMockDriverChannel(int id) { - DriverChannel driverChannel = mock(DriverChannel.class); - Channel channel = mock(Channel.class); - EventLoop adminExecutor = adminEventLoopGroup.next(); - DefaultChannelPromise closeFuture = new DefaultChannelPromise(channel, adminExecutor); - when(driverChannel.close()) - .thenAnswer( - i -> { - closeFuture.trySuccess(null); - return closeFuture; - }); - when(driverChannel.forceClose()) - .thenAnswer( - i -> { - closeFuture.trySuccess(null); - return closeFuture; - }); - when(driverChannel.closeFuture()).thenReturn(closeFuture); - when(driverChannel.toString()).thenReturn("channel" + id); - when(driverChannel.getEndPoint()) - .thenReturn(new DefaultEndPoint(new InetSocketAddress("127.0.0." + id, 9042))); - return driverChannel; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ConversionsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ConversionsTest.java deleted file mode 100644 index 954cf0e14a0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ConversionsTest.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.List; -import org.junit.Test; - -public class ConversionsTest { - @Test - public void should_find_pk_indices_if_all_bound() { - assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk"))).containsExactly(0); - assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk", "c"))) - .containsExactly(0); - assertThat(Conversions.findIndices(partitionKey("pk"), variables("c", "pk"))) - .containsExactly(1); - assertThat( - Conversions.findIndices( - partitionKey("pk1", "pk2", "pk3"), - variables("c1", "pk2", "pk3", "c2", "pk1", "c3"))) - .containsExactly(4, 1, 2); - } - - @Test - public void should_use_first_pk_index_if_bound_multiple_times() { - assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk", "pk"))) - .containsExactly(0); - assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk", "c1", "pk", "c2"))) - .containsExactly(0); - assertThat( - Conversions.findIndices( - partitionKey("pk1", "pk2", "pk3"), - variables("c1", "pk2", "pk3", "c2", "pk1", "c3", "pk1", 
"pk2"))) - .containsExactly(4, 1, 2); - } - - @Test - public void should_return_empty_pk_indices_if_at_least_one_component_not_bound() { - assertThat(Conversions.findIndices(partitionKey("pk"), variables("c1", "c2"))).isEmpty(); - assertThat( - Conversions.findIndices( - partitionKey("pk1", "pk2", "pk3"), variables("c1", "pk2", "c2", "pk1", "c3"))) - .isEmpty(); - } - - private List partitionKey(String... columnNames) { - ImmutableList.Builder columns = - ImmutableList.builderWithExpectedSize(columnNames.length); - for (String columnName : columnNames) { - ColumnMetadata column = mock(ColumnMetadata.class); - when(column.getName()).thenReturn(CqlIdentifier.fromInternal(columnName)); - columns.add(column); - } - return columns.build(); - } - - private ColumnDefinitions variables(String... columnNames) { - ImmutableList.Builder columns = - ImmutableList.builderWithExpectedSize(columnNames.length); - for (String columnName : columnNames) { - ColumnDefinition column = mock(ColumnDefinition.class); - when(column.getName()).thenReturn(CqlIdentifier.fromInternal(columnName)); - columns.add(column); - } - return DefaultColumnDefinitions.valueOf(columns.build()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java deleted file mode 100644 index 1924ef5a9af..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerTestBase.defaultFrameOf; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.Message; -import 
com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class CqlPrepareHandlerTest { - - private static final DefaultPrepareRequest PREPARE_REQUEST = - new DefaultPrepareRequest("mock query"); - - @Mock private Node node1; - @Mock private Node node2; - @Mock private Node node3; - - private final Map payload = - ImmutableMap.of("key1", ByteBuffer.wrap(new byte[] {1, 2, 3, 4})); - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - public void should_prepare_on_first_node_and_reprepare_on_others() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - node1Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - // The future waits for the reprepare attempt on other nodes, so it's not done yet. 
- assertThatStage(prepareFuture).isNotDone(); - - // Should now reprepare on the remaining nodes: - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - node2Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - node3Behavior.verifyWrite(); - node3Behavior.setWriteSuccess(); - node3Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - @Test - public void should_not_reprepare_on_other_nodes_if_disabled_in_config() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - DriverExecutionProfile config = harness.getContext().getConfig().getDefaultProfile(); - when(config.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(false); - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - node1Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - // The future should complete immediately: - assertThatStage(prepareFuture).isSuccess(); - - // And the other nodes should not be contacted: - node2Behavior.verifyNoWrite(); - node3Behavior.verifyNoWrite(); - } - } - - @Test - public void should_ignore_errors_while_repreparing_on_other_nodes() { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withResponse(node1, defaultFrameOf(simplePrepared())); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try 
(RequestHandlerTestHarness harness = harnessBuilder.build()) { - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(prepareFuture).isNotDone(); - - // Other nodes fail, the future should still succeed when all done - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - node2Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.SERVER_ERROR, "mock error"))); - - node3Behavior.verifyWrite(); - node3Behavior.setWriteFailure(new RuntimeException("mock error")); - - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - @Test - public void should_retry_initial_prepare_if_recoverable_error() { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder() - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.OVERLOADED, "mock message"))) - .withResponse(node2, defaultFrameOf(simplePrepared())); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - // Make node1's error recoverable, will switch to node2 - when(harness - .getContext() - .getRetryPolicy(anyString()) - .onErrorResponseVerdict(eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) - .thenReturn(RetryVerdict.RETRY_NEXT); - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - // Success on node2, reprepare on node3 - assertThatStage(prepareFuture).isNotDone(); - node3Behavior.verifyWrite(); - node3Behavior.setWriteSuccess(); - node3Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - @Test - public void 
should_not_retry_initial_prepare_if_unrecoverable_error() { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder() - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.OVERLOADED, "mock message"))); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - // Make node1's error unrecoverable, will rethrow - when(harness - .getContext() - .getRetryPolicy(anyString()) - .onErrorResponseVerdict(eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) - .thenReturn(RetryVerdict.RETHROW); - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - // Success on node2, reprepare on node3 - assertThatStage(prepareFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(OverloadedException.class); - node2Behavior.verifyNoWrite(); - node3Behavior.verifyNoWrite(); - }); - } - } - - @Test - public void should_fail_if_nodes_unavailable() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - try (RequestHandlerTestHarness harness = - harnessBuilder.withEmptyPool(node1).withEmptyPool(node2).build()) { - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - assertThatStage(prepareFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> allErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(allErrors).hasSize(2); - assertThat(allErrors) - .hasEntrySatisfying( - node1, - nodeErrors -> - assertThat(nodeErrors) - .singleElement() - .isInstanceOf(NodeUnavailableException.class)); - assertThat(allErrors) - .hasEntrySatisfying( - node2, - nodeErrors -> - assertThat(nodeErrors) - 
.singleElement() - .isInstanceOf(NodeUnavailableException.class)); - }); - } - } - - @Test - public void should_fail_if_retry_policy_ignores_error() { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder() - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.OVERLOADED, "mock message"))); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - // Make node1's error unrecoverable, will rethrow - RetryPolicy mockRetryPolicy = - harness.getContext().getRetryPolicy(DriverExecutionProfile.DEFAULT_NAME); - when(mockRetryPolicy.onErrorResponseVerdict( - eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) - .thenReturn(RetryVerdict.IGNORE); - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - // Success on node2, reprepare on node3 - assertThatStage(prepareFuture) - .isFailed( - error -> { - assertThat(error) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "IGNORE decisions are not allowed for prepare requests, " - + "please fix your retry policy."); - node2Behavior.verifyNoWrite(); - node3Behavior.verifyNoWrite(); - }); - } - } - - @Test - public void should_propagate_custom_payload_on_single_node() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - DefaultPrepareRequest prepareRequest = - new DefaultPrepareRequest( - SimpleStatement.newInstance("irrelevant").setCustomPayload(payload)); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - node1Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - try (RequestHandlerTestHarness 
harness = harnessBuilder.build()) { - DriverExecutionProfile config = harness.getContext().getConfig().getDefaultProfile(); - when(config.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(false); - CompletionStage prepareFuture = - new CqlPrepareHandler(prepareRequest, harness.getSession(), harness.getContext(), "test") - .handle(); - verify(node1Behavior.channel) - .write(any(Prepare.class), anyBoolean(), eq(payload), any(ResponseCallback.class)); - node2Behavior.verifyNoWrite(); - node3Behavior.verifyNoWrite(); - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - @Test - public void should_propagate_custom_payload_on_all_nodes() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - DefaultPrepareRequest prepareRequest = - new DefaultPrepareRequest( - SimpleStatement.newInstance("irrelevant").setCustomPayload(payload)); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - node1Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - node2Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - node3Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - DriverExecutionProfile config = harness.getContext().getConfig().getDefaultProfile(); - when(config.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(true); - CompletionStage prepareFuture = - new CqlPrepareHandler(prepareRequest, harness.getSession(), harness.getContext(), "test") - .handle(); - verify(node1Behavior.channel) - .write(any(Prepare.class), anyBoolean(), eq(payload), any(ResponseCallback.class)); - verify(node2Behavior.channel) - .write(any(Prepare.class), anyBoolean(), eq(payload), any(ResponseCallback.class)); - 
verify(node3Behavior.channel) - .write(any(Prepare.class), anyBoolean(), eq(payload), any(ResponseCallback.class)); - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - private static Message simplePrepared() { - RowsMetadata variablesMetadata = - new RowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "key", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {0}, - null); - RowsMetadata resultMetadata = - new RowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "message", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null); - return new Prepared( - Bytes.fromHexString("0xffff").array(), null, variablesMetadata, resultMetadata); - } - - private static void assertMatchesSimplePrepared(PreparedStatement statement) { - assertThat(Bytes.toHexString(statement.getId())).isEqualTo("0xffff"); - - ColumnDefinitions variableDefinitions = statement.getVariableDefinitions(); - assertThat(variableDefinitions).hasSize(1); - assertThat(variableDefinitions.get(0).getName().asInternal()).isEqualTo("key"); - - ColumnDefinitions resultSetDefinitions = statement.getResultSetDefinitions(); - assertThat(resultSetDefinitions).hasSize(1); - assertThat(resultSetDefinitions.get(0).getName().asInternal()).isEqualTo("message"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java deleted file mode 100644 index ccac873c616..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java +++ /dev/null @@ -1,607 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atMost; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; 
-import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.error.ReadTimeout; -import com.datastax.oss.protocol.internal.response.error.Unavailable; -import com.datastax.oss.protocol.internal.response.error.WriteTimeout; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Iterator; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.Test; - -public class CqlRequestHandlerRetryTest extends CqlRequestHandlerTestBase { - - @Test - @UseDataProvider("allIdempotenceConfigs") - public void should_always_try_next_node_if_bootstrapping( - boolean defaultIdempotence, Statement statement) { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withDefaultIdempotence(defaultIdempotence) - .withResponse( - node1, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, 
defaultFrameOf(singleRow())) - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - assertThat(executionInfo.getErrors().get(0).getValue()) - .isInstanceOf(BootstrappingException.class); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - }); - } - } - - @Test - @UseDataProvider("allIdempotenceConfigs") - public void should_always_rethrow_query_validation_error( - boolean defaultIdempotence, Statement statement) { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withDefaultIdempotence(defaultIdempotence) - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.INVALID, "mock message"))) - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error) - .isInstanceOf(InvalidQueryException.class) - .hasMessage("mock message"); - verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - - 
verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.OTHER_ERRORS, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_try_next_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_NEXT); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.RETRIES, 
DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.retryMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_try_same_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node1, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_SAME); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - 
DefaultNodeMetric.RETRIES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.retryMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(2)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(2)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_ignore_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.IGNORE); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isFalse(); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).hasSize(0); - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.IGNORES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.ignoreMetric, DriverExecutionProfile.DEFAULT_NAME); - 
verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_rethrow_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass); - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndNotIdempotent") - public void should_rethrow_error_if_not_idempotent_and_error_unsafe_or_policy_rethrows( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - - // For two of the possible exceptions, 
the retry policy is called even if the statement is not - // idempotent - boolean shouldCallRetryPolicy = - (failureScenario.expectedExceptionClass.equals(UnavailableException.class) - || failureScenario.expectedExceptionClass.equals(ReadTimeoutException.class)); - - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - if (shouldCallRetryPolicy) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); - } - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass); - // When non idempotent, the policy is bypassed completely: - if (!shouldCallRetryPolicy) { - verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - } - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_not_fail_with_duplicate_key_when_retrying_with_request_id_generator( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - - // Create a RequestIdGenerator that uses the same key as the statement's custom 
payload - RequestIdGenerator requestIdGenerator = - new RequestIdGenerator() { - private AtomicInteger counter = new AtomicInteger(0); - - @Override - public String getSessionRequestId() { - return "session-123"; - } - - @Override - public String getNodeRequestId(@NonNull Request request, @NonNull String parentId) { - return parentId + "-" + counter.getAndIncrement(); - } - }; - - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder() - .withDefaultIdempotence(defaultIdempotence) - .withRequestIdGenerator(requestIdGenerator); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_NEXT); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - // The test should succeed without throwing a duplicate key exception - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - // Verify that the custom payload still contains the request ID key - // (either the original value or the generated one, depending on implementation) - assertThat(executionInfo.getRequest().getCustomPayload().get("request-id")) - .isEqualTo(ByteBuffer.wrap("session-123-1".getBytes(StandardCharsets.UTF_8))); - }); - } - } - - /** - * Sets up the mocks to simulate an error from a node, and make the retry policy 
return a given - * decision for that error. - */ - private abstract static class FailureScenario { - private final Class expectedExceptionClass; - final DefaultNodeMetric errorMetric; - final DefaultNodeMetric retryMetric; - final DefaultNodeMetric ignoreMetric; - - protected FailureScenario( - Class expectedExceptionClass, - DefaultNodeMetric errorMetric, - DefaultNodeMetric retryMetric, - DefaultNodeMetric ignoreMetric) { - this.expectedExceptionClass = expectedExceptionClass; - this.errorMetric = errorMetric; - this.retryMetric = retryMetric; - this.ignoreMetric = ignoreMetric; - } - - abstract void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node); - - abstract void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict); - } - - @DataProvider - public static Object[][] failure() { - return TestDataProviders.fromList( - new FailureScenario( - ReadTimeoutException.class, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new ReadTimeout( - "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 1, 2, true))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onReadTimeoutVerdict( - any(Statement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(2), - eq(1), - eq(true), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - WriteTimeoutException.class, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new WriteTimeout( - "mock message", - ProtocolConstants.ConsistencyLevel.LOCAL_ONE, - 1, - 2, - 
ProtocolConstants.WriteType.SIMPLE))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onWriteTimeoutVerdict( - any(Statement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(DefaultWriteType.SIMPLE), - eq(2), - eq(1), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - UnavailableException.class, - DefaultNodeMetric.UNAVAILABLES, - DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new Unavailable( - "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 2, 1))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onUnavailableVerdict( - any(Statement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(2), - eq(1), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - ServerError.class, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.SERVER_ERROR, "mock server error"))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onErrorResponseVerdict(any(Statement.class), any(ServerError.class), eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - HeartbeatException.class, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponseFailure(node, mock(HeartbeatException.class)); - } - - @Override - public void 
mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onRequestAbortedVerdict( - any(Statement.class), any(HeartbeatException.class), eq(0))) - .thenReturn(verdict); - } - }); - } - - @DataProvider - public static Object[][] failureAndIdempotent() { - return TestDataProviders.combine(failure(), idempotentConfig()); - } - - @DataProvider - public static Object[][] failureAndNotIdempotent() { - return TestDataProviders.combine(failure(), nonIdempotentConfig()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java deleted file mode 100644 index a09a9eb3d5a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java +++ /dev/null @@ -1,427 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Test; - -public class CqlRequestHandlerSpeculativeExecutionTest extends CqlRequestHandlerTestBase { - - @Test - @UseDataProvider("nonIdempotentConfig") - public void should_not_schedule_speculative_executions_if_not_idempotent( - boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - - 
try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test").handle(); - - node1Behavior.verifyWrite(); - - assertThat(harness.nextScheduledTimeout()).isNotNull(); // Discard the timeout task - assertThat(harness.nextScheduledTimeout()).isNull(); - - verifyNoMoreInteractions(speculativeExecutionPolicy); - verifyNoMoreInteractions(nodeMetricUpdater1); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_schedule_speculative_executions( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - long secondExecutionDelay = 200L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(2))) - .thenReturn(secondExecutionDelay); - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(3))) - .thenReturn(-1L); - - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test").handle(); - - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - 
harness.nextScheduledTimeout(); // Discard the timeout task - - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - verifyNoMoreInteractions(nodeMetricUpdater1); - speculativeExecution1.task().run(speculativeExecution1); - verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME); - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - CapturedTimeout speculativeExecution2 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution2.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(secondExecutionDelay); - verifyNoMoreInteractions(nodeMetricUpdater2); - speculativeExecution2.task().run(speculativeExecution2); - verify(nodeMetricUpdater2) - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME); - node3Behavior.verifyWrite(); - node3Behavior.setWriteSuccess(); - - // No more scheduled tasks since the policy returns 0 on the third call. - assertThat(harness.nextScheduledTimeout()).isNull(); - - // Note that we don't need to complete any response, the test is just about checking that - // executions are started. 
- } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_not_start_execution_if_result_complete( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CqlRequestHandler requestHandler = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = requestHandler.handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - harness.nextScheduledTimeout(); // Discard the timeout task - - // Check that the first execution was scheduled but don't run it yet - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - - // Complete the request from the initial execution - node1Behavior.setResponseSuccess(defaultFrameOf(singleRow())); - assertThatStage(resultSetFuture).isSuccess(); - - // Pending speculative executions should have been cancelled. However we don't check - // firstExecutionTask directly because the request handler's onResponse can sometimes be - // invoked before operationComplete (this is very unlikely in practice, but happens in our - // Travis CI build). When that happens, the speculative execution is not recorded yet when - // cancelScheduledTasks runs. 
- // So check the timeout future instead, since it's cancelled in the same method. - assertThat(requestHandler.scheduledTimeout.isCancelled()).isTrue(); - - // The fact that we missed the speculative execution is not a problem; even if it starts, it - // will eventually find out that the result is already complete and cancel itself: - speculativeExecution1.task().run(speculativeExecution1); - node2Behavior.verifyNoWrite(); - - verify(nodeMetricUpdater1) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_fail_if_no_nodes(boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - // No configured behaviors => will yield an empty query plan - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - harness.nextScheduledTimeout(); // Discard the timeout task - - assertThatStage(resultSetFuture) - .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class)); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_fail_if_no_more_nodes_and_initial_execution_is_last( - boolean defaultIdempotence, Statement statement) 
throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - harnessBuilder.withResponse( - node2, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1 yet - - harness.nextScheduledTimeout(); // Discard the timeout task - - // Run the next scheduled task to start the speculative execution. node2 will reply with a - // BOOTSTRAPPING error, causing a RETRY_NEXT; but the query plan is now empty so the - // speculative execution stops. - // next scheduled timeout should be the first speculative execution. Get it and run it. 
- CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - // node1 now replies with the same response, that triggers a RETRY_NEXT - node1Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // But again the query plan is empty so that should fail the request - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> nodeErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); - }); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_fail_if_no_more_nodes_and_speculative_execution_is_last( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - node1Behavior.verifyWrite(); - 
node1Behavior.setWriteSuccess(); - // do not simulate a response from node1 yet - - harness.nextScheduledTimeout(); // Discard the timeout task - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - // node1 now replies with a BOOTSTRAPPING error that triggers a RETRY_NEXT - // but the query plan is empty so the initial execution stops - node1Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // Same thing with node2, so the speculative execution should reach the end of the query plan - // and fail the request - node2Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> nodeErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); - }); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_retry_in_speculative_executions( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - harnessBuilder.withResponse(node3, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = 
harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1. The request will stay hanging for the rest of this - // test - - harness.nextScheduledTimeout(); // Discard the timeout task - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - // node2 replies with a response that triggers a RETRY_NEXT - node2Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // The second execution should move to node3 and complete the request - assertThatStage(resultSetFuture).isSuccess(); - - // The request to node1 was still in flight, it should have been cancelled - node1Behavior.verifyCancellation(); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_stop_retrying_other_executions_if_result_complete( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = 
harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - harness.nextScheduledTimeout(); // Discard the timeout task - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - // Complete the request from the initial execution - node1Behavior.setResponseSuccess(defaultFrameOf(singleRow())); - assertThatStage(resultSetFuture).isSuccess(); - - // node2 replies with a response that would trigger a RETRY_NEXT if the request was still - // running - node2Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // The speculative execution should not move to node3 because it is stopped - node3Behavior.verifyNoWrite(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java deleted file mode 100644 index c1a2765eef0..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; 
-import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.error.Unprepared; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import org.junit.Test; - -public class CqlRequestHandlerTest extends CqlRequestHandlerTestBase { - - @Test - public void should_complete_result_if_first_node_replies_immediately() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withResponse(node1, defaultFrameOf(singleRow())) - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - 
assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - } - } - - @Test - public void should_fail_if_no_node_available() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - // Mock no responses => this will produce an empty query plan - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class)); - } - } - - @Test - public void should_fail_if_nodes_unavailable() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - try (RequestHandlerTestHarness harness = - harnessBuilder.withEmptyPool(node1).withEmptyPool(node2).build()) { - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> allErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(allErrors).hasSize(2); - assertThat(allErrors) - .hasEntrySatisfying( - node1, - nodeErrors -> - assertThat(nodeErrors) - .singleElement() - .isInstanceOf(NodeUnavailableException.class)); - assertThat(allErrors) - .hasEntrySatisfying( - node2, - nodeErrors -> - assertThat(nodeErrors) - .singleElement() - .isInstanceOf(NodeUnavailableException.class)); - }); - } - } - - @Test - public void should_time_out_if_first_node_takes_too_long_to_respond() throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); 
- node1Behavior.setWriteSuccess(); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // First scheduled task is the timeout, run it before node1 has responded - CapturedTimeout requestTimeout = harness.nextScheduledTimeout(); - Duration configuredTimeoutDuration = - harness - .getContext() - .getConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.REQUEST_TIMEOUT); - assertThat(requestTimeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(configuredTimeoutDuration.toNanos()); - requestTimeout.task().run(requestTimeout); - - assertThatStage(resultSetFuture) - .isFailed(t -> assertThat(t).isInstanceOf(DriverTimeoutException.class)); - } - } - - @Test - public void should_switch_keyspace_on_session_after_successful_use_statement() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withResponse(node1, defaultFrameOf(new SetKeyspace("newKeyspace"))) - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> - verify(harness.getSession()) - .setKeyspace(CqlIdentifier.fromInternal("newKeyspace"))); - } - } - - @Test - public void should_reprepare_on_the_fly_if_not_prepared() throws InterruptedException { - ByteBuffer mockId = Bytes.fromHexString("0xffff"); - - PreparedStatement preparedStatement = mock(PreparedStatement.class); - when(preparedStatement.getId()).thenReturn(mockId); - ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class); - when(columnDefinitions.size()).thenReturn(0); - when(preparedStatement.getResultSetDefinitions()).thenReturn(columnDefinitions); - BoundStatement boundStatement = mock(BoundStatement.class); - 
when(boundStatement.getPreparedStatement()).thenReturn(preparedStatement); - when(boundStatement.getValues()).thenReturn(Collections.emptyList()); - when(boundStatement.getNowInSeconds()).thenReturn(Statement.NO_NOW_IN_SECONDS); - - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - // For the first attempt that gets the UNPREPARED response - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - // For the second attempt that succeeds - harnessBuilder.withResponse(node1, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - // The handler will look for the info to reprepare in the session's cache, put it there - ConcurrentMap repreparePayloads = new ConcurrentHashMap<>(); - repreparePayloads.put( - mockId, new RepreparePayload(mockId, "mock query", null, Collections.emptyMap())); - when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads); - - CompletionStage resultSetFuture = - new CqlRequestHandler(boundStatement, harness.getSession(), harness.getContext(), "test") - .handle(); - - // Before we proceed, mock the PREPARE exchange that will occur as soon as we complete the - // first response. - node1Behavior.mockFollowupRequest( - Prepare.class, defaultFrameOf(new Prepared(Bytes.getArray(mockId), null, null, null))); - - node1Behavior.setWriteSuccess(); - node1Behavior.setResponseSuccess( - defaultFrameOf(new Unprepared("mock message", Bytes.getArray(mockId)))); - - // Should now re-prepare, re-execute and succeed. 
- assertThatStage(resultSetFuture).isSuccess(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java deleted file mode 100644 index 9bd3b6fa28c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchType; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.List; -import java.util.Queue; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public abstract class CqlRequestHandlerTestBase { - - protected static final SimpleStatement UNDEFINED_IDEMPOTENCE_STATEMENT = - SimpleStatement.newInstance("mock query"); - protected static final SimpleStatement IDEMPOTENT_STATEMENT = - SimpleStatement.builder("mock 
query").setIdempotence(true).build(); - protected static final SimpleStatement NON_IDEMPOTENT_STATEMENT = - SimpleStatement.builder("mock query").setIdempotence(false).build(); - protected static final BatchStatement UNDEFINED_IDEMPOTENCE_BATCH_STATEMENT = - BatchStatement.newInstance(BatchType.LOGGED, UNDEFINED_IDEMPOTENCE_STATEMENT); - protected static final BatchStatement IDEMPOTENT_BATCH_STATEMENT = - BatchStatement.newInstance(BatchType.LOGGED, IDEMPOTENT_STATEMENT).setIdempotent(true); - protected static final BatchStatement NON_IDEMPOTENT_BATCH_STATEMENT = - BatchStatement.newInstance(BatchType.LOGGED, NON_IDEMPOTENT_STATEMENT).setIdempotent(false); - - @Mock protected DefaultNode node1; - @Mock protected DefaultNode node2; - @Mock protected DefaultNode node3; - @Mock protected NodeMetricUpdater nodeMetricUpdater1; - @Mock protected NodeMetricUpdater nodeMetricUpdater2; - @Mock protected NodeMetricUpdater nodeMetricUpdater3; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(node1.getMetricUpdater()).thenReturn(nodeMetricUpdater1); - when(nodeMetricUpdater1.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(node2.getMetricUpdater()).thenReturn(nodeMetricUpdater2); - when(nodeMetricUpdater2.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(node3.getMetricUpdater()).thenReturn(nodeMetricUpdater3); - when(nodeMetricUpdater3.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - } - - protected static Frame defaultFrameOf(Message responseMessage) { - return Frame.forResponse( - DefaultProtocolVersion.V4.getCode(), - 0, - null, - Frame.NO_PAYLOAD, - Collections.emptyList(), - responseMessage); - } - - // Returns a single row, with a single "message" column with the value "hello, world" - protected static Message singleRow() { - RowsMetadata metadata = - new RowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "message", - 0, - 
RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null); - Queue> data = new ArrayDeque<>(); - data.add(ImmutableList.of(Bytes.fromHexString("0x68656C6C6F2C20776F726C64"))); - return new DefaultRows(metadata, data); - } - - /** - * The combination of the default idempotence option and statement setting that produce an - * idempotent statement. - */ - @DataProvider - public static Object[][] idempotentConfig() { - return new Object[][] { - new Object[] {true, UNDEFINED_IDEMPOTENCE_STATEMENT}, - new Object[] {false, IDEMPOTENT_STATEMENT}, - new Object[] {true, IDEMPOTENT_STATEMENT}, - new Object[] {true, UNDEFINED_IDEMPOTENCE_BATCH_STATEMENT}, - new Object[] {false, IDEMPOTENT_BATCH_STATEMENT}, - new Object[] {true, IDEMPOTENT_BATCH_STATEMENT}, - }; - } - - /** - * The combination of the default idempotence option and statement setting that produce a non - * idempotent statement. - */ - @DataProvider - public static Object[][] nonIdempotentConfig() { - return new Object[][] { - new Object[] {false, UNDEFINED_IDEMPOTENCE_STATEMENT}, - new Object[] {true, NON_IDEMPOTENT_STATEMENT}, - new Object[] {false, NON_IDEMPOTENT_STATEMENT}, - new Object[] {false, UNDEFINED_IDEMPOTENCE_BATCH_STATEMENT}, - new Object[] {true, NON_IDEMPOTENT_BATCH_STATEMENT}, - new Object[] {false, NON_IDEMPOTENT_BATCH_STATEMENT}, - }; - } - - @DataProvider - public static Object[][] allIdempotenceConfigs() { - return TestDataProviders.concat(idempotentConfig(), nonIdempotentConfig()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java deleted file mode 100644 index ecc087fb8ac..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class CqlRequestHandlerTrackerTest extends CqlRequestHandlerTestBase { - - @Test - public void should_invoke_request_tracker() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - 
.withDefaultIdempotence(true) - .withResponse( - node1, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, defaultFrameOf(singleRow())) - .build()) { - - RequestTracker requestTracker = mock(RequestTracker.class); - when(harness.getContext().getRequestTracker()).thenReturn(requestTracker); - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - verify(requestTracker) - .onNodeError( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - any(BootstrappingException.class), - anyLong(), - any(DriverExecutionProfile.class), - eq(node1), - any(String.class)); - verify(requestTracker) - .onNodeSuccess( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - anyLong(), - any(DriverExecutionProfile.class), - eq(node2), - any(String.class)); - verify(requestTracker) - .onSuccess( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - anyLong(), - any(DriverExecutionProfile.class), - eq(node2), - any(String.class)); - verifyNoMoreInteractions(requestTracker); - }); - } - } - - @Test - public void should_not_invoke_noop_request_tracker() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withDefaultIdempotence(true) - .withResponse( - node1, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, defaultFrameOf(singleRow())) - .build()) { - - RequestTracker requestTracker = spy(new NoopRequestTracker(harness.getContext())); - when(harness.getContext().getRequestTracker()).thenReturn(requestTracker); - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess(resultSet -> verifyNoMoreInteractions(requestTracker)); - } - } -} 
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java deleted file mode 100644 index 8ed509caeb7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DefaultAsyncResultSetTest { - - @Mock private ColumnDefinitions columnDefinitions; - @Mock private ExecutionInfo executionInfo; - @Mock private Statement statement; - @Mock private CqlSession session; - @Mock private InternalDriverContext context; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(executionInfo.getRequest()).thenAnswer(invocation -> statement); - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - 
when(context.getProtocolVersion()).thenReturn(DefaultProtocolVersion.DEFAULT); - } - - @Test(expected = IllegalStateException.class) - public void should_fail_to_fetch_next_page_if_last() { - // Given - when(executionInfo.getPagingState()).thenReturn(null); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo, new ArrayDeque<>(), session, context); - - // Then - assertThat(resultSet.hasMorePages()).isFalse(); - resultSet.fetchNextPage(); - } - - @Test - public void should_invoke_session_to_fetch_next_page() { - // Given - ByteBuffer mockPagingState = ByteBuffer.allocate(0); - when(executionInfo.getPagingState()).thenReturn(mockPagingState); - - Statement mockNextStatement = mock(Statement.class); - when(((Statement) statement).copy(mockPagingState)).thenReturn(mockNextStatement); - - CompletableFuture mockResultFuture = new CompletableFuture<>(); - when(session.executeAsync(any(Statement.class))).thenAnswer(invocation -> mockResultFuture); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo, new ArrayDeque<>(), session, context); - assertThat(resultSet.hasMorePages()).isTrue(); - CompletionStage nextPageFuture = resultSet.fetchNextPage(); - - // Then - verify(statement).copy(mockPagingState); - verify(session).executeAsync(mockNextStatement); - assertThatStage(nextPageFuture).isEqualTo(mockResultFuture); - } - - @Test - public void should_report_applied_if_column_not_present_and_empty() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(false); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo, new ArrayDeque<>(), session, context); - - // Then - assertThat(resultSet.wasApplied()).isTrue(); - } - - @Test - public void should_report_applied_if_column_not_present_and_not_empty() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(false); - Queue> 
data = new ArrayDeque<>(); - data.add(Lists.newArrayList(Bytes.fromHexString("0xffff"))); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet(columnDefinitions, executionInfo, data, session, context); - - // Then - assertThat(resultSet.wasApplied()).isTrue(); - } - - @Test - public void should_report_not_applied_if_column_present_and_false() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(true); - ColumnDefinition columnDefinition = mock(ColumnDefinition.class); - when(columnDefinition.getType()).thenReturn(DataTypes.BOOLEAN); - when(columnDefinitions.get("[applied]")).thenReturn(columnDefinition); - when(columnDefinitions.firstIndexOf("[applied]")).thenReturn(0); - when(columnDefinitions.get(0)).thenReturn(columnDefinition); - - Queue> data = new ArrayDeque<>(); - data.add(Lists.newArrayList(TypeCodecs.BOOLEAN.encode(false, DefaultProtocolVersion.DEFAULT))); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet(columnDefinitions, executionInfo, data, session, context); - - // Then - assertThat(resultSet.wasApplied()).isFalse(); - } - - @Test - public void should_report_not_applied_if_column_present_and_true() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(true); - ColumnDefinition columnDefinition = mock(ColumnDefinition.class); - when(columnDefinition.getType()).thenReturn(DataTypes.BOOLEAN); - when(columnDefinitions.get("[applied]")).thenReturn(columnDefinition); - when(columnDefinitions.firstIndexOf("[applied]")).thenReturn(0); - when(columnDefinitions.get(0)).thenReturn(columnDefinition); - - Queue> data = new ArrayDeque<>(); - data.add(Lists.newArrayList(TypeCodecs.BOOLEAN.encode(true, DefaultProtocolVersion.DEFAULT))); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet(columnDefinitions, executionInfo, data, session, context); - - // Then - assertThat(resultSet.wasApplied()).isTrue(); - } - - @Test(expected = IllegalStateException.class) - 
public void should_fail_to_report_if_applied_if_column_present_but_empty() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(true); - ColumnDefinition columnDefinition = mock(ColumnDefinition.class); - when(columnDefinition.getType()).thenReturn(DataTypes.BOOLEAN); - when(columnDefinitions.get("[applied]")).thenReturn(columnDefinition); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo, new ArrayDeque<>(), session, context); - - // Then - resultSet.wasApplied(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java deleted file mode 100644 index d6787cc018e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static java.util.stream.StreamSupport.stream; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.MockPagingIterable; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.ArrayList; -import java.util.List; -import java.util.Spliterator; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PagingIterableSpliteratorTest { - - @Test - @UseDataProvider("splitsWithEstimatedSize") - public void should_split_with_estimated_size( - int size, int chunkSize, List expectedLeft, List expectedRight) { - // given - PagingIterableSpliterator.Builder builder = - PagingIterableSpliterator.builder(iterableOfSize(size)) - .withEstimatedSize(size) - .withChunkSize(chunkSize); - // when - PagingIterableSpliterator right = builder.build(); - Spliterator left = right.trySplit(); - // then - assertThat(right.characteristics()) - .isEqualTo( - Spliterator.ORDERED - | Spliterator.IMMUTABLE - | Spliterator.NONNULL - | Spliterator.SIZED - | Spliterator.SUBSIZED); - assertThat(right.estimateSize()).isEqualTo(expectedRight.size()); - assertThat(right.getExactSizeIfKnown()).isEqualTo(expectedRight.size()); - TestConsumer rightConsumer = new TestConsumer(); - right.forEachRemaining(rightConsumer); - assertThat(rightConsumer.items).containsExactlyElementsOf(expectedRight); - if (expectedLeft.isEmpty()) { - assertThat(left).isNull(); - } else { - assertThat(left.characteristics()) - .isEqualTo( - Spliterator.ORDERED - | 
Spliterator.IMMUTABLE - | Spliterator.NONNULL - | Spliterator.SIZED - | Spliterator.SUBSIZED); - assertThat(left.estimateSize()).isEqualTo(expectedLeft.size()); - assertThat(left.getExactSizeIfKnown()).isEqualTo(expectedLeft.size()); - TestConsumer leftConsumer = new TestConsumer(); - left.forEachRemaining(leftConsumer); - assertThat(leftConsumer.items).containsExactlyElementsOf(expectedLeft); - } - } - - @DataProvider - public static Iterable splitsWithEstimatedSize() { - List> arguments = new ArrayList<>(); - arguments.add(Lists.newArrayList(0, 1, ImmutableList.of(), ImmutableList.of())); - arguments.add(Lists.newArrayList(1, 1, ImmutableList.of(), ImmutableList.of(0))); - arguments.add(Lists.newArrayList(1, 2, ImmutableList.of(), ImmutableList.of(0))); - arguments.add(Lists.newArrayList(2, 1, ImmutableList.of(0), ImmutableList.of(1))); - arguments.add( - Lists.newArrayList( - 10, 1, ImmutableList.of(0), ImmutableList.of(1, 2, 3, 4, 5, 6, 7, 8, 9))); - arguments.add( - Lists.newArrayList( - 10, 5, ImmutableList.of(0, 1, 2, 3, 4), ImmutableList.of(5, 6, 7, 8, 9))); - arguments.add( - Lists.newArrayList( - 10, 9, ImmutableList.of(0, 1, 2, 3, 4, 5, 6, 7, 8), ImmutableList.of(9))); - arguments.add( - Lists.newArrayList( - 10, 10, ImmutableList.of(), ImmutableList.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - return arguments; - } - - @Test - @UseDataProvider("splitsWithUnknownSize") - public void should_split_with_unknown_size( - int size, int chunkSize, List expectedLeft, List expectedRight) { - // given - PagingIterableSpliterator.Builder builder = - PagingIterableSpliterator.builder(iterableOfSize(size)).withChunkSize(chunkSize); - // when - PagingIterableSpliterator right = builder.build(); - Spliterator left = right.trySplit(); - // then - assertThat(right.characteristics()) - .isEqualTo(Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL); - assertThat(right.estimateSize()).isEqualTo(Long.MAX_VALUE); - 
assertThat(right.getExactSizeIfKnown()).isEqualTo(-1); - TestConsumer rightConsumer = new TestConsumer(); - right.forEachRemaining(rightConsumer); - assertThat(rightConsumer.items).containsExactlyElementsOf(expectedRight); - if (expectedLeft.isEmpty()) { - assertThat(left).isNull(); - } else { - // left side will also be SIZED and SUBSIZED - assertThat(left.characteristics()) - .isEqualTo( - Spliterator.ORDERED - | Spliterator.IMMUTABLE - | Spliterator.NONNULL - | Spliterator.SIZED - | Spliterator.SUBSIZED); - assertThat(left.estimateSize()).isEqualTo(expectedLeft.size()); - assertThat(left.getExactSizeIfKnown()).isEqualTo(expectedLeft.size()); - TestConsumer leftConsumer = new TestConsumer(); - left.forEachRemaining(leftConsumer); - assertThat(leftConsumer.items).containsExactlyElementsOf(expectedLeft); - } - } - - @DataProvider - public static Iterable splitsWithUnknownSize() { - List> arguments = new ArrayList<>(); - arguments.add(Lists.newArrayList(0, 1, ImmutableList.of(), ImmutableList.of())); - arguments.add(Lists.newArrayList(1, 1, ImmutableList.of(0), ImmutableList.of())); - arguments.add(Lists.newArrayList(1, 2, ImmutableList.of(0), ImmutableList.of())); - arguments.add(Lists.newArrayList(2, 1, ImmutableList.of(0), ImmutableList.of(1))); - arguments.add( - Lists.newArrayList( - 10, 1, ImmutableList.of(0), ImmutableList.of(1, 2, 3, 4, 5, 6, 7, 8, 9))); - arguments.add( - Lists.newArrayList( - 10, 5, ImmutableList.of(0, 1, 2, 3, 4), ImmutableList.of(5, 6, 7, 8, 9))); - arguments.add( - Lists.newArrayList( - 10, 9, ImmutableList.of(0, 1, 2, 3, 4, 5, 6, 7, 8), ImmutableList.of(9))); - arguments.add( - Lists.newArrayList( - 10, 10, ImmutableList.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), ImmutableList.of())); - return arguments; - } - - @Test - public void should_consume_with_tryAdvance() { - // given - PagingIterableSpliterator spliterator = - new PagingIterableSpliterator<>(iterableOfSize(10)); - TestConsumer action = new TestConsumer(); - // when - for (int i = 0; i 
< 20; i++) { - spliterator.tryAdvance(action); - } - // then - assertThat(action.items).containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); - } - - @Test - public void should_consume_with_forEachRemaining() { - // given - PagingIterableSpliterator spliterator = - new PagingIterableSpliterator<>(iterableOfSize(10)); - TestConsumer action = new TestConsumer(); - // when - spliterator.forEachRemaining(action); - // then - assertThat(action.items).containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); - } - - @Test - @UseDataProvider("streams") - public void should_consume_stream(int size, int chunkSize, boolean parallel) { - // given - PagingIterableSpliterator spliterator = - PagingIterableSpliterator.builder(iterableOfSize(size)) - .withEstimatedSize(size) - .withChunkSize(chunkSize) - .build(); - // when - long count = stream(spliterator, parallel).count(); - // then - assertThat(count).isEqualTo(size); - } - - @DataProvider - public static Iterable streams() { - List> arguments = new ArrayList<>(); - arguments.add(Lists.newArrayList(10_000, 5_000, false)); - arguments.add(Lists.newArrayList(10_000, 1_000, false)); - arguments.add(Lists.newArrayList(10_000, 9_999, false)); - arguments.add(Lists.newArrayList(10_000, 1, false)); - arguments.add(Lists.newArrayList(10_000, 5_000, true)); - arguments.add(Lists.newArrayList(10_000, 1_000, true)); - arguments.add(Lists.newArrayList(10_000, 9_999, true)); - arguments.add(Lists.newArrayList(10_000, 1, true)); - return arguments; - } - - private static MockPagingIterable iterableOfSize(int size) { - return new MockPagingIterable<>( - IntStream.range(0, size).boxed().collect(Collectors.toList()).iterator()); - } - - private static class TestConsumer implements Consumer { - - private final List items = new ArrayList<>(); - - @Override - public void accept(Integer integer) { - items.add(integer); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java deleted file mode 100644 index 9b018f17531..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import io.netty.channel.ChannelConfig; -import io.netty.channel.ChannelFuture; -import io.netty.channel.EventLoop; -import io.netty.channel.socket.DefaultSocketChannelConfig; -import io.netty.util.concurrent.ImmediateEventExecutor; -import io.netty.util.concurrent.Promise; -import java.util.concurrent.CompletableFuture; - -/** - * The simulated behavior of the connection pool for a given node in a {@link - * RequestHandlerTestHarness}. - * - *

This only covers a single attempt, if the node is to be tried multiple times there will be - * multiple instances of this class. - */ -public class PoolBehavior { - - final Node node; - final DriverChannel channel; - private final Promise writePromise; - private final CompletableFuture callbackFuture = new CompletableFuture<>(); - - public PoolBehavior(Node node, boolean createChannel) { - this.node = node; - if (!createChannel) { - this.channel = null; - this.writePromise = null; - } else { - this.channel = mock(DriverChannel.class); - EventLoop eventLoop = mock(EventLoop.class); - ChannelConfig config = mock(DefaultSocketChannelConfig.class); - this.writePromise = ImmediateEventExecutor.INSTANCE.newPromise(); - when(channel.preAcquireId()).thenReturn(true); - when(channel.write(any(Message.class), anyBoolean(), anyMap(), any(ResponseCallback.class))) - .thenAnswer( - invocation -> { - ResponseCallback callback = invocation.getArgument(3); - callback.onStreamIdAssigned(1); - callbackFuture.complete(callback); - return writePromise; - }); - ChannelFuture closeFuture = mock(ChannelFuture.class); - when(channel.closeFuture()).thenReturn(closeFuture); - when(channel.eventLoop()).thenReturn(eventLoop); - when(channel.config()).thenReturn(config); - } - } - - public void verifyWrite() { - verify(channel).write(any(Message.class), anyBoolean(), anyMap(), any(ResponseCallback.class)); - } - - public void verifyNoWrite() { - verify(channel, never()) - .write(any(Message.class), anyBoolean(), anyMap(), any(ResponseCallback.class)); - } - - public void setWriteSuccess() { - writePromise.setSuccess(null); - } - - public void setWriteFailure(Throwable cause) { - writePromise.setFailure(cause); - } - - public void setResponseSuccess(Frame responseFrame) { - callbackFuture.thenAccept(callback -> callback.onResponse(responseFrame)); - } - - public void setResponseFailure(Throwable cause) { - callbackFuture.thenAccept(callback -> callback.onFailure(cause)); - } - - public Node 
getNode() { - return node; - } - - public DriverChannel getChannel() { - return channel; - } - - /** Mocks a follow-up request on the same channel. */ - public void mockFollowupRequest(Class expectedMessage, Frame responseFrame) { - Promise writePromise2 = ImmediateEventExecutor.INSTANCE.newPromise(); - CompletableFuture callbackFuture2 = new CompletableFuture<>(); - when(channel.write(any(expectedMessage), anyBoolean(), anyMap(), any(ResponseCallback.class))) - .thenAnswer( - invocation -> { - callbackFuture2.complete(invocation.getArgument(3)); - return writePromise2; - }); - writePromise2.setSuccess(null); - callbackFuture2.thenAccept(callback -> callback.onResponse(responseFrame)); - } - - public void verifyCancellation() { - verify(channel).cancel(any(ResponseCallback.class)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java deleted file mode 100644 index dc238775bc1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java +++ /dev/null @@ -1,397 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.TraceEvent; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.EventExecutorGroup; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import 
java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class QueryTraceFetcherTest { - - private static final UUID TRACING_ID = UUID.randomUUID(); - private static final ByteBuffer PAGING_STATE = Bytes.fromHexString("0xdeadbeef"); - private static final int PORT = 7000; - - @Mock private CqlSession session; - @Mock private InternalDriverContext context; - @Mock private DriverExecutionProfile config; - @Mock private DriverExecutionProfile traceConfig; - @Mock private NettyOptions nettyOptions; - @Mock private EventExecutorGroup adminEventExecutorGroup; - @Mock private EventExecutor eventExecutor; - private InetAddress address = InetAddress.getLoopbackAddress(); - - @Captor private ArgumentCaptor statementCaptor; - - @Before - public void setup() { - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventExecutorGroup); - when(adminEventExecutorGroup.next()).thenReturn(eventExecutor); - // Always execute scheduled tasks immediately: - when(eventExecutor.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class))) - .thenAnswer( - invocation -> { - Runnable runnable = invocation.getArgument(0); - runnable.run(); - // OK because the production code doesn't use the result: - return null; - }); - - when(config.getInt(DefaultDriverOption.REQUEST_TRACE_ATTEMPTS)).thenReturn(3); - // Doesn't really matter since we mock the scheduler - when(config.getDuration(DefaultDriverOption.REQUEST_TRACE_INTERVAL)).thenReturn(Duration.ZERO); - when(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name()); - 
when(config.getString(DefaultDriverOption.REQUEST_TRACE_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.ONE.name()); - - when(config.withString( - DefaultDriverOption.REQUEST_CONSISTENCY, DefaultConsistencyLevel.ONE.name())) - .thenReturn(traceConfig); - } - - @Test - public void should_succeed_when_both_queries_succeed_immediately() { - // Given - CompletionStage sessionRow = completeSessionRow(); - CompletionStage eventRows = singlePageEventRows(); - when(session.executeAsync(any(SimpleStatement.class))) - .thenAnswer(invocation -> sessionRow) - .thenAnswer(invocation -> eventRows); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // Then - verify(session, times(2)).executeAsync(statementCaptor.capture()); - List statements = statementCaptor.getAllValues(); - assertSessionQuery(statements.get(0)); - SimpleStatement statement = statements.get(1); - assertEventsQuery(statement); - verifyNoMoreInteractions(session); - - assertThatStage(traceFuture) - .isSuccess( - trace -> { - assertThat(trace.getTracingId()).isEqualTo(TRACING_ID); - assertThat(trace.getRequestType()).isEqualTo("mock request"); - assertThat(trace.getDurationMicros()).isEqualTo(42); - assertThat(trace.getCoordinatorAddress().getAddress()).isEqualTo(address); - assertThat(trace.getCoordinatorAddress().getPort()).isEqualTo(PORT); - assertThat(trace.getParameters()) - .hasSize(2) - .containsEntry("key1", "value1") - .containsEntry("key2", "value2"); - assertThat(trace.getStartedAt()).isEqualTo(0); - - List events = trace.getEvents(); - assertThat(events).hasSize(3); - for (int i = 0; i < events.size(); i++) { - TraceEvent event = events.get(i); - assertThat(event.getActivity()).isEqualTo("mock activity " + i); - assertThat(event.getTimestamp()).isEqualTo(i); - assertThat(event.getSourceAddress()).isNotNull(); - assertThat(event.getSourceAddress().getAddress()).isEqualTo(address); - 
assertThat(event.getSourceAddress().getPort()).isEqualTo(PORT); - assertThat(event.getSourceElapsedMicros()).isEqualTo(i); - assertThat(event.getThreadName()).isEqualTo("mock thread " + i); - } - }); - } - - /** - * This should not happen with a sane configuration, but we need to handle it in case {@link - * DefaultDriverOption#REQUEST_PAGE_SIZE} is set ridiculously low. - */ - @Test - public void should_succeed_when_events_query_is_paged() { - // Given - CompletionStage sessionRow = completeSessionRow(); - CompletionStage eventRows1 = multiPageEventRows1(); - CompletionStage eventRows2 = multiPageEventRows2(); - when(session.executeAsync(any(SimpleStatement.class))) - .thenAnswer(invocation -> sessionRow) - .thenAnswer(invocation -> eventRows1) - .thenAnswer(invocation -> eventRows2); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // Then - verify(session, times(3)).executeAsync(statementCaptor.capture()); - List statements = statementCaptor.getAllValues(); - assertSessionQuery(statements.get(0)); - assertEventsQuery(statements.get(1)); - assertEventsQuery(statements.get(2)); - assertThat(statements.get(2).getPagingState()).isEqualTo(PAGING_STATE); - verifyNoMoreInteractions(session); - - assertThatStage(traceFuture).isSuccess(trace -> assertThat(trace.getEvents()).hasSize(2)); - } - - @Test - public void should_retry_when_session_row_is_incomplete() { - // Given - CompletionStage sessionRow1 = incompleteSessionRow(); - CompletionStage sessionRow2 = completeSessionRow(); - CompletionStage eventRows = singlePageEventRows(); - when(session.executeAsync(any(SimpleStatement.class))) - .thenAnswer(invocation -> sessionRow1) - .thenAnswer(invocation -> sessionRow2) - .thenAnswer(invocation -> eventRows); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // 
Then - verify(session, times(3)).executeAsync(statementCaptor.capture()); - List statements = statementCaptor.getAllValues(); - assertSessionQuery(statements.get(0)); - assertSessionQuery(statements.get(1)); - assertEventsQuery(statements.get(2)); - verifyNoMoreInteractions(session); - - assertThatStage(traceFuture) - .isSuccess( - trace -> { - assertThat(trace.getTracingId()).isEqualTo(TRACING_ID); - assertThat(trace.getRequestType()).isEqualTo("mock request"); - assertThat(trace.getDurationMicros()).isEqualTo(42); - assertThat(trace.getCoordinatorAddress().getAddress()).isEqualTo(address); - assertThat(trace.getCoordinatorAddress().getPort()).isEqualTo(PORT); - assertThat(trace.getParameters()) - .hasSize(2) - .containsEntry("key1", "value1") - .containsEntry("key2", "value2"); - assertThat(trace.getStartedAt()).isEqualTo(0); - - List events = trace.getEvents(); - assertThat(events).hasSize(3); - for (int i = 0; i < events.size(); i++) { - TraceEvent event = events.get(i); - assertThat(event.getActivity()).isEqualTo("mock activity " + i); - assertThat(event.getTimestamp()).isEqualTo(i); - assertThat(event.getSourceAddress()).isNotNull(); - assertThat(event.getSourceAddress().getAddress()).isEqualTo(address); - assertThat(event.getSourceAddress().getPort()).isEqualTo(PORT); - assertThat(event.getSourceElapsedMicros()).isEqualTo(i); - assertThat(event.getThreadName()).isEqualTo("mock thread " + i); - } - }); - } - - @Test - public void should_fail_when_session_query_fails() { - // Given - RuntimeException mockError = new RuntimeException("mock error"); - when(session.executeAsync(any(SimpleStatement.class))) - .thenReturn(CompletableFutures.failedFuture(mockError)); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // Then - verify(session).executeAsync(statementCaptor.capture()); - SimpleStatement statement = statementCaptor.getValue(); - 
assertSessionQuery(statement); - verifyNoMoreInteractions(session); - - assertThatStage(traceFuture).isFailed(error -> assertThat(error).isSameAs(mockError)); - } - - @Test - public void should_fail_when_session_query_still_incomplete_after_max_tries() { - // Given - CompletionStage sessionRow1 = incompleteSessionRow(); - CompletionStage sessionRow2 = incompleteSessionRow(); - CompletionStage sessionRow3 = incompleteSessionRow(); - when(session.executeAsync(any(SimpleStatement.class))) - .thenAnswer(invocation -> sessionRow1) - .thenAnswer(invocation -> sessionRow2) - .thenAnswer(invocation -> sessionRow3); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // Then - verify(session, times(3)).executeAsync(statementCaptor.capture()); - List statements = statementCaptor.getAllValues(); - for (int i = 0; i < 3; i++) { - assertSessionQuery(statements.get(i)); - } - - assertThatStage(traceFuture) - .isFailed( - error -> - assertThat(error.getMessage()) - .isEqualTo( - String.format("Trace %s still not complete after 3 attempts", TRACING_ID))); - } - - private CompletionStage completeSessionRow() { - return sessionRow(42); - } - - private CompletionStage incompleteSessionRow() { - return sessionRow(null); - } - - private CompletionStage sessionRow(Integer duration) { - Row row = mock(Row.class); - ColumnDefinitions definitions = mock(ColumnDefinitions.class); - when(row.getColumnDefinitions()).thenReturn(definitions); - when(row.getString("request")).thenReturn("mock request"); - if (duration == null) { - when(row.isNull("duration")).thenReturn(true); - } else { - when(row.getInt("duration")).thenReturn(duration); - } - when(row.getInetAddress("coordinator")).thenReturn(address); - when(definitions.contains("coordinator_port")).thenReturn(true); - when(row.getInt("coordinator_port")).thenReturn(PORT); - when(row.getMap("parameters", String.class, String.class)) - 
.thenReturn(ImmutableMap.of("key1", "value1", "key2", "value2")); - when(row.isNull("started_at")).thenReturn(false); - when(row.getInstant("started_at")).thenReturn(Instant.EPOCH); - - AsyncResultSet rs = mock(AsyncResultSet.class); - when(rs.one()).thenReturn(row); - return CompletableFuture.completedFuture(rs); - } - - private CompletionStage singlePageEventRows() { - List rows = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - rows.add(eventRow(i)); - } - - AsyncResultSet rs = mock(AsyncResultSet.class); - when(rs.currentPage()).thenReturn(rows); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getPagingState()).thenReturn(null); - when(rs.getExecutionInfo()).thenReturn(executionInfo); - - return CompletableFuture.completedFuture(rs); - } - - private CompletionStage multiPageEventRows1() { - AsyncResultSet rs = mock(AsyncResultSet.class); - - ImmutableList rows = ImmutableList.of(eventRow(0)); - when(rs.currentPage()).thenReturn(rows); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getPagingState()).thenReturn(PAGING_STATE); - when(rs.getExecutionInfo()).thenReturn(executionInfo); - - return CompletableFuture.completedFuture(rs); - } - - private CompletionStage multiPageEventRows2() { - AsyncResultSet rs = mock(AsyncResultSet.class); - - ImmutableList rows = ImmutableList.of(eventRow(1)); - when(rs.currentPage()).thenReturn(rows); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getPagingState()).thenReturn(null); - when(rs.getExecutionInfo()).thenReturn(executionInfo); - - return CompletableFuture.completedFuture(rs); - } - - private Row eventRow(int i) { - Row row = mock(Row.class); - ColumnDefinitions definitions = mock(ColumnDefinitions.class); - when(row.getColumnDefinitions()).thenReturn(definitions); - when(row.getString("activity")).thenReturn("mock activity " + i); - when(row.getUuid("event_id")).thenReturn(Uuids.startOf(i)); - 
when(row.getInetAddress("source")).thenReturn(address); - when(definitions.contains("source_port")).thenReturn(true); - when(row.getInt("source_port")).thenReturn(PORT); - when(row.getInt("source_elapsed")).thenReturn(i); - when(row.getString("thread")).thenReturn("mock thread " + i); - return row; - } - - private void assertSessionQuery(SimpleStatement statement) { - assertThat(statement.getQuery()) - .isEqualTo("SELECT * FROM system_traces.sessions WHERE session_id = ?"); - assertThat(statement.getPositionalValues()).containsOnly(TRACING_ID); - assertThat(statement.getExecutionProfile()).isEqualTo(traceConfig); - } - - private void assertEventsQuery(SimpleStatement statement) { - assertThat(statement.getQuery()) - .isEqualTo("SELECT * FROM system_traces.events WHERE session_id = ?"); - assertThat(statement.getPositionalValues()).containsOnly(TRACING_ID); - assertThat(statement.getExecutionProfile()).isEqualTo(traceConfig); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java deleted file mode 100644 index 6a7657d5809..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.ProtocolFeature; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import 
com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.servererrors.DefaultWriteTypeRegistry; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.throttling.PassThroughRequestThrottler; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.protocol.internal.Frame; -import io.netty.channel.EventLoopGroup; -import java.time.Duration; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.stubbing.OngoingStubbing; - -/** - * Provides the environment to test a request handler, where a query plan can be defined, and the - * behavior of each successive node simulated. 
- */ -public class RequestHandlerTestHarness implements AutoCloseable { - - public static Builder builder() { - return new Builder(); - } - - private final CapturingTimer timer = new CapturingTimer(); - private final Map pools; - - @Mock protected InternalDriverContext context; - @Mock protected DefaultSession session; - @Mock protected EventLoopGroup eventLoopGroup; - @Mock protected NettyOptions nettyOptions; - @Mock protected DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - @Mock protected LoadBalancingPolicyWrapper loadBalancingPolicyWrapper; - @Mock protected RetryPolicy retryPolicy; - @Mock protected SpeculativeExecutionPolicy speculativeExecutionPolicy; - @Mock protected TimestampGenerator timestampGenerator; - @Mock protected ProtocolVersionRegistry protocolVersionRegistry; - @Mock protected SessionMetricUpdater sessionMetricUpdater; - - protected RequestHandlerTestHarness(Builder builder) { - MockitoAnnotations.initMocks(this); - - when(nettyOptions.getTimer()).thenReturn(timer); - when(nettyOptions.ioEventLoopGroup()).thenReturn(eventLoopGroup); - when(context.getNettyOptions()).thenReturn(nettyOptions); - - when(defaultProfile.getName()).thenReturn(DriverExecutionProfile.DEFAULT_NAME); - // TODO make configurable in the test, also handle profiles - when(defaultProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .thenReturn(Duration.ofMillis(500)); - when(defaultProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name()); - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)).thenReturn(5000); - when(defaultProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.SERIAL.name()); - when(defaultProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE)) - .thenReturn(builder.defaultIdempotence); - when(defaultProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(true); - - 
when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - when(loadBalancingPolicyWrapper.newQueryPlan( - any(Request.class), anyString(), any(Session.class))) - .thenReturn(builder.buildQueryPlan()); - when(context.getLoadBalancingPolicyWrapper()).thenReturn(loadBalancingPolicyWrapper); - - when(context.getRetryPolicy(anyString())).thenReturn(retryPolicy); - - // Disable speculative executions by default - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), any(CqlIdentifier.class), any(Request.class), anyInt())) - .thenReturn(-1L); - when(context.getSpeculativeExecutionPolicy(anyString())).thenReturn(speculativeExecutionPolicy); - - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - - when(timestampGenerator.next()).thenReturn(Statement.NO_DEFAULT_TIMESTAMP); - when(context.getTimestampGenerator()).thenReturn(timestampGenerator); - - pools = builder.buildMockPools(); - when(session.getChannel(any(Node.class), anyString())) - .thenAnswer( - invocation -> { - Node node = invocation.getArgument(0); - return pools.get(node).next(); - }); - when(session.getRepreparePayloads()).thenReturn(new ConcurrentHashMap<>()); - - when(session.setKeyspace(any(CqlIdentifier.class))) - .thenReturn(CompletableFuture.completedFuture(null)); - - when(session.getMetricUpdater()).thenReturn(sessionMetricUpdater); - when(sessionMetricUpdater.isEnabled(any(SessionMetric.class), anyString())).thenReturn(true); - - when(session.getMetadata()).thenReturn(DefaultMetadata.EMPTY); - - when(context.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - when(protocolVersionRegistry.supports(any(ProtocolVersion.class), any(ProtocolFeature.class))) - .thenReturn(true); - - if (builder.protocolVersion != null) { - when(context.getProtocolVersion()).thenReturn(builder.protocolVersion); - } - - when(context.getConsistencyLevelRegistry()).thenReturn(new DefaultConsistencyLevelRegistry()); - - 
when(context.getWriteTypeRegistry()).thenReturn(new DefaultWriteTypeRegistry()); - - when(context.getRequestThrottler()).thenReturn(new PassThroughRequestThrottler(context)); - - when(context.getRequestTracker()).thenReturn(new NoopRequestTracker(context)); - - when(context.getRequestIdGenerator()) - .thenReturn(Optional.ofNullable(builder.requestIdGenerator)); - } - - public DefaultSession getSession() { - return session; - } - - public InternalDriverContext getContext() { - return context; - } - - public DriverChannel getChannel(Node node) { - ChannelPool pool = pools.get(node); - return pool.next(); - } - - /** - * Returns the next task that was scheduled on the request handler's admin executor. The test must - * run it manually. - */ - public CapturedTimeout nextScheduledTimeout() { - return timer.getNextTimeout(); - } - - @Override - public void close() { - timer.stop(); - } - - public static class Builder { - private final List poolBehaviors = new ArrayList<>(); - private boolean defaultIdempotence; - private ProtocolVersion protocolVersion; - private RequestIdGenerator requestIdGenerator; - - /** - * Sets the given node as the next one in the query plan; an empty pool will be simulated when - * it gets used. - */ - public Builder withEmptyPool(Node node) { - poolBehaviors.add(new PoolBehavior(node, false)); - return this; - } - - /** - * Sets the given node as the next one in the query plan; a channel write failure will be - * simulated when it gets used. - */ - public Builder withWriteFailure(Node node, Throwable cause) { - PoolBehavior behavior = new PoolBehavior(node, true); - behavior.setWriteFailure(cause); - poolBehaviors.add(behavior); - return this; - } - - /** - * Sets the given node as the next one in the query plan; the write to the channel will succeed, - * but a response failure will be simulated immediately after. 
- */ - public Builder withResponseFailure(Node node, Throwable cause) { - PoolBehavior behavior = new PoolBehavior(node, true); - behavior.setWriteSuccess(); - behavior.setResponseFailure(cause); - poolBehaviors.add(behavior); - return this; - } - - /** - * Sets the given node as the next one in the query plan; the write to the channel will succeed, - * and the given response will be simulated immediately after. - */ - public Builder withResponse(Node node, Frame response) { - PoolBehavior behavior = new PoolBehavior(node, true); - behavior.setWriteSuccess(); - behavior.setResponseSuccess(response); - poolBehaviors.add(behavior); - return this; - } - - public Builder withDefaultIdempotence(boolean defaultIdempotence) { - this.defaultIdempotence = defaultIdempotence; - return this; - } - - public Builder withProtocolVersion(ProtocolVersion protocolVersion) { - this.protocolVersion = protocolVersion; - return this; - } - - public Builder withRequestIdGenerator(RequestIdGenerator requestIdGenerator) { - this.requestIdGenerator = requestIdGenerator; - return this; - } - - /** - * Sets the given node as the next one in the query plan; the test code is responsible of - * calling the methods on the returned object to complete the write and the query. - */ - public PoolBehavior customBehavior(Node node) { - PoolBehavior behavior = new PoolBehavior(node, true); - poolBehaviors.add(behavior); - return behavior; - } - - public RequestHandlerTestHarness build() { - return new RequestHandlerTestHarness(this); - } - - private Queue buildQueryPlan() { - ConcurrentLinkedQueue queryPlan = new ConcurrentLinkedQueue<>(); - for (PoolBehavior behavior : poolBehaviors) { - // We don't want duplicates in the query plan: the only way a node is tried multiple times - // is if the retry policy returns a RETRY_SAME, the request handler does not re-read from - // the plan. 
- if (!queryPlan.contains(behavior.node)) { - queryPlan.offer(behavior.node); - } - } - return queryPlan; - } - - private Map buildMockPools() { - Map pools = new ConcurrentHashMap<>(); - Map> stubbings = new HashMap<>(); - for (PoolBehavior behavior : poolBehaviors) { - Node node = behavior.node; - ChannelPool pool = pools.computeIfAbsent(node, n -> mock(ChannelPool.class)); - - // The goal of the code below is to generate the equivalent of: - // - // when(pool.next()) - // .thenReturn(behavior1.channel) - // .thenReturn(behavior2.channel) - // ... - stubbings.compute( - node, - (sameNode, previous) -> { - if (previous == null) { - previous = when(pool.next()); - } - return previous.thenReturn(behavior.channel); - }); - } - return pools; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java deleted file mode 100644 index 54b215458fe..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -public abstract class ResultSetTestBase { - - /** Mocks an async result set where column 0 has type INT, with rows with the provided data. */ - protected AsyncResultSet mockPage(boolean nextPage, Integer... data) { - AsyncResultSet page = mock(AsyncResultSet.class); - - ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class); - when(page.getColumnDefinitions()).thenReturn(columnDefinitions); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(page.getExecutionInfo()).thenReturn(executionInfo); - - if (nextPage) { - when(page.hasMorePages()).thenReturn(true); - when(page.fetchNextPage()).thenReturn(spy(new CompletableFuture<>())); - } else { - when(page.hasMorePages()).thenReturn(false); - when(page.fetchNextPage()).thenThrow(new IllegalStateException()); - } - - // Emulate DefaultAsyncResultSet's internals (this is a bit sketchy, maybe it would be better - // to use real DefaultAsyncResultSet instances) - Queue queue = Lists.newLinkedList(Arrays.asList(data)); - CountingIterator iterator = - new CountingIterator(queue.size()) { - @Override - protected Row computeNext() { - Integer index = queue.poll(); - return (index == null) ? 
endOfData() : mockRow(index); - } - }; - when(page.currentPage()).thenReturn(() -> iterator); - when(page.remaining()).thenAnswer(invocation -> iterator.remaining()); - - return page; - } - - private Row mockRow(int index) { - Row row = mock(Row.class); - when(row.getInt(0)).thenReturn(index); - return row; - } - - protected static void complete(CompletionStage stage, AsyncResultSet result) { - stage.toCompletableFuture().complete(result); - } - - protected void assertNextRow(Iterator iterator, int expectedValue) { - assertThat(iterator.hasNext()).isTrue(); - Row row = iterator.next(); - assertThat(row.getInt(0)).isEqualTo(expectedValue); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java deleted file mode 100644 index 0b5860f7e95..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import java.util.Iterator; -import org.junit.Test; - -public class ResultSetsTest extends ResultSetTestBase { - - @Test - public void should_create_result_set_from_single_page() { - // Given - AsyncResultSet page1 = mockPage(false, 0, 1, 2); - - // When - ResultSet resultSet = ResultSets.newInstance(page1); - - // Then - assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); - assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isFalse(); - } - - @Test - public void should_create_result_set_from_multiple_pages() { - // Given - AsyncResultSet page1 = mockPage(true, 0, 1, 2); - AsyncResultSet page2 = mockPage(true, 3, 4, 5); - AsyncResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - ResultSet resultSet = ResultSets.newInstance(page1); - - // Then - assertThat(resultSet.iterator().hasNext()).isTrue(); - - assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); - assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page2 - 
assertThat(resultSet.getExecutionInfo()).isEqualTo(page2.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()) - .containsExactly(page1.getExecutionInfo(), page2.getExecutionInfo()); - - assertNextRow(iterator, 3); - assertNextRow(iterator, 4); - assertNextRow(iterator, 5); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page3 - assertThat(resultSet.getExecutionInfo()).isEqualTo(page3.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()) - .containsExactly( - page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo()); - - assertNextRow(iterator, 6); - assertNextRow(iterator, 7); - assertNextRow(iterator, 8); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java deleted file mode 100644 index dc3ab0702f7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.Collections; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class StatementSizeTest { - - private static final byte[] MOCK_PAGING_STATE = 
Bytes.getArray(Bytes.fromHexString("0xdeadbeef")); - private static final ByteBuffer MOCK_PAYLOAD_VALUE1 = Bytes.fromHexString("0xabcd"); - private static final ByteBuffer MOCK_PAYLOAD_VALUE2 = Bytes.fromHexString("0xef"); - private static final ImmutableMap MOCK_PAYLOAD = - ImmutableMap.of("key1", MOCK_PAYLOAD_VALUE1, "key2", MOCK_PAYLOAD_VALUE2); - private static final byte[] PREPARED_ID = Bytes.getArray(Bytes.fromHexString("0xaaaa")); - private static final byte[] RESULT_METADATA_ID = Bytes.getArray(Bytes.fromHexString("0xbbbb")); - - @Mock PreparedStatement preparedStatement; - @Mock InternalDriverContext driverContext; - @Mock DriverConfig config; - @Mock DriverExecutionProfile defaultProfile; - @Mock TimestampGenerator timestampGenerator; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - ByteBuffer preparedId = ByteBuffer.wrap(PREPARED_ID); - when(preparedStatement.getId()).thenReturn(preparedId); - ByteBuffer resultMetadataId = ByteBuffer.wrap(RESULT_METADATA_ID); - when(preparedStatement.getResultMetadataId()).thenReturn(resultMetadataId); - - ColumnDefinitions columnDefinitions = - DefaultColumnDefinitions.valueOf( - ImmutableList.of( - phonyColumnDef("ks", "table", "c1", -1, ProtocolConstants.DataType.INT), - phonyColumnDef("ks", "table", "c2", -1, ProtocolConstants.DataType.VARCHAR))); - - when(preparedStatement.getVariableDefinitions()).thenReturn(columnDefinitions); - - when(driverContext.getProtocolVersion()).thenReturn(DefaultProtocolVersion.V5); - when(driverContext.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(driverContext.getProtocolVersionRegistry()) - .thenReturn(new DefaultProtocolVersionRegistry(null)); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(driverContext.getConfig()).thenReturn(config); - when(driverContext.getTimestampGenerator()).thenReturn(timestampGenerator); - } - - private ColumnDefinition phonyColumnDef( - String keyspace, String table, String column, int 
index, int typeCode) { - return new DefaultColumnDefinition( - new ColumnSpec(keyspace, table, column, index, RawType.PRIMITIVES.get(typeCode)), - AttachmentPoint.NONE); - } - - @Test - public void should_measure_size_of_simple_statement() { - String queryString = "SELECT release_version FROM system.local WHERE key = ?"; - SimpleStatement statement = SimpleStatement.newInstance(queryString); - int expectedSize = - 9 // header - + (4 + queryString.getBytes(Charsets.UTF_8).length) // query string - + 2 // consistency level - + 2 // serial consistency level - + 4 // fetch size - + 8 // timestamp - + 4; // flags - - assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); - - String value1 = "local"; - SimpleStatement statementWithAnonymousValue = SimpleStatement.newInstance(queryString, value1); - assertThat(v5SizeOf(statementWithAnonymousValue)) - .isEqualTo( - expectedSize - + 2 // size of number of values - + (4 + value1.getBytes(Charsets.UTF_8).length) // value - ); - - String key1 = "key"; - SimpleStatement statementWithNamedValue = - SimpleStatement.newInstance(queryString, ImmutableMap.of(key1, value1)); - assertThat(v5SizeOf(statementWithNamedValue)) - .isEqualTo( - expectedSize - + 2 // size of number of values - + (2 + key1.getBytes(Charsets.UTF_8).length) // key - + (4 + value1.getBytes(Charsets.UTF_8).length) // value - ); - - SimpleStatement statementWithPagingState = - statement.setPagingState(ByteBuffer.wrap(MOCK_PAGING_STATE)); - assertThat(v5SizeOf(statementWithPagingState)) - .isEqualTo(expectedSize + 4 + MOCK_PAGING_STATE.length); - - SimpleStatement statementWithPayload = statement.setCustomPayload(MOCK_PAYLOAD); - assertThat(v5SizeOf(statementWithPayload)) - .isEqualTo( - expectedSize - + 2 // size of number of keys in the map - // size of each key/value pair - + (2 + "key1".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE1.remaining()) - + (2 + "key2".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE2.remaining())); - - 
SimpleStatement statementWithKeyspace = statement.setKeyspace("testKeyspace"); - assertThat(v5SizeOf(statementWithKeyspace)) - .isEqualTo(expectedSize + 2 + "testKeyspace".getBytes(Charsets.UTF_8).length); - } - - @Test - public void should_measure_size_of_bound_statement() { - - BoundStatement statement = - newBoundStatement( - preparedStatement, - new ByteBuffer[] {ProtocolConstants.UNSET_VALUE, ProtocolConstants.UNSET_VALUE}); - - int expectedSize = - 9 // header size - + 4 // flags - + 2 // consistency level - + 2 // serial consistency level - + 8 // timestamp - + (2 + PREPARED_ID.length) - + (2 + RESULT_METADATA_ID.length) - + 2 // size of value list - + 2 * 4 // two null values (size = -1) - + 4 // fetch size - ; - assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); - - BoundStatement withValues = statement.setInt(0, 0).setString(1, "test"); - expectedSize += - 4 // the size of the int value - + "test".getBytes(Charsets.UTF_8).length; - assertThat(v5SizeOf(withValues)).isEqualTo(expectedSize); - - BoundStatement withPagingState = withValues.setPagingState(ByteBuffer.wrap(MOCK_PAGING_STATE)); - expectedSize += 4 + MOCK_PAGING_STATE.length; - assertThat(v5SizeOf(withPagingState)).isEqualTo(expectedSize); - - BoundStatement withPayload = withPagingState.setCustomPayload(MOCK_PAYLOAD); - expectedSize += - 2 // size of number of keys in the map - // size of each key/value pair - + (2 + "key1".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE1.remaining()) - + (2 + "key2".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE2.remaining()); - assertThat(v5SizeOf(withPayload)).isEqualTo(expectedSize); - } - - @Test - public void should_measure_size_of_batch_statement() { - String queryString = "SELECT release_version FROM system.local"; - String key1 = "key"; - String value1 = "value"; - SimpleStatement statement1 = - SimpleStatement.newInstance(queryString, ImmutableMap.of(key1, value1)); - - BoundStatement statement2 = - newBoundStatement( - 
preparedStatement, - new ByteBuffer[] {ProtocolConstants.UNSET_VALUE, ProtocolConstants.UNSET_VALUE}) - .setInt(0, 0) - .setString(1, "test"); - BoundStatement statement3 = - newBoundStatement( - preparedStatement, - new ByteBuffer[] {ProtocolConstants.UNSET_VALUE, ProtocolConstants.UNSET_VALUE}) - .setInt(0, 0) - .setString(1, "test2"); - - BatchStatement batchStatement = - BatchStatement.newInstance(DefaultBatchType.UNLOGGED) - .add(statement1) - .add(statement2) - .add(statement3); - - int expectedSize = - 9 // header size - + 1 - + 2 // batch type + number of queries - // statements' type of id + id (query string/prepared id): - + 1 - + (4 + queryString.getBytes(Charsets.UTF_8).length) - + 1 - + (2 + PREPARED_ID.length) - + 1 - + (2 + PREPARED_ID.length) - // simple statement values - + 2 // size of number of values - + (2 + key1.getBytes(Charsets.UTF_8).length) // key - + (4 + value1.getBytes(Charsets.UTF_8).length) // value - // bound statements values - + (2 + (4 + 4) + (4 + "test".getBytes(Charsets.UTF_8).length)) - + (2 + (4 + 4) + (4 + "test2".getBytes(Charsets.UTF_8).length)) - + 2 // consistency level - + 2 // serial consistency level - + 8 // timestamp - + 4; // flags - assertThat(v5SizeOf(batchStatement)).isEqualTo(expectedSize); - - BatchStatement withPayload = batchStatement.setCustomPayload(MOCK_PAYLOAD); - expectedSize += - 2 // size of number of keys in the map - // size of each key/value pair - + (2 + "key1".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE1.remaining()) - + (2 + "key2".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE2.remaining()); - assertThat(v5SizeOf(withPayload)).isEqualTo(expectedSize); - } - - private int v5SizeOf(Statement statement) { - return statement.computeSizeInBytes(driverContext); - } - - private BoundStatement newBoundStatement( - PreparedStatement preparedStatement, ByteBuffer[] initialValues) { - return new DefaultBoundStatement( - preparedStatement, - 
preparedStatement.getVariableDefinitions(), - initialValues, - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - -1, - null, - Integer.MIN_VALUE, - null, - null, - null, - CodecRegistry.DEFAULT, - DefaultProtocolVersion.V5, - null, - Statement.NO_NOW_IN_SECONDS); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java deleted file mode 100644 index c27b55e3f25..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java +++ /dev/null @@ -1,485 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.GettableById; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableById; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CqlIntToStringCodec; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import org.junit.Test; - -public abstract class AccessibleByIdTestBase< - T extends GettableById & SettableById & GettableByName & SettableByName> - extends AccessibleByIndexTestBase { - - private static final CqlIdentifier FIELD0_ID = CqlIdentifier.fromInternal("field0"); - private static final String FIELD0_NAME = "field0"; - - @Test - public void should_set_primitive_value_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setInt(FIELD0_ID, 1); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_object_value_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), 
attachmentPoint); - - // When - t = t.setString(FIELD0_ID, "a"); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x61")); - } - - @Test - public void should_set_bytes_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_to_null_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - t = t.setToNull(FIELD0_ID); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isNull(); - } - - @Test - public void should_set_with_explicit_class_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_ID, "1", String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_type_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_ID, "1", GenericType.STRING); - - // Then 
- verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_codec_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_ID, "1", intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_get_primitive_value_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - int i = t.getInt(FIELD0_ID); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).decodePrimitive(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(i).isEqualTo(1); - } - - @Test - public void should_get_object_value_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x61")); - - // When - String s = t.getString(FIELD0_ID); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("a"); - } - - @Test - public void should_get_bytes_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - ByteBuffer bytes = t.getBytesUnsafe(FIELD0_ID); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(bytes).isEqualTo(Bytes.fromHexString("0x00000001")); 
- } - - @Test - public void should_test_if_null_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, null); - - // When - boolean isNull = t.isNull(FIELD0_ID); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(isNull).isTrue(); - } - - @Test - public void should_get_with_explicit_class_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_ID, String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_type_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_ID, GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_codec_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_ID, intToStringCodec); - - // Then - 
verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_set_primitive_value_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setInt(FIELD0_NAME, 1); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_object_value_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - - // When - t = t.setString(FIELD0_NAME, "a"); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x61")); - } - - @Test - public void should_set_bytes_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_to_null_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - t = t.setToNull(FIELD0_NAME); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isNull(); - } - - @Test - public void should_set_with_explicit_class_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = 
newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_NAME, "1", String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_type_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_NAME, "1", GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_codec_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_NAME, "1", intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_get_primitive_value_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - int i = t.getInt(FIELD0_NAME); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).decodePrimitive(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(i).isEqualTo(1); - } - - @Test - public void should_get_object_value_by_name() { - 
// Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x61")); - - // When - String s = t.getString(FIELD0_NAME); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("a"); - } - - @Test - public void should_get_bytes_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - ByteBuffer bytes = t.getBytesUnsafe(FIELD0_NAME); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(bytes).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_test_if_null_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, null); - - // When - boolean isNull = t.isNull(FIELD0_NAME); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(isNull).isTrue(); - } - - @Test - public void should_get_with_explicit_class_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_NAME, String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_type_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> 
intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_NAME, GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_codec_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_NAME, intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test(expected = IllegalArgumentException.class) - @SuppressWarnings("CheckReturnValue") - public void should_fail_when_id_does_not_exists() { - final CqlIdentifier invalidField = CqlIdentifier.fromInternal("invalidField"); - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t.setInt(invalidField, 1); - - // Then the method will throw IllegalArgumentException up to the client. - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java deleted file mode 100644 index 94da926f2bc..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.SettableByIndex; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CqlIntToStringCodec; -import 
com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.List; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public abstract class AccessibleByIndexTestBase> { - - protected abstract T newInstance(List dataTypes, AttachmentPoint attachmentPoint); - - protected abstract T newInstance( - List dataTypes, List values, AttachmentPoint attachmentPoint); - - @Mock protected AttachmentPoint attachmentPoint; - @Mock protected AttachmentPoint v3AttachmentPoint; - @Mock protected CodecRegistry codecRegistry; - protected PrimitiveIntCodec intCodec; - protected TypeCodec doubleCodec; - protected TypeCodec textCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(attachmentPoint.getCodecRegistry()).thenReturn(codecRegistry); - when(attachmentPoint.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); - - when(v3AttachmentPoint.getCodecRegistry()).thenReturn(codecRegistry); - when(v3AttachmentPoint.getProtocolVersion()).thenReturn(DefaultProtocolVersion.V3); - - intCodec = spy(TypeCodecs.INT); - doubleCodec = spy(TypeCodecs.DOUBLE); - textCodec = spy(TypeCodecs.TEXT); - - when(codecRegistry.codecFor(DataTypes.INT, Integer.class)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE, Double.class)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT, String.class)).thenAnswer(i -> textCodec); - - when(codecRegistry.codecFor(DataTypes.INT)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.TEXT)).thenAnswer(t -> textCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE)).thenAnswer(d -> doubleCodec); - } - - @Test - public void should_set_primitive_value_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setInt(0, 1); - - // Then - 
verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_object_value_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - - // When - t = t.setString(0, "a"); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x61")); - } - - @Test - public void should_set_bytes_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_to_null_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - t = t.setToNull(0); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(0)).isNull(); - } - - @Test - public void should_set_with_explicit_class_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(0, "1", String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_type_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = 
spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(0, "1", GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_codec_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(0, "1", intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_values_in_bulk() { - // Given - when(codecRegistry.codecFor(DataTypes.TEXT, "foo")).thenReturn(TypeCodecs.TEXT); - when(codecRegistry.codecFor(DataTypes.INT, 1)).thenReturn(TypeCodecs.INT); - - // When - T t = - newInstance( - ImmutableList.of(DataTypes.TEXT, DataTypes.INT), - ImmutableList.of("foo", 1), - attachmentPoint); - - // Then - assertThat(t.getString(0)).isEqualTo("foo"); - assertThat(t.getInt(1)).isEqualTo(1); - verify(codecRegistry).codecFor(DataTypes.TEXT, "foo"); - verify(codecRegistry).codecFor(DataTypes.INT, 1); - } - - @Test - public void should_set_values_in_bulk_when_not_enough_values() { - // Given - when(codecRegistry.codecFor(DataTypes.TEXT, "foo")).thenReturn(TypeCodecs.TEXT); - - // When - T t = - newInstance( - ImmutableList.of(DataTypes.TEXT, DataTypes.INT), - ImmutableList.of("foo"), - attachmentPoint); - - // Then - assertThat(t.getString(0)).isEqualTo("foo"); - assertThat(t.isNull(1)).isTrue(); - verify(codecRegistry).codecFor(DataTypes.TEXT, "foo"); - } 
- - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_set_values_in_bulk_when_too_many_values() { - newInstance( - ImmutableList.of(DataTypes.TEXT, DataTypes.INT), - ImmutableList.of("foo", 1, "bar"), - attachmentPoint); - } - - @Test - public void should_get_primitive_value_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - int i = t.getInt(0); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).decodePrimitive(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(i).isEqualTo(1); - } - - @Test - public void should_get_object_value_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x61")); - - // When - String s = t.getString(0); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("a"); - } - - @Test - public void should_get_bytes_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - ByteBuffer bytes = t.getBytesUnsafe(0); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(bytes).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_test_if_null_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, null); - - // When - boolean isNull = t.isNull(0); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(isNull).isTrue(); - } - - @Test - public void should_get_with_explicit_class_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - 
when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(0, String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_type_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(0, GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_codec_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(0, intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java deleted file mode 100644 index aed357cb1cd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java +++ /dev/null @@ 
-1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.List; -import org.junit.Test; - -public class DefaultTupleValueTest extends AccessibleByIndexTestBase { - - @Override - protected TupleValue newInstance(List dataTypes, AttachmentPoint attachmentPoint) { - DefaultTupleType type = new DefaultTupleType(dataTypes, attachmentPoint); - return type.newValue(); - } - - @Override - protected TupleValue newInstance( - List 
dataTypes, List values, AttachmentPoint attachmentPoint) { - DefaultTupleType type = new DefaultTupleType(dataTypes, attachmentPoint); - return type.newValue(values.toArray()); - } - - @Test - public void should_serialize_and_deserialize() { - DefaultTupleType type = - new DefaultTupleType(ImmutableList.of(DataTypes.INT, DataTypes.TEXT), attachmentPoint); - TupleValue in = type.newValue(); - in = in.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - in = in.setBytesUnsafe(1, Bytes.fromHexString("0x61")); - - TupleValue out = SerializationHelper.serializeAndDeserialize(in); - - assertThat(out.getType()).isEqualTo(in.getType()); - assertThat(out.getType().isDetached()).isTrue(); - assertThat(Bytes.toHexString(out.getBytesUnsafe(0))).isEqualTo("0x00000001"); - assertThat(Bytes.toHexString(out.getBytesUnsafe(1))).isEqualTo("0x61"); - } - - @Test - public void should_support_null_items_when_setting_in_bulk() { - DefaultTupleType type = - new DefaultTupleType(ImmutableList.of(DataTypes.INT, DataTypes.TEXT), attachmentPoint); - when(codecRegistry.codecFor(DataTypes.INT)).thenReturn(TypeCodecs.INT); - when(codecRegistry.codecFor(DataTypes.TEXT, "foo")).thenReturn(TypeCodecs.TEXT); - TupleValue value = type.newValue(null, "foo"); - - assertThat(value.isNull(0)).isTrue(); - assertThat(value.getString(1)).isEqualTo("foo"); - } - - @Test - public void should_equate_instances_with_same_values_but_different_binary_representations() { - TupleType tupleType = DataTypes.tupleOf(DataTypes.VARINT); - - TupleValue tuple1 = tupleType.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x01")); - TupleValue tuple2 = tupleType.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x0001")); - - assertThat(tuple1).isEqualTo(tuple2); - assertThat(tuple1.hashCode()).isEqualTo(tuple2.hashCode()); - } - - @Test - public void should_not_equate_instances_with_same_binary_representation_but_different_types() { - TupleType tupleType1 = DataTypes.tupleOf(DataTypes.INT); - TupleType tupleType2 = 
DataTypes.tupleOf(DataTypes.VARINT); - - TupleValue tuple1 = tupleType1.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - TupleValue tuple2 = tupleType2.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - assertThat(tuple1).isNotEqualTo(tuple2); - } - - @Test - public void should_equate_instances_with_different_protocol_versions() { - TupleType tupleType1 = DataTypes.tupleOf(DataTypes.TEXT); - tupleType1.attach(attachmentPoint); - - // use the V3 attachmentPoint for type2 - TupleType tupleType2 = DataTypes.tupleOf(DataTypes.TEXT); - tupleType2.attach(v3AttachmentPoint); - - TupleValue tuple1 = tupleType1.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x01")); - TupleValue tuple2 = tupleType2.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x01")); - - assertThat(tuple1).isEqualTo(tuple2); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java deleted file mode 100644 index 6a9f2886783..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.io.UnsupportedEncodingException; -import java.util.List; -import org.junit.Test; - -public class DefaultUdtValueTest extends AccessibleByIdTestBase { - - @Override - protected UdtValue newInstance(List dataTypes, AttachmentPoint attachmentPoint) { - UserDefinedTypeBuilder builder = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")); - for (int i = 0; i < dataTypes.size(); i++) { - builder.withField(CqlIdentifier.fromInternal("field" + i), dataTypes.get(i)); - } - UserDefinedType userDefinedType = builder.build(); - userDefinedType.attach(attachmentPoint); - return userDefinedType.newValue(); - } - - @Override - protected UdtValue newInstance( - List dataTypes, List values, AttachmentPoint attachmentPoint) { - UserDefinedTypeBuilder builder = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")); - for (int i = 0; i < dataTypes.size(); i++) { - builder.withField(CqlIdentifier.fromInternal("field" + i), dataTypes.get(i)); - } - UserDefinedType userDefinedType = builder.build(); - 
userDefinedType.attach(attachmentPoint); - return userDefinedType.newValue(values.toArray()); - } - - @Test - public void should_serialize_and_deserialize() { - UserDefinedType type = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - UdtValue in = type.newValue(); - in = in.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - in = in.setBytesUnsafe(1, Bytes.fromHexString("0x61")); - - UdtValue out = SerializationHelper.serializeAndDeserialize(in); - - assertThat(out.getType()).isEqualTo(in.getType()); - assertThat(out.getType().isDetached()).isTrue(); - assertThat(Bytes.toHexString(out.getBytesUnsafe(0))).isEqualTo("0x00000001"); - assertThat(Bytes.toHexString(out.getBytesUnsafe(1))).isEqualTo("0x61"); - } - - @Test - public void should_support_null_items_when_setting_in_bulk() throws UnsupportedEncodingException { - UserDefinedType type = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - when(codecRegistry.codecFor(DataTypes.INT)).thenReturn(TypeCodecs.INT); - when(codecRegistry.codecFor(DataTypes.TEXT, "foo")).thenReturn(TypeCodecs.TEXT); - UdtValue value = type.newValue(null, "foo"); - - assertThat(value.isNull(0)).isTrue(); - assertThat(value.getString(1)).isEqualTo("foo"); - } - - @Test - public void should_equate_instances_with_same_values_but_different_binary_representations() { - UserDefinedType type = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("f"), DataTypes.VARINT) - .build(); - - UdtValue udt1 = type.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x01")); - 
UdtValue udt2 = type.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x0001")); - - assertThat(udt1).isEqualTo(udt2); - } - - @Test - public void should_format_to_string() { - UserDefinedType type = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("t"), DataTypes.TEXT) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("d"), DataTypes.DOUBLE) - .build(); - - UdtValue udt = type.newValue().setString("t", "foobar").setDouble("d", 3.14); - - assertThat(udt.getFormattedContents()).isEqualTo("{t:'foobar',i:NULL,d:3.14}"); - } - - @Test - public void should_equate_instances_with_different_protocol_versions() { - - UserDefinedType type1 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("t"), DataTypes.TEXT) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("d"), DataTypes.DOUBLE) - .build(); - type1.attach(attachmentPoint); - - // create an idential type, but with a different attachment point - UserDefinedType type2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("t"), DataTypes.TEXT) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("d"), DataTypes.DOUBLE) - .build(); - type2.attach(v3AttachmentPoint); - UdtValue udt1 = - type1.newValue().setString("t", "some text string").setInt("i", 42).setDouble("d", 3.14); - UdtValue udt2 = - type2.newValue().setString("t", "some text string").setInt("i", 42).setDouble("d", 3.14); - assertThat(udt1).isEqualTo(udt2); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java deleted file mode 100644 index 697a32fb029..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class IdentifierIndexTest { - private static final CqlIdentifier Foo = CqlIdentifier.fromInternal("Foo"); - private static final CqlIdentifier foo = CqlIdentifier.fromInternal("foo"); - private static final CqlIdentifier fOO = CqlIdentifier.fromInternal("fOO"); - private IdentifierIndex index = - new IdentifierIndex(ImmutableList.of(Foo, foo, fOO, Foo, foo, fOO)); - - @Test - public void should_find_first_index_of_existing_identifier() { - assertThat(index.firstIndexOf(Foo)).isEqualTo(0); - assertThat(index.firstIndexOf(foo)).isEqualTo(1); - assertThat(index.firstIndexOf(fOO)).isEqualTo(2); - } - - @Test - public void should_not_find_index_of_nonexistent_identifier() { - assertThat(index.firstIndexOf(CqlIdentifier.fromInternal("FOO"))).isEqualTo(-1); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_find_first_index_of_case_insensitive_name(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(index.firstIndexOf("foo")).isEqualTo(0); - assertThat(index.firstIndexOf("FOO")).isEqualTo(0); - assertThat(index.firstIndexOf("fOO")).isEqualTo(0); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_not_find_first_index_of_nonexistent_case_insensitive_name(Locale locale) { - Locale def = Locale.getDefault(); - try { - 
Locale.setDefault(locale); - assertThat(index.firstIndexOf("bar")).isEqualTo(-1); - assertThat(index.firstIndexOf("BAR")).isEqualTo(-1); - assertThat(index.firstIndexOf("bAR")).isEqualTo(-1); - } finally { - Locale.setDefault(def); - } - } - - @Test - public void should_find_first_index_of_case_sensitive_name() { - assertThat(index.firstIndexOf("\"Foo\"")).isEqualTo(0); - assertThat(index.firstIndexOf("\"foo\"")).isEqualTo(1); - assertThat(index.firstIndexOf("\"fOO\"")).isEqualTo(2); - } - - @Test - public void should_not_find_index_of_nonexistent_case_sensitive_name() { - assertThat(index.firstIndexOf("\"FOO\"")).isEqualTo(-1); - } - - @Test - public void should_find_all_indices_of_existing_identifier() { - assertThat(index.allIndicesOf(Foo)).containsExactly(0, 3); - assertThat(index.allIndicesOf(foo)).containsExactly(1, 4); - assertThat(index.allIndicesOf(fOO)).containsExactly(2, 5); - } - - @Test - public void should_not_find_indices_of_nonexistent_identifier() { - assertThat(index.allIndicesOf(CqlIdentifier.fromInternal("FOO"))).isEmpty(); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_find_all_indices_of_case_insensitive_name(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(index.allIndicesOf("foo")).containsExactly(0, 1, 2, 3, 4, 5); - assertThat(index.allIndicesOf("FOO")).containsExactly(0, 1, 2, 3, 4, 5); - assertThat(index.allIndicesOf("fOO")).containsExactly(0, 1, 2, 3, 4, 5); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_not_find_indices_of_nonexistent_case_insensitive_name(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(index.allIndicesOf("bar")).isEmpty(); - assertThat(index.allIndicesOf("BAR")).isEmpty(); - assertThat(index.allIndicesOf("bAR")).isEmpty(); - } finally { - 
Locale.setDefault(def); - } - } - - @Test - public void should_find_all_indices_of_case_sensitive_name() { - assertThat(index.allIndicesOf("\"Foo\"")).containsExactly(0, 3); - assertThat(index.allIndicesOf("\"foo\"")).containsExactly(1, 4); - assertThat(index.allIndicesOf("\"fOO\"")).containsExactly(2, 5); - } - - @Test - public void should_not_find_indices_of_nonexistent_case_sensitive_name() { - assertThat(index.allIndicesOf("\"FOO\"")).isEmpty(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcAgnosticTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcAgnosticTest.java deleted file mode 100644 index 3c832812662..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcAgnosticTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Optional; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyDcAgnosticTest extends BasicLoadBalancingPolicyQueryPlanTest { - - @Before - @Override - public void setup() { - super.setup(); - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(metadata.getTokenMap()).thenAnswer(invocation -> Optional.of(this.tokenMap)); - - // since there is no local datacenter defined, the policy should behave with DC awareness - // disabled and pick nodes regardless of their datacenters; we therefore expect all tests of - // BasicLoadBalancingPolicyQueryPlanTest to pass even with the below DC distribution. 
- when(node1.getDatacenter()).thenReturn("dc1"); - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(node4.getDatacenter()).thenReturn("dc4"); - when(node5.getDatacenter()).thenReturn(null); - - policy = createAndInitPolicy(); - - assertThat(policy.getLiveNodes().dc(null)).containsExactly(node1, node2, node3, node4, node5); - assertThat(policy.getLiveNodes().dcs()).isEmpty(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcFailoverTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcFailoverTest.java deleted file mode 100644 index dc955c6e5de..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcFailoverTest.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Map; -import java.util.UUID; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyDcFailoverTest extends BasicLoadBalancingPolicyQueryPlanTest { - - @Mock protected DefaultNode node6; - @Mock protected DefaultNode node7; - @Mock protected DefaultNode node8; - @Mock protected DefaultNode node9; - - @Test - @Override - public void should_prioritize_single_replica() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); - - // node3 always first, round-robin on the rest, then remote nodes - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node1, node2, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - 
.containsExactly(node3, node2, node1, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node1, node2, node4, node5, node7, node8); - - // Should not shuffle replicas since there is only one - verify(policy, never()).shuffleHead(any(), eq(1)); - // But should shuffle remote nodes - verify(policy, times(3)).shuffleHead(any(), eq(4)); - } - - @Test - @Override - public void should_prioritize_and_shuffle_replicas() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .thenReturn(ImmutableSet.of(node2, node3, node5, node8)); - - // node 5 and 8 being in a remote DC, they don't get a boost for being a replica - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node1, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node1, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node1, node4, node5, node7, node8); - - // should shuffle replicas - verify(policy, times(3)).shuffleHead(any(), eq(2)); - // should shuffle remote nodes - verify(policy, times(3)).shuffleHead(any(), eq(4)); - // No power of two choices with only two replicas - verify(session, never()).getPools(); - } - - @Override - protected void assertRoundRobinQueryPlans() { - // nodes 4 to 9 being in a remote DC, they always appear after nodes 1, 2, 3 - for (int i = 0; i < 3; i++) { - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node1, node2, node3, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node1, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node1, node2, node4, node5, node7, node8); - } - // should shuffle remote nodes - 
verify(policy, atLeast(1)).shuffleHead(any(), eq(4)); - } - - @Override - protected BasicLoadBalancingPolicy createAndInitPolicy() { - when(node4.getDatacenter()).thenReturn("dc2"); - when(node5.getDatacenter()).thenReturn("dc2"); - when(node6.getDatacenter()).thenReturn("dc2"); - when(node7.getDatacenter()).thenReturn("dc3"); - when(node8.getDatacenter()).thenReturn("dc3"); - when(node9.getDatacenter()).thenReturn("dc3"); - // Accept 2 nodes per remote DC - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(2); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - BasicLoadBalancingPolicy policy = - spy( - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] currentNodes, int headLength) { - // nothing (keep in same order) - } - }); - Map nodes = - ImmutableMap.builder() - .put(UUID.randomUUID(), node1) - .put(UUID.randomUUID(), node2) - .put(UUID.randomUUID(), node3) - .put(UUID.randomUUID(), node4) - .put(UUID.randomUUID(), node5) - .put(UUID.randomUUID(), node6) - .put(UUID.randomUUID(), node7) - .put(UUID.randomUUID(), node8) - .put(UUID.randomUUID(), node9) - .build(); - policy.init(nodes, distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node4, node5); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node7, node8); // only 2 allowed - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDistanceTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDistanceTest.java deleted file mode 100644 index 5b2b6bf864d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDistanceTest.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyDistanceTest extends LoadBalancingPolicyTestBase { - - @Mock private NodeDistanceEvaluator nodeDistanceEvaluator; - - private ImmutableMap nodes; - - @Before - @Override - public void setup() { - super.setup(); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn(nodeDistanceEvaluator); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); - nodes = - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3); - } - - @Test - public void should_report_distance_reported_by_user_distance_reporter() { - // Given - given(node2.getDatacenter()).willReturn("dc2"); - given(nodeDistanceEvaluator.evaluateDistance(node1, "dc1")).willReturn(NodeDistance.LOCAL); - given(nodeDistanceEvaluator.evaluateDistance(node2, 
"dc1")).willReturn(NodeDistance.REMOTE); - given(nodeDistanceEvaluator.evaluateDistance(node3, "dc1")).willReturn(NodeDistance.IGNORED); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - } - - @Test - public void should_report_LOCAL_when_dc_agnostic() { - // Given - given(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .willReturn(false); - given(node1.getDatacenter()).willReturn(null); - given(node2.getDatacenter()).willReturn("dc1"); - given(node3.getDatacenter()).willReturn("dc2"); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc(null)).containsExactly(node1, node2, node3); - } - - @Test - public void should_report_LOCAL_when_node_in_local_dc() { - // Given - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - } - - @Test - public void should_report_IGNORED_when_node_not_in_local_dc() { - // Given - given(node1.getDatacenter()).willReturn(null); - given(node2.getDatacenter()).willReturn("dc2"); - given(node3.getDatacenter()).willReturn("dc3"); - 
BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - // Note: driver 3 would have reported LOCAL for node1 since its datacenter is null - verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc(null)).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc3")).isEmpty(); - } - - @Test - public void should_report_REMOTE_when_node_not_in_local_dc_and_dc_failover_enabled() { - // Given - given(node1.getDatacenter()).willReturn("dc2"); - given(node2.getDatacenter()).willReturn("dc3"); - given(node3.getDatacenter()).willReturn("dc4"); - given( - defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .willReturn(1); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.REMOTE); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1); - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node2); - assertThat(policy.getLiveNodes().dc("dc4")).containsExactly(node3); - } - - @Test - public void should_report_IGNORED_when_node_not_in_local_dc_and_too_many_nodes_for_dc_failover() { - // Given - given(node1.getDatacenter()).willReturn("dc2"); - given(node2.getDatacenter()).willReturn("dc2"); - given(node3.getDatacenter()).willReturn("dc2"); - given( - defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - 
.willReturn(2); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1, node2); - } - - @Test - public void should_report_REMOTE_when_remote_node_up_and_dc_failover() { - // Given - given(node1.getDatacenter()).willReturn("dc2"); - given(node2.getDatacenter()).willReturn("dc2"); - given(node3.getDatacenter()).willReturn("dc2"); - given(node4.getDatacenter()).willReturn("dc2"); - given( - defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .willReturn(4); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - policy.onUp(node4); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node4, NodeDistance.REMOTE); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1, node2, node3, node4); - } - - @Test - public void should_report_IGNORED_when_remote_node_up_and_too_many_nodes_for_dc_failover() { - // Given - given(node1.getDatacenter()).willReturn("dc2"); - given(node2.getDatacenter()).willReturn("dc2"); - given(node3.getDatacenter()).willReturn("dc2"); - given(node4.getDatacenter()).willReturn("dc2"); - given( - defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .willReturn(3); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - 
policy.onUp(node4); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node4, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1, node2, node3); - } - - @NonNull - protected BasicLoadBalancingPolicy createPolicy() { - return new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyEventsTest.java deleted file mode 100644 index 9959ddbd1bc..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyEventsTest.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyEventsTest extends LoadBalancingPolicyTestBase { - - @Mock private NodeDistanceEvaluator nodeDistanceEvaluator; - - private BasicLoadBalancingPolicy policy; - - @Before - @Override - public void setup() { - super.setup(); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn(nodeDistanceEvaluator); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - policy = createAndInitPolicy(); - reset(distanceReporter); - } - - @Test - public void should_remove_down_node_from_live_set() { - // When - policy.onDown(node2); - - // Then - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - verify(distanceReporter, never()).setDistance(eq(node2), any(NodeDistance.class)); - // should have been called only once, during initialization, but not 
during onDown - verify(nodeDistanceEvaluator).evaluateDistance(node2, "dc1"); - } - - @Test - public void should_remove_removed_node_from_live_set() { - // When - policy.onRemove(node2); - - // Then - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - verify(distanceReporter, never()).setDistance(eq(node2), any(NodeDistance.class)); - // should have been called only once, during initialization, but not during onRemove - verify(nodeDistanceEvaluator).evaluateDistance(node2, "dc1"); - } - - @Test - public void should_set_added_node_to_local() { - // When - policy.onAdd(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - verify(nodeDistanceEvaluator).evaluateDistance(node3, "dc1"); - // Not added to the live set yet, we're waiting for the pool to open - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - } - - @Test - public void should_ignore_added_node_when_filtered() { - // Given - when(nodeDistanceEvaluator.evaluateDistance(node3, "dc1")).thenReturn(NodeDistance.IGNORED); - - // When - policy.onAdd(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - } - - @Test - public void should_ignore_added_node_when_remote_dc() { - // Given - when(node3.getDatacenter()).thenReturn("dc2"); - - // When - policy.onAdd(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); - } - - @Test - public void should_add_up_node_to_live_set() { - // When - policy.onUp(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - verify(nodeDistanceEvaluator).evaluateDistance(node3, "dc1"); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - } - - @Test - public void 
should_ignore_up_node_when_filtered() { - // Given - when(nodeDistanceEvaluator.evaluateDistance(node3, "dc1")).thenReturn(NodeDistance.IGNORED); - - // When - policy.onUp(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - verify(nodeDistanceEvaluator).evaluateDistance(node3, "dc1"); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - } - - @Test - public void should_ignore_up_node_when_remote_dc() { - // Given - when(node3.getDatacenter()).thenReturn("dc2"); - - // When - policy.onUp(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); - } - - @NonNull - protected BasicLoadBalancingPolicy createAndInitPolicy() { - BasicLoadBalancingPolicy policy = - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyInitTest.java deleted file mode 100644 index 1863e7357e1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyInitTest.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.filter; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.spi.ILoggingEvent; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyInitTest extends LoadBalancingPolicyTestBase { - - @Test - public void should_use_local_dc_if_provided_via_config() { - // Given - 
when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn("dc1"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // the parent class sets the config option to "dc1" - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_use_local_dc_if_provided_via_context() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // note: programmatic takes priority, the config won't even be inspected so no need to stub the - // option to null - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - verify(defaultProfile, never()) - .getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); - } - - @Test - public void should_not_infer_local_dc_if_not_provided() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(node1.getDatacenter()).thenReturn("dc1"); - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - BasicLoadBalancingPolicy policy = - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) {}; - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isNull(); - // should not warn about contact points not being in the same DC - 
verify(appender, never()).doAppend(loggingEventCaptor.capture()); - } - - @Test - public void should_warn_if_contact_points_not_in_local_dc() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable warnLogs = - filter(loggingEventCaptor.getAllValues()).with("level", Level.WARN).get(); - assertThat(warnLogs).hasSize(1); - assertThat(warnLogs.iterator().next().getFormattedMessage()) - .contains( - "You specified dc1 as the local DC, but some contact points are from a different DC") - .contains("node2=dc2") - .contains("node3=dc3"); - } - - @Test - public void should_include_nodes_from_local_dc_if_local_dc_set() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(node1.getState()).thenReturn(NodeState.UP); - when(node2.getState()).thenReturn(NodeState.DOWN); - when(node3.getState()).thenReturn(NodeState.UNKNOWN); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - // Set distance for all nodes in the local DC - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - // But only include UP or UNKNOWN nodes in the live set - 
assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node3); - } - - @Test - public void should_ignore_nodes_from_remote_dcs_if_local_dc_set() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - } - - @Test - public void should_not_ignore_nodes_from_remote_dcs_if_local_dc_not_set() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - } - - @Test - public void should_ignore_nodes_excluded_by_distance_reporter() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - 
.thenReturn((node, dc) -> node.equals(node1) ? NodeDistance.IGNORED : null); - - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node2, node3); - } - - @NonNull - protected BasicLoadBalancingPolicy createPolicy() { - return new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyPreferredRemoteDcsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyPreferredRemoteDcsTest.java deleted file mode 100644 index cefdfd31189..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyPreferredRemoteDcsTest.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Map; -import java.util.UUID; -import org.junit.Test; -import org.mockito.Mock; - -public class BasicLoadBalancingPolicyPreferredRemoteDcsTest - extends BasicLoadBalancingPolicyDcFailoverTest { - @Mock protected DefaultNode node10; - @Mock protected DefaultNode node11; - @Mock protected DefaultNode node12; - @Mock protected DefaultNode node13; - @Mock protected DefaultNode node14; - - @Override - @Test - public void should_prioritize_single_replica() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); - - // node3 always first, round-robin on the rest - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node3, node1, node2, node4, node5, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, 
session)) - .containsExactly( - node3, node2, node4, node5, node1, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node3, node4, node5, node1, node2, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node3, node5, node1, node2, node4, node9, node10, node6, node7, node12, node13); - - // Should not shuffle replicas since there is only one - verify(policy, never()).shuffleHead(any(), eq(1)); - // But should shuffle remote nodes - verify(policy, times(12)).shuffleHead(any(), eq(2)); - } - - @Override - @Test - public void should_prioritize_and_shuffle_replicas() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .thenReturn(ImmutableSet.of(node1, node2, node3, node6, node9)); - - // node 6 and 9 being in a remote DC, they don't get a boost for being a replica - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node1, node2, node3, node4, node5, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node1, node2, node3, node5, node4, node9, node10, node6, node7, node12, node13); - - // should shuffle replicas - verify(policy, times(2)).shuffleHead(any(), eq(3)); - // should shuffle remote nodes - verify(policy, times(6)).shuffleHead(any(), eq(2)); - // No power of two choices with only two replicas - verify(session, never()).getPools(); - } - - @Override - protected void assertRoundRobinQueryPlans() { - for (int i = 0; i < 3; i++) { - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node1, node2, node3, node4, node5, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node2, node3, node4, node5, node1, node9, node10, node6, node7, 
node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node3, node4, node5, node1, node2, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node4, node5, node1, node2, node3, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node5, node1, node2, node3, node4, node9, node10, node6, node7, node12, node13); - } - - verify(policy, atLeast(15)).shuffleHead(any(), eq(2)); - } - - @Override - protected BasicLoadBalancingPolicy createAndInitPolicy() { - when(node4.getDatacenter()).thenReturn("dc1"); - when(node5.getDatacenter()).thenReturn("dc1"); - when(node6.getDatacenter()).thenReturn("dc2"); - when(node7.getDatacenter()).thenReturn("dc2"); - when(node8.getDatacenter()).thenReturn("dc2"); - when(node9.getDatacenter()).thenReturn("dc3"); - when(node10.getDatacenter()).thenReturn("dc3"); - when(node11.getDatacenter()).thenReturn("dc3"); - when(node12.getDatacenter()).thenReturn("dc4"); - when(node13.getDatacenter()).thenReturn("dc4"); - when(node14.getDatacenter()).thenReturn("dc4"); - - // Accept 2 nodes per remote DC - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(2); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - - when(defaultProfile.getStringList( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS)) - .thenReturn(ImmutableList.of("dc3", "dc2")); - - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - BasicLoadBalancingPolicy policy = - spy( - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] currentNodes, int headLength) { - // nothing (keep in same order) - 
} - }); - Map nodes = - ImmutableMap.builder() - .put(UUID.randomUUID(), node1) - .put(UUID.randomUUID(), node2) - .put(UUID.randomUUID(), node3) - .put(UUID.randomUUID(), node4) - .put(UUID.randomUUID(), node5) - .put(UUID.randomUUID(), node6) - .put(UUID.randomUUID(), node7) - .put(UUID.randomUUID(), node8) - .put(UUID.randomUUID(), node9) - .put(UUID.randomUUID(), node10) - .put(UUID.randomUUID(), node11) - .put(UUID.randomUUID(), node12) - .put(UUID.randomUUID(), node13) - .put(UUID.randomUUID(), node14) - .build(); - policy.init(nodes, distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3, node4, node5); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node6, node7); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node9, node10); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc4")).containsExactly(node12, node13); // only 2 allowed - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyQueryPlanTest.java deleted file mode 100644 index 50670ab317a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyQueryPlanTest.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static java.util.Collections.emptySet; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Optional; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - 
-// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyQueryPlanTest extends LoadBalancingPolicyTestBase { - - protected static final CqlIdentifier KEYSPACE = CqlIdentifier.fromInternal("ks"); - protected static final ByteBuffer ROUTING_KEY = Bytes.fromHexString("0xdeadbeef"); - - @Mock protected Request request; - @Mock protected DefaultSession session; - @Mock protected Metadata metadata; - @Mock protected TokenMap tokenMap; - @Mock protected Token routingToken; - - protected BasicLoadBalancingPolicy policy; - - @Before - @Override - public void setup() { - super.setup(); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(metadata.getTokenMap()).thenAnswer(invocation -> Optional.of(this.tokenMap)); - - policy = createAndInitPolicy(); - } - - @Test - public void should_use_round_robin_when_no_request() { - // Given - request = null; - - // When - assertRoundRobinQueryPlans(); - - // Then - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void should_use_round_robin_when_no_session() { - // Given - session = null; - - // When - assertRoundRobinQueryPlans(); - - // Then - then(request).should(never()).getRoutingKey(); - then(request).should(never()).getRoutingToken(); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void should_use_round_robin_when_request_has_no_routing_keyspace() { - // By default from Mockito: - assertThat(request.getKeyspace()).isNull(); - assertThat(request.getRoutingKeyspace()).isNull(); - - 
assertRoundRobinQueryPlans(); - - then(request).should(never()).getRoutingKey(); - then(request).should(never()).getRoutingToken(); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void should_use_round_robin_when_request_has_no_routing_key_or_token() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - assertThat(request.getRoutingKey()).isNull(); - assertThat(request.getRoutingToken()).isNull(); - - assertRoundRobinQueryPlans(); - - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void should_use_round_robin_when_token_map_absent() { - when(metadata.getTokenMap()).thenReturn(Optional.empty()); - - assertRoundRobinQueryPlans(); - - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void - should_use_round_robin_when_token_map_returns_no_replicas_using_request_keyspace_and_routing_key() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(Collections.emptySet()); - - assertRoundRobinQueryPlans(); - - then(tokenMap).should(atLeast(1)).getReplicas(KEYSPACE, ROUTING_KEY); - } - - @Test - public void - should_use_round_robin_when_token_map_returns_no_replicas_using_session_keyspace_and_routing_key() { - // Given - given(request.getKeyspace()).willReturn(null); - given(request.getRoutingKeyspace()).willReturn(null); - given(session.getKeyspace()).willReturn(Optional.of(KEYSPACE)); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - 
given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).willReturn(emptySet()); - // When - assertRoundRobinQueryPlans(); - // Then - then(tokenMap).should(atLeast(1)).getReplicas(KEYSPACE, ROUTING_KEY); - } - - @Test - public void - should_use_round_robin_when_token_map_returns_no_replicas_using_request_keyspace_and_routing_token() { - // Given - given(request.getKeyspace()).willReturn(null); - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingToken()).willReturn(routingToken); - given(tokenMap.getReplicas(KEYSPACE, routingToken)).willReturn(emptySet()); - // When - assertRoundRobinQueryPlans(); - // Then - then(tokenMap).should(atLeast(1)).getReplicas(KEYSPACE, routingToken); - } - - @Test - public void should_use_round_robin_and_log_error_when_request_throws() { - // Given - given(request.getKeyspace()).willThrow(new NullPointerException()); - // When - policy.newQueryPlan(request, session); - // Then - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .contains("Unexpected error while trying to compute query plan"); - } - - protected void assertRoundRobinQueryPlans() { - for (int i = 0; i < 3; i++) { - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node1, node2, node3, node4, node5); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node4, node5, node1); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node4, node5, node1, node2); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node4, node5, node1, node2, node3); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node5, node1, node2, node3, node4); - } - } - - @Test - public void should_prioritize_single_replica() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, 
ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); - - // node3 always first, round-robin on the rest - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node1, node2, node4, node5); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node2, node4, node5, node1); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node4, node5, node1, node2); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node1, node2, node4); - - // Should not shuffle replicas since there is only one - verify(policy, never()).shuffleHead(any(), anyInt()); - } - - @Test - public void should_prioritize_and_shuffle_replicas() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3, node5)); - - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node1, node2, node4); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node2, node4, node1); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node4, node1, node2); - - verify(policy, times(3)).shuffleHead(any(), eq(2)); - // No power of two choices with only two replicas - verify(session, never()).getPools(); - } - - protected BasicLoadBalancingPolicy createAndInitPolicy() { - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - BasicLoadBalancingPolicy policy = - spy( - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] currentNodes, int headLength) { - // nothing (keep in same order) - } - }); - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, - UUID.randomUUID(), node2, - UUID.randomUUID(), node3, - UUID.randomUUID(), node4, - 
UUID.randomUUID(), node5), - distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3, node4, node5); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDcFailoverTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDcFailoverTest.java deleted file mode 100644 index dd9b74158f1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDcFailoverTest.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Map; -import java.util.UUID; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class DcInferringLoadBalancingPolicyDcFailoverTest - extends BasicLoadBalancingPolicyDcFailoverTest { - - @Override - protected DcInferringLoadBalancingPolicy createAndInitPolicy() { - when(node4.getDatacenter()).thenReturn("dc2"); - when(node5.getDatacenter()).thenReturn("dc2"); - when(node6.getDatacenter()).thenReturn("dc2"); - when(node7.getDatacenter()).thenReturn("dc3"); - when(node8.getDatacenter()).thenReturn("dc3"); - when(node9.getDatacenter()).thenReturn("dc3"); - // Accept 2 nodes per remote DC - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(2); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - DcInferringLoadBalancingPolicy policy = - spy( - new DcInferringLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] array, int n) {} - }); - Map nodes = - ImmutableMap.builder() - .put(UUID.randomUUID(), node1) - .put(UUID.randomUUID(), node2) - 
.put(UUID.randomUUID(), node3) - .put(UUID.randomUUID(), node4) - .put(UUID.randomUUID(), node5) - .put(UUID.randomUUID(), node6) - .put(UUID.randomUUID(), node7) - .put(UUID.randomUUID(), node8) - .put(UUID.randomUUID(), node9) - .build(); - policy.init(nodes, distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node4, node5); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node7, node8); // only 2 allowed - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDistanceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDistanceTest.java deleted file mode 100644 index 80c414aa8f2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDistanceTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class DcInferringLoadBalancingPolicyDistanceTest - extends BasicLoadBalancingPolicyDistanceTest { - - @Override - public void should_report_LOCAL_when_dc_agnostic() { - // This policy cannot operate when contact points are from different DCs - Throwable error = catchThrowable(super::should_report_LOCAL_when_dc_agnostic); - assertThat(error) - .isInstanceOfSatisfying( - IllegalStateException.class, - ise -> - assertThat(ise) - .hasMessageContaining( - "No local DC was provided, but the contact points are from different DCs") - .hasMessageContaining("node1=null") - .hasMessageContaining("node2=dc1") - .hasMessageContaining("node3=dc2")); - } - - @NonNull - @Override - protected BasicLoadBalancingPolicy createPolicy() { - return new DcInferringLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyEventsTest.java deleted file mode 100644 index 218d6338df9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyEventsTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.mockito.Mockito.reset; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; - -public class DcInferringLoadBalancingPolicyEventsTest extends BasicLoadBalancingPolicyEventsTest { - - @Override - @NonNull - protected BasicLoadBalancingPolicy createAndInitPolicy() { - DcInferringLoadBalancingPolicy policy = - new DcInferringLoadBalancingPolicy(context, DEFAULT_NAME); - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsOnly(node1, node2); - reset(distanceReporter); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyInitTest.java deleted file mode 100644 index 20de3afe9c3..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyInitTest.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.assertj.core.api.Assertions.filter; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.spi.ILoggingEvent; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Test; - -public class 
DcInferringLoadBalancingPolicyInitTest extends LoadBalancingPolicyTestBase { - - @Test - public void should_use_local_dc_if_provided_via_config() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // the parent class sets the config option to "dc1" - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_use_local_dc_if_provided_via_context() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // note: programmatic takes priority, the config won't even be inspected so no need to stub the - // option to null - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - verify(defaultProfile, never()) - .getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); - } - - @Test - public void should_infer_local_dc_from_contact_points() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_require_local_dc_if_contact_points_from_different_dcs() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - 
when(node2.getDatacenter()).thenReturn("dc2"); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - Throwable t = - catchThrowable( - () -> - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), - distanceReporter)); - - // Then - assertThat(t) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining( - "No local DC was provided, but the contact points are from different DCs: node1=dc1, node2=dc2"); - } - - @Test - public void should_require_local_dc_if_contact_points_have_null_dcs() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(node1.getDatacenter()).thenReturn(null); - when(node2.getDatacenter()).thenReturn(null); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - Throwable t = - catchThrowable( - () -> - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), - distanceReporter)); - - // Then - assertThat(t) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining( - "The local DC could not be inferred from contact points, please set it explicitly"); - } - - @Test - public void should_warn_if_contact_points_not_in_local_dc() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable warnLogs = - filter(loggingEventCaptor.getAllValues()).with("level", Level.WARN).get(); - assertThat(warnLogs).hasSize(1); - 
assertThat(warnLogs.iterator().next().getFormattedMessage()) - .contains( - "You specified dc1 as the local DC, but some contact points are from a different DC") - .contains("node2=dc2") - .contains("node3=dc3"); - } - - @Test - public void should_include_nodes_from_local_dc() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(node1.getState()).thenReturn(NodeState.UP); - when(node2.getState()).thenReturn(NodeState.DOWN); - when(node3.getState()).thenReturn(NodeState.UNKNOWN); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - // Set distance for all nodes in the local DC - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - // But only include UP or UNKNOWN nodes in the live set - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node3); - } - - @Test - public void should_ignore_nodes_from_remote_dcs() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - } - - @Test - public void should_ignore_nodes_excluded_by_distance_reporter() { - // Given - 
when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn((node, dc) -> node.equals(node1) ? NodeDistance.IGNORED : null); - - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node2, node3); - } - - @NonNull - protected DcInferringLoadBalancingPolicy createPolicy() { - return new DcInferringLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyQueryPlanTest.java deleted file mode 100644 index 23d4636a615..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyQueryPlanTest.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.mockito.Mockito.spy; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.UUID; - -public class DcInferringLoadBalancingPolicyQueryPlanTest - extends DefaultLoadBalancingPolicyQueryPlanTest { - - @Override - protected DcInferringLoadBalancingPolicy createAndInitPolicy() { - DcInferringLoadBalancingPolicy policy = - spy( - new DcInferringLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] array, int n) {} - - @Override - protected long nanoTime() { - return nanoTime; - } - - @Override - protected int diceRoll1d4() { - return diceRoll; - } - }); - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, - UUID.randomUUID(), node2, - UUID.randomUUID(), node3, - UUID.randomUUID(), node4, - UUID.randomUUID(), node5), - distanceReporter); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDcFailoverTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDcFailoverTest.java deleted file mode 100644 index f2e741fd756..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDcFailoverTest.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Map; -import java.util.UUID; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class DefaultLoadBalancingPolicyDcFailoverTest - extends BasicLoadBalancingPolicyDcFailoverTest { - - @Override - protected DefaultLoadBalancingPolicy createAndInitPolicy() { - when(node4.getDatacenter()).thenReturn("dc2"); - when(node5.getDatacenter()).thenReturn("dc2"); - when(node6.getDatacenter()).thenReturn("dc2"); - when(node7.getDatacenter()).thenReturn("dc3"); - when(node8.getDatacenter()).thenReturn("dc3"); - when(node9.getDatacenter()).thenReturn("dc3"); - // Accept 2 
nodes per remote DC - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(2); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - DefaultLoadBalancingPolicy policy = - spy( - new DefaultLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] array, int n) {} - }); - Map nodes = - ImmutableMap.builder() - .put(UUID.randomUUID(), node1) - .put(UUID.randomUUID(), node2) - .put(UUID.randomUUID(), node3) - .put(UUID.randomUUID(), node4) - .put(UUID.randomUUID(), node5) - .put(UUID.randomUUID(), node6) - .put(UUID.randomUUID(), node7) - .put(UUID.randomUUID(), node8) - .put(UUID.randomUUID(), node9) - .build(); - policy.init(nodes, distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node4, node5); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node7, node8); // only 2 allowed - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDistanceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDistanceTest.java deleted file mode 100644 index 9cf30e048e9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDistanceTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class DefaultLoadBalancingPolicyDistanceTest extends BasicLoadBalancingPolicyDistanceTest { - - @Override - public void should_report_LOCAL_when_dc_agnostic() { - // This policy cannot operate in dc-agnostic mode - Throwable error = catchThrowable(super::should_report_LOCAL_when_dc_agnostic); - assertThat(error) - .isInstanceOfSatisfying( - IllegalStateException.class, - ise -> - assertThat(ise) - .hasMessageContaining("the local DC must be explicitly set") - .hasMessageContaining("node1=null") - .hasMessageContaining("node2=dc1") - .hasMessageContaining("node3=dc2") - .hasMessageContaining("Current DCs in this cluster are: dc1, dc2")); - } - - @NonNull - @Override - protected BasicLoadBalancingPolicy createPolicy() { - return new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java deleted file mode 100644 index 17e926a29e0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.mockito.Mockito.reset; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; - -public class DefaultLoadBalancingPolicyEventsTest extends BasicLoadBalancingPolicyEventsTest { - - @Override - @NonNull - protected DefaultLoadBalancingPolicy createAndInitPolicy() { - DefaultLoadBalancingPolicy policy = new DefaultLoadBalancingPolicy(context, DEFAULT_NAME); - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsOnly(node1, node2); - reset(distanceReporter); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java deleted file mode 100644 index 7b875209743..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.filter; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.spi.ILoggingEvent; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Test; - -public class DefaultLoadBalancingPolicyInitTest extends LoadBalancingPolicyTestBase { - - @Test - public void should_use_local_dc_if_provided_via_config() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // the parent class sets the config option to "dc1" - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void 
should_use_local_dc_if_provided_via_context() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // note: programmatic takes priority, the config won't even be inspected so no need to stub the - // option to null - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - verify(defaultProfile, never()) - .getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); - } - - @Test - public void should_infer_local_dc_if_no_explicit_contact_points() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - when(metadataManager.wasImplicitContactPoint()).thenReturn(true); - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_require_local_dc_if_explicit_contact_points() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.wasImplicitContactPoint()).thenReturn(false); - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - assertThatThrownBy( - () -> policy.init(ImmutableMap.of(UUID.randomUUID(), node2), distanceReporter)) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining( - "Since you provided explicit contact points, the local DC must be explicitly set"); - } - - @Test - public void should_warn_if_contact_points_not_in_local_dc() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - 
when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable warnLogs = - filter(loggingEventCaptor.getAllValues()).with("level", Level.WARN).get(); - assertThat(warnLogs).hasSize(1); - assertThat(warnLogs.iterator().next().getFormattedMessage()) - .contains( - "You specified dc1 as the local DC, but some contact points are from a different DC") - .contains("node2=dc2") - .contains("node3=dc3"); - } - - @Test - public void should_include_nodes_from_local_dc() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(node1.getState()).thenReturn(NodeState.UP); - when(node2.getState()).thenReturn(NodeState.DOWN); - when(node3.getState()).thenReturn(NodeState.UNKNOWN); - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - // Set distance for all nodes in the local DC - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - // But only include UP or UNKNOWN nodes in the live set - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node3); - } - - @Test - public void should_ignore_nodes_from_remote_dcs() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - DefaultLoadBalancingPolicy policy = 
createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc3")).isEmpty(); - } - - @Test - public void should_ignore_nodes_excluded_by_distance_reporter() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn((node, dc) -> node.equals(node1) ? NodeDistance.IGNORED : null); - - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node2, node3); - } - - @NonNull - protected DefaultLoadBalancingPolicy createPolicy() { - return new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java deleted file mode 100644 index fff86a1b750..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java +++ 
/dev/null @@ -1,362 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Optional; -import java.util.Queue; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLongArray; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; - -public class DefaultLoadBalancingPolicyQueryPlanTest extends BasicLoadBalancingPolicyQueryPlanTest { - - private static final long T0 = 
Long.MIN_VALUE; - private static final long T1 = 100; - private static final long T2 = 200; - private static final long T3 = 300; - - @Mock protected ChannelPool pool1; - @Mock protected ChannelPool pool2; - @Mock protected ChannelPool pool3; - @Mock protected ChannelPool pool4; - @Mock protected ChannelPool pool5; - - long nanoTime; - int diceRoll; - - private DefaultLoadBalancingPolicy dsePolicy; - - @Before - @Override - public void setup() { - nanoTime = T1; - diceRoll = 4; - given(node4.getDatacenter()).willReturn("dc1"); - given(node5.getDatacenter()).willReturn("dc1"); - given(session.getPools()) - .willReturn( - ImmutableMap.of( - node1, pool1, - node2, pool2, - node3, pool3, - node4, pool4, - node5, pool5)); - given(context.getMetadataManager()).willReturn(metadataManager); - given(metadataManager.getMetadata()).willReturn(metadata); - given(metadataManager.getContactPoints()).willReturn(ImmutableSet.of(node1)); - given(metadata.getTokenMap()).willAnswer(invocation -> Optional.of(tokenMap)); - super.setup(); - dsePolicy = (DefaultLoadBalancingPolicy) policy; - // Note: this assertion relies on the fact that policy.getLiveNodes() implementation preserves - // insertion order. 
- assertThat(dsePolicy.getLiveNodes().dc("dc1")) - .containsExactly(node1, node2, node3, node4, node5); - } - - @Test - public void should_prioritize_and_shuffle_2_replicas() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).willReturn(ImmutableSet.of(node3, node5)); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - Queue plan3 = dsePolicy.newQueryPlan(request, session); - - // Then - // node3 and node5 always first, round-robin on the rest - assertThat(plan1).containsExactly(node3, node5, node1, node2, node4); - assertThat(plan2).containsExactly(node3, node5, node2, node4, node1); - assertThat(plan3).containsExactly(node3, node5, node4, node1, node2); - - then(dsePolicy).should(times(3)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(never()).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void should_prioritize_and_shuffle_3_or_more_replicas_when_all_healthy_and_all_newly_up() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - dsePolicy.upTimes.put(node1, T1); - dsePolicy.upTimes.put(node3, T2); - dsePolicy.upTimes.put(node5, T3); // newest up replica - given(pool1.getInFlight()).willReturn(0); - given(pool3.getInFlight()).willReturn(0); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // newest up replica is 5, not in first or second position - assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); - assertThat(plan2).containsExactly(node1, node3, node5, 
node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void - should_prioritize_and_shuffle_3_or_more_replicas_when_all_healthy_and_some_newly_up_and_dice_roll_4() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - dsePolicy.upTimes.put(node1, T2); // newest up replica - dsePolicy.upTimes.put(node3, T1); - given(pool3.getInFlight()).willReturn(0); - given(pool5.getInFlight()).willReturn(0); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // newest up replica is node1 in first position and diceRoll = 4 -> bubbles down - assertThat(plan1).containsExactly(node3, node5, node1, node2, node4); - assertThat(plan2).containsExactly(node3, node5, node1, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(times(2)).diceRoll1d4(); - } - - @Test - public void - should_prioritize_and_shuffle_3_or_more_replicas_when_all_healthy_and_some_newly_up_and_dice_roll_1() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - dsePolicy.upTimes.put(node1, T2); // newest up replica - dsePolicy.upTimes.put(node3, T1); - given(pool1.getInFlight()).willReturn(0); - given(pool3.getInFlight()).willReturn(0); - diceRoll = 1; - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = 
dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // newest up replica is node1 in first position and diceRoll = 1 -> does not bubble down - assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); - assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(times(2)).diceRoll1d4(); - } - - @Test - public void should_prioritize_and_shuffle_3_or_more_replicas_when_first_unhealthy() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(100); // unhealthy - given(pool3.getInFlight()).willReturn(0); - given(pool5.getInFlight()).willReturn(0); - - dsePolicy.responseTimes.put( - node1, - dsePolicy - .new NodeResponseRateSample(new AtomicLongArray(new long[] {T0, T0}))); // unhealthy - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // node1 is unhealthy = 1 -> bubbles down - assertThat(plan1).containsExactly(node3, node5, node1, node2, node4); - assertThat(plan2).containsExactly(node3, node5, node1, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void - should_not_treat_node_as_unhealthy_if_has_in_flight_exceeded_but_response_times_normal() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - 
.willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(100); // unhealthy - given(pool3.getInFlight()).willReturn(0); - given(pool5.getInFlight()).willReturn(0); - - dsePolicy.responseTimes.put( - node1, - dsePolicy.new NodeResponseRateSample(new AtomicLongArray(new long[] {T1, T1}))); // healthy - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // node1 has more in-flight than node3 -> swap - assertThat(plan1).containsExactly(node3, node1, node5, node2, node4); - assertThat(plan2).containsExactly(node3, node1, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void should_prioritize_and_shuffle_3_or_more_replicas_when_last_unhealthy() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(0); - given(pool3.getInFlight()).willReturn(0); - given(pool5.getInFlight()).willReturn(100); // unhealthy - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // node5 is unhealthy -> noop - assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); - assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void 
should_prioritize_and_shuffle_3_or_more_replicas_when_majority_unhealthy() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(100); - given(pool3.getInFlight()).willReturn(100); - given(pool5.getInFlight()).willReturn(0); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // majority of nodes unhealthy -> noop - assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); - assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void should_reorder_first_two_replicas_when_first_has_more_in_flight_than_second() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(200); - given(pool3.getInFlight()).willReturn(100); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // node1 has more in-flight than node3 -> swap - assertThat(plan1).containsExactly(node3, node1, node5, node2, node4); - assertThat(plan2).containsExactly(node3, node1, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - 
@Override - protected DefaultLoadBalancingPolicy createAndInitPolicy() { - DefaultLoadBalancingPolicy policy = - spy( - new DefaultLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] array, int n) {} - - @Override - protected long nanoTime() { - return nanoTime; - } - - @Override - protected int diceRoll1d4() { - return diceRoll; - } - }); - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, - UUID.randomUUID(), node2, - UUID.randomUUID(), node3, - UUID.randomUUID(), node4, - UUID.randomUUID(), node5), - distanceReporter); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyRequestTrackerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyRequestTrackerTest.java deleted file mode 100644 index 757af43ef67..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyRequestTrackerTest.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.mockito.BDDMockito.given; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; - -public class DefaultLoadBalancingPolicyRequestTrackerTest extends LoadBalancingPolicyTestBase { - - @Mock Request request; - @Mock DriverExecutionProfile profile; - final String logPrefix = "lbp-test-log-prefix"; - - private DefaultLoadBalancingPolicy policy; - private long nextNanoTime; - - @Before - @Override - public void setup() { - super.setup(); - given(metadataManager.getContactPoints()).willReturn(ImmutableSet.of(node1)); - policy = - new DefaultLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected long nanoTime() { - return nextNanoTime; - } - }; - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, - UUID.randomUUID(), node2, - UUID.randomUUID(), node3), - distanceReporter); - } - - @Test - public void should_record_first_response_time_on_node_success() { - // Given - nextNanoTime = 123; - - // When - policy.onNodeSuccess(request, 0, profile, node1, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying(node1, value -> assertThat(value.oldest).isEqualTo(123L)) - .doesNotContainKeys(node2, node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void 
should_record_second_response_time_on_node_success() { - // Given - should_record_first_response_time_on_node_success(); - nextNanoTime = 456; - - // When - policy.onNodeSuccess(request, 0, profile, node1, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying( - node1, - value -> { - // oldest value first - assertThat(value.oldest).isEqualTo(123); - assertThat(value.newest.getAsLong()).isEqualTo(456); - }) - .doesNotContainKeys(node2, node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void should_record_further_response_times_on_node_success() { - // Given - should_record_second_response_time_on_node_success(); - nextNanoTime = 789; - - // When - policy.onNodeSuccess(request, 0, profile, node1, logPrefix); - policy.onNodeSuccess(request, 0, profile, node2, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying( - node1, - value -> { - // values should rotate left (bubble up) - assertThat(value.oldest).isEqualTo(456); - assertThat(value.newest.getAsLong()).isEqualTo(789); - }) - .hasEntrySatisfying(node2, value -> assertThat(value.oldest).isEqualTo(789)) - .doesNotContainKey(node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void should_record_first_response_time_on_node_error() { - // Given - nextNanoTime = 123; - Throwable iae = new IllegalArgumentException(); - - // When - policy.onNodeError(request, iae, 0, profile, node1, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying(node1, value -> assertThat(value.oldest).isEqualTo(123L)) - 
.doesNotContainKeys(node2, node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void should_record_second_response_time_on_node_error() { - // Given - should_record_first_response_time_on_node_error(); - nextNanoTime = 456; - Throwable iae = new IllegalArgumentException(); - - // When - policy.onNodeError(request, iae, 0, profile, node1, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying( - node1, - value -> { - // oldest value first - assertThat(value.oldest).isEqualTo(123); - assertThat(value.newest.getAsLong()).isEqualTo(456); - }) - .doesNotContainKeys(node2, node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void should_record_further_response_times_on_node_error() { - // Given - should_record_second_response_time_on_node_error(); - nextNanoTime = 789; - Throwable iae = new IllegalArgumentException(); - - // When - policy.onNodeError(request, iae, 0, profile, node1, logPrefix); - policy.onNodeError(request, iae, 0, profile, node2, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying( - node1, - value -> { - // values should rotate left (bubble up) - assertThat(value.oldest).isEqualTo(456); - assertThat(value.newest.getAsLong()).isEqualTo(789); - }) - .hasEntrySatisfying(node2, value -> assertThat(value.oldest).isEqualTo(789)) - .doesNotContainKey(node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, 
nextNanoTime)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/LoadBalancingPolicyTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/LoadBalancingPolicyTestBase.java deleted file mode 100644 index c9149efa69f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/LoadBalancingPolicyTestBase.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import org.junit.After; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.class) -public abstract class LoadBalancingPolicyTestBase { - - @Mock protected DefaultNode node1; - @Mock protected DefaultNode node2; - @Mock protected DefaultNode node3; - @Mock protected DefaultNode node4; - @Mock protected DefaultNode node5; - @Mock protected InternalDriverContext context; - @Mock protected DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - @Mock protected LoadBalancingPolicy.DistanceReporter distanceReporter; - @Mock protected Appender appender; - @Mock protected MetadataManager metadataManager; - - @Captor protected ArgumentCaptor loggingEventCaptor; - - protected Logger logger; - - @Before - public void setup() { - 
when(context.getSessionName()).thenReturn("test"); - when(context.getConfig()).thenReturn(config); - when(config.getProfile(DriverExecutionProfile.DEFAULT_NAME)).thenReturn(defaultProfile); - - when(defaultProfile.getName()).thenReturn(DriverExecutionProfile.DEFAULT_NAME); - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn("dc1"); - when(defaultProfile.getBoolean(DefaultDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, true)) - .thenReturn(true); - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(0); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - when(defaultProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).thenReturn("ONE"); - - when(context.getMetadataManager()).thenReturn(metadataManager); - - logger = - (Logger) LoggerFactory.getLogger("com.datastax.oss.driver.internal.core.loadbalancing"); - logger.addAppender(appender); - - for (Node node : ImmutableList.of(node1, node2, node3, node4, node5)) { - when(node.getDatacenter()).thenReturn("dc1"); - } - - when(context.getLocalDatacenter(anyString())).thenReturn(null); - when(context.getConsistencyLevelRegistry()).thenReturn(new DefaultConsistencyLevelRegistry()); - } - - @After - public void teardown() { - logger.detachAppender(appender); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSetTest.java deleted file mode 100644 index 0730bcd346c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSetTest.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.Test; - -public class DcAgnosticNodeSetTest { - - @Test - public void should_add_node() { - DcAgnosticNodeSet set = new DcAgnosticNodeSet(); - Node node = mock(Node.class); - assertThat(set.add(node)).isTrue(); - assertThat(set.add(node)).isFalse(); - } - - @Test - public void should_remove_node() { - DcAgnosticNodeSet set = new DcAgnosticNodeSet(); - Node node = mock(Node.class); - set.add(node); - assertThat(set.remove(node)).isTrue(); - assertThat(set.remove(node)).isFalse(); - } - - @Test - public void should_return_all_nodes() { - DcAgnosticNodeSet set = new DcAgnosticNodeSet(); - Node node1 = mock(Node.class); - set.add(node1); - Node node2 = mock(Node.class); - set.add(node2); - assertThat(set.dc(null)).contains(node1, node2); - assertThat(set.dc("irrelevant")).contains(node1, node2); - } - - @Test - public void should_return_empty_dcs() { - DcAgnosticNodeSet set = new DcAgnosticNodeSet(); - assertThat(set.dcs()).isEmpty(); - } -} diff 
--git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSetTest.java deleted file mode 100644 index 21c58cbb829..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSetTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.Test; - -public class MultiDcNodeSetTest { - - @Test - public void should_add_node() { - MultiDcNodeSet set = new MultiDcNodeSet(); - Node node1 = mockNode("dc1"); - assertThat(set.add(node1)).isTrue(); - assertThat(set.add(node1)).isFalse(); - Node node2 = mockNode("dc2"); - assertThat(set.add(node2)).isTrue(); - assertThat(set.add(node2)).isFalse(); - } - - @Test - public void should_remove_node() { - MultiDcNodeSet set = new MultiDcNodeSet(); - Node node1 = mockNode("dc1"); - set.add(node1); - assertThat(set.remove(node1)).isTrue(); - assertThat(set.remove(node1)).isFalse(); - Node node2 = mockNode("dc2"); - set.add(node2); - assertThat(set.remove(node2)).isTrue(); - assertThat(set.remove(node2)).isFalse(); - } - - @Test - public void should_return_all_nodes_in_dc() { - MultiDcNodeSet set = new MultiDcNodeSet(); - Node node1 = mockNode("dc1"); - set.add(node1); - Node node2 = mockNode("dc1"); - set.add(node2); - Node node3 = mockNode("dc2"); - set.add(node3); - assertThat(set.dc("dc1")).contains(node1, node2); - assertThat(set.dc("dc2")).contains(node3); - assertThat(set.dc("dc3")).isEmpty(); - assertThat(set.dc(null)).isEmpty(); - } - - @Test - public void should_return_all_dcs() { - MultiDcNodeSet set = new MultiDcNodeSet(); - Node node1 = mockNode("dc1"); - set.add(node1); - Node node2 = mockNode("dc2"); - set.add(node2); - assertThat(set.dcs()).contains("dc1", "dc2"); - } - - private Node mockNode(String dc) { - Node node = mock(Node.class); - when(node.getDatacenter()).thenReturn(dc); - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSetTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSetTest.java deleted file mode 100644 index 063c13c9386..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSetTest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.Test; - -public class SingleDcNodeSetTest { - - @Test - public void should_add_node() { - SingleDcNodeSet set = new SingleDcNodeSet("dc1"); - Node node1 = mockNode("dc1"); - assertThat(set.add(node1)).isTrue(); - assertThat(set.add(node1)).isFalse(); - Node node2 = mockNode("dc2"); - assertThat(set.add(node2)).isFalse(); - } - - @Test - public void should_remove_node() { - SingleDcNodeSet set = new SingleDcNodeSet("dc1"); - Node node = mockNode("dc1"); - set.add(node); - assertThat(set.remove(node)).isTrue(); - assertThat(set.remove(node)).isFalse(); - } - - @Test - public void should_return_all_nodes_if_local_dc() { - SingleDcNodeSet set = new SingleDcNodeSet("dc1"); - Node node1 = mockNode("dc1"); - set.add(node1); - Node node2 = mockNode("dc1"); - set.add(node2); - Node node3 = mockNode("dc2"); - set.add(node3); - assertThat(set.dc("dc1")).contains(node1, node2); - assertThat(set.dc("dc2")).isEmpty(); - assertThat(set.dc(null)).isEmpty(); - } - - @Test - public void should_return_only_local_dc() { - SingleDcNodeSet set = new SingleDcNodeSet("dc1"); - assertThat(set.dcs()).contains("dc1"); - } - - private Node mockNode(String dc) { - Node node = mock(Node.class); - when(node.getDatacenter()).thenReturn(dc); - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java deleted file mode 100644 index 8d337bcc7e3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or 
more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.Map; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class AddNodeRefreshTest { - - @Mock private InternalDriverContext context; - @Mock protected MetricsFactory metricsFactory; - @Mock private ChannelFactory channelFactory; - - private DefaultNode node1; - - @Before - public void setup() { - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(context.getChannelFactory()).thenReturn(channelFactory); - node1 = 
TestNodeFactory.newNode(1, context); - } - - @Test - public void should_add_new_node() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); - UUID newHostId = Uuids.random(); - DefaultEndPoint newEndPoint = TestNodeFactory.newEndPoint(2); - UUID newSchemaVersion = Uuids.random(); - DefaultNodeInfo newNodeInfo = - DefaultNodeInfo.builder() - .withHostId(newHostId) - .withEndPoint(newEndPoint) - .withDatacenter("dc1") - .withRack("rack2") - .withSchemaVersion(newSchemaVersion) - .build(); - AddNodeRefresh refresh = new AddNodeRefresh(newNodeInfo); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).containsOnlyKeys(node1.getHostId(), newHostId); - Node node2 = newNodes.get(newHostId); - assertThat(node2.getEndPoint()).isEqualTo(newEndPoint); - assertThat(node2.getDatacenter()).isEqualTo("dc1"); - assertThat(node2.getRack()).isEqualTo("rack2"); - assertThat(node2.getHostId()).isEqualTo(newHostId); - assertThat(node2.getSchemaVersion()).isEqualTo(newSchemaVersion); - assertThat(result.events).containsExactly(NodeStateEvent.added((DefaultNode) node2)); - } - - @Test - public void should_not_add_existing_node_with_same_id_and_endpoint() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); - DefaultNodeInfo newNodeInfo = - DefaultNodeInfo.builder() - .withHostId(node1.getHostId()) - .withEndPoint(node1.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack2") - .build(); - AddNodeRefresh refresh = new AddNodeRefresh(newNodeInfo); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()).containsOnlyKeys(node1.getHostId()); - // Info is not copied over: - 
assertThat(node1.getDatacenter()).isNull(); - assertThat(node1.getRack()).isNull(); - assertThat(result.events).isEmpty(); - } - - @Test - public void should_add_existing_node_with_same_id_but_different_endpoint() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); - DefaultEndPoint newEndPoint = TestNodeFactory.newEndPoint(2); - InetSocketAddress newBroadcastRpcAddress = newEndPoint.resolve(); - UUID newSchemaVersion = Uuids.random(); - DefaultNodeInfo newNodeInfo = - DefaultNodeInfo.builder() - .withHostId(node1.getHostId()) - .withEndPoint(newEndPoint) - .withDatacenter("dc1") - .withRack("rack2") - .withSchemaVersion(newSchemaVersion) - .withBroadcastRpcAddress(newBroadcastRpcAddress) - .build(); - AddNodeRefresh refresh = new AddNodeRefresh(newNodeInfo); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).hasSize(1).containsEntry(node1.getHostId(), node1); - assertThat(node1.getEndPoint()).isEqualTo(newEndPoint); - assertThat(node1.getDatacenter()).isEqualTo("dc1"); - assertThat(node1.getRack()).isEqualTo("rack2"); - assertThat(node1.getSchemaVersion()).isEqualTo(newSchemaVersion); - assertThat(result.events).containsExactly(TopologyEvent.suggestUp(newBroadcastRpcAddress)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java deleted file mode 100644 index 7da8fb39415..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import java.net.InetSocketAddress; -import org.junit.Test; - -public class DefaultEndPointTest { - - @Test - public void should_create_from_host_name() { - DefaultEndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("localhost", 9042)); - assertThat(endPoint.asMetricPrefix()).isEqualTo("localhost:9042"); - } - - @Test - public void should_create_from_literal_ipv4_address() { - DefaultEndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042)); - assertThat(endPoint.asMetricPrefix()).isEqualTo("127_0_0_1:9042"); - } - - @Test - public void should_create_from_literal_ipv6_address() { - DefaultEndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("::1", 9042)); - assertThat(endPoint.asMetricPrefix()).isEqualTo("0:0:0:0:0:0:0:1:9042"); - } - - @Test - public void should_create_from_unresolved_address() { - InetSocketAddress address = InetSocketAddress.createUnresolved("test.com", 9042); - DefaultEndPoint endPoint = new DefaultEndPoint(address); - assertThat(endPoint.asMetricPrefix()).isEqualTo("test_com:9042"); - 
assertThat(address.isUnresolved()).isTrue(); - } - - @Test - public void should_reject_null_address() { - assertThatThrownBy(() -> new DefaultEndPoint(null)) - .isInstanceOf(NullPointerException.class) - .hasMessage("address can't be null"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java deleted file mode 100644 index b463f9caa7b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.token.DefaultReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Collections; -import java.util.Map; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class DefaultMetadataTokenMapTest { - - // Simulate the simplest setup possible for a functional token map. We're not testing the token - // map itself, only how the metadata interacts with it. 
- private static final String TOKEN1 = "-9000000000000000000"; - private static final String TOKEN2 = "9000000000000000000"; - private static final Node NODE1 = mockNode(TOKEN1); - private static final Node NODE2 = mockNode(TOKEN2); - private static final CqlIdentifier KEYSPACE_NAME = CqlIdentifier.fromInternal("ks"); - private static final KeyspaceMetadata KEYSPACE = - mockKeyspace( - KEYSPACE_NAME, - ImmutableMap.of( - "class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")); - - @Mock private InternalDriverContext context; - @Mock private ChannelFactory channelFactory; - - @Before - public void setup() { - when(context.getChannelFactory()).thenReturn(channelFactory); - DefaultReplicationStrategyFactory replicationStrategyFactory = - new DefaultReplicationStrategyFactory(context); - when(context.getReplicationStrategyFactory()).thenReturn(replicationStrategyFactory); - } - - @Test - public void should_not_build_token_map_when_initializing_with_contact_points() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - assertThat(contactPointsMetadata.getTokenMap()).isNotPresent(); - } - - @Test - public void should_build_minimal_token_map_on_first_refresh() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), - true, - true, - new Murmur3TokenFactory(), - context); - assertThat(firstRefreshMetadata.getTokenMap().get().getTokenRanges()).hasSize(1); - } - - @Test - public void should_not_build_token_map_when_disabled() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - 
contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), - false, - true, - new Murmur3TokenFactory(), - context); - assertThat(firstRefreshMetadata.getTokenMap()).isNotPresent(); - } - - @Test - public void should_stay_empty_on_first_refresh_if_partitioner_missing() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), true, true, null, context); - assertThat(firstRefreshMetadata.getTokenMap()).isNotPresent(); - } - - @Test - public void should_update_minimal_token_map_if_new_node_and_still_no_schema() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), - true, - true, - new Murmur3TokenFactory(), - context); - DefaultMetadata secondRefreshMetadata = - firstRefreshMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1, NODE2.getHostId(), NODE2), - true, - false, - null, - context); - assertThat(secondRefreshMetadata.getTokenMap().get().getTokenRanges()).hasSize(2); - } - - @Test - public void should_update_token_map_when_schema_changes() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), - true, - true, - new Murmur3TokenFactory(), - context); - DefaultMetadata schemaRefreshMetadata = - firstRefreshMetadata.withSchema(ImmutableMap.of(KEYSPACE_NAME, KEYSPACE), true, context); - assertThat(schemaRefreshMetadata.getTokenMap().get().getTokenRanges(KEYSPACE_NAME, NODE1)) - .isNotEmpty(); - } - - 
private static DefaultNode mockNode(String token) { - DefaultNode node = mock(DefaultNode.class); - when(node.getHostId()).thenReturn(UUID.randomUUID()); - when(node.getRawTokens()).thenReturn(ImmutableSet.of(token)); - return node; - } - - private static KeyspaceMetadata mockKeyspace( - CqlIdentifier name, Map replicationConfig) { - KeyspaceMetadata keyspace = mock(KeyspaceMetadata.class); - when(keyspace.getName()).thenReturn(name); - when(keyspace.getReplication()).thenReturn(replicationConfig); - return keyspace; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeTest.java deleted file mode 100644 index 6a53fe3e433..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.context.MockedDriverContextFactory; -import java.net.InetSocketAddress; -import java.util.UUID; -import org.junit.Test; - -public class DefaultNodeTest { - - private final String uuidStr = "1e4687e6-f94e-432e-a792-216f89ef265f"; - private final UUID hostId = UUID.fromString(uuidStr); - private final EndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("localhost", 9042)); - - @Test - public void should_have_expected_string_representation() { - - DefaultNode node = new DefaultNode(endPoint, MockedDriverContextFactory.defaultDriverContext()); - node.hostId = hostId; - - String expected = - String.format( - "Node(endPoint=localhost/127.0.0.1:9042, hostId=1e4687e6-f94e-432e-a792-216f89ef265f, hashCode=%x)", - node.hashCode()); - assertThat(node.toString()).isEqualTo(expected); - } - - @Test - public void should_have_expected_string_representation_if_hostid_is_null() { - - DefaultNode node = new DefaultNode(endPoint, MockedDriverContextFactory.defaultDriverContext()); - node.hostId = null; - - String expected = - String.format( - "Node(endPoint=localhost/127.0.0.1:9042, hostId=null, hashCode=%x)", node.hashCode()); - assertThat(node.toString()).isEqualTo(expected); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java deleted file mode 100644 index dd40f233518..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java +++ /dev/null @@ -1,805 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.fail; -import static org.assertj.core.api.Assertions.filter; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.internal.core.addresstranslation.PassThroughAddressTranslator; -import 
com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.google.common.collect.Streams; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.Map; -import java.util.Optional; -import java.util.Queue; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.LoggerFactory; - 
-@RunWith(DataProviderRunner.class) -public class DefaultTopologyMonitorTest { - - private static final InetSocketAddress ADDRESS2 = new InetSocketAddress("127.0.0.2", 9042); - - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultConfig; - @Mock private ControlConnection controlConnection; - @Mock private DriverChannel channel; - @Mock protected MetricsFactory metricsFactory; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - @Mock private SslEngineFactory sslEngineFactory; - - private DefaultNode node1; - private DefaultNode node2; - - private TestTopologyMonitor topologyMonitor; - - private Logger logger; - private Level initialLogLevel; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(context.getMetricsFactory()).thenReturn(metricsFactory); - - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT)) - .thenReturn(Duration.ofSeconds(1)); - when(config.getDefaultProfile()).thenReturn(defaultConfig); - when(context.getConfig()).thenReturn(config); - - AddressTranslator addressTranslator = spy(new PassThroughAddressTranslator(context)); - when(context.getAddressTranslator()).thenReturn(addressTranslator); - - when(channel.getEndPoint()).thenReturn(node1.getEndPoint()); - when(controlConnection.channel()).thenReturn(channel); - when(context.getControlConnection()).thenReturn(controlConnection); - - topologyMonitor = new TestTopologyMonitor(context); - - logger = (Logger) LoggerFactory.getLogger(DefaultTopologyMonitor.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.INFO); - logger.addAppender(appender); - } - - @After - public void teardown() { - logger.detachAppender(appender); - logger.setLevel(initialLogLevel); - } - - @Test - public void 
should_initialize_control_connection() { - // When - topologyMonitor.init(); - - // Then - verify(controlConnection).init(true, false, true); - } - - @Test - public void should_not_refresh_control_node() { - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node1); - - // Then - assertThatStage(futureInfo).isSuccess(maybeInfo -> assertThat(maybeInfo.isPresent()).isFalse()); - } - - @Test - public void should_refresh_node_from_peers_if_broadcast_address_is_present() { - // Given - node2.broadcastAddress = ADDRESS2; - topologyMonitor.isSchemaV2 = false; - topologyMonitor.stubQueries( - new StubbedQuery( - "SELECT * FROM system.peers WHERE peer = :address", - ImmutableMap.of("address", ADDRESS2.getAddress()), - mockResult(mockPeersRow(2, node2.getHostId())))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - } - - @Test - public void should_refresh_node_from_peers_if_broadcast_address_is_present_v2() { - // Given - node2.broadcastAddress = ADDRESS2; - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery( - "SELECT * FROM system.peers_v2 WHERE peer = :address and peer_port = :port", - ImmutableMap.of("address", ADDRESS2.getAddress(), "port", 9042), - mockResult(mockPeersV2Row(2, node2.getHostId())))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - assertThat(info.getBroadcastAddress().get().getPort()).isEqualTo(7002); - }); - } - - @Test - public void should_refresh_node_from_peers_if_broadcast_address_is_not_present() { - // Given - 
topologyMonitor.isSchemaV2 = false; - node2.broadcastAddress = null; - AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - // The rpc_address in each row should have been tried, only the last row should have been - // converted - verify(peer3).getUuid("host_id"); - verify(peer3, never()).getString(anyString()); - - verify(peer2, times(2)).getUuid("host_id"); - verify(peer2).getString("data_center"); - } - - @Test - public void should_refresh_node_from_peers_if_broadcast_address_is_not_present_V2() { - // Given - topologyMonitor.isSchemaV2 = true; - node2.broadcastAddress = null; - AdminRow peer3 = mockPeersV2Row(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersV2Row(2, node2.getHostId()); - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer3, peer2))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - // The host_id in each row should have been tried, only the last row should have been - // converted - verify(peer3).getUuid("host_id"); - verify(peer3, never()).getString(anyString()); - - verify(peer2, times(2)).getUuid("host_id"); - verify(peer2).getString("data_center"); - } - - @Test - public void should_get_new_node_from_peers() { - // Given - AdminRow peer3 = mockPeersRow(4, UUID.randomUUID()); - AdminRow peer2 = 
mockPeersRow(3, node2.getHostId()); - AdminRow peer1 = mockPeersRow(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = false; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2, peer1))); - - // When - CompletionStage> futureInfo = topologyMonitor.getNewNodeInfo(ADDRESS2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - // The rpc_address in each row should have been tried, only the last row should have been - // converted - verify(peer3).getInetAddress("rpc_address"); - verify(peer3, never()).getString(anyString()); - - verify(peer2).getInetAddress("rpc_address"); - verify(peer2, never()).getString(anyString()); - - verify(peer1).getInetAddress("rpc_address"); - verify(peer1).getString("data_center"); - } - - @Test - public void should_get_new_node_from_peers_v2() { - // Given - AdminRow peer3 = mockPeersV2Row(4, UUID.randomUUID()); - AdminRow peer2 = mockPeersV2Row(3, node2.getHostId()); - AdminRow peer1 = mockPeersV2Row(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer3, peer2, peer1))); - - // When - CompletionStage> futureInfo = topologyMonitor.getNewNodeInfo(ADDRESS2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - // The natove in each row should have been tried, only the last row should have been - // converted - verify(peer3).getInetAddress("native_address"); - verify(peer3, never()).getString(anyString()); - - verify(peer2).getInetAddress("native_address"); - verify(peer2, never()).getString(anyString()); - - verify(peer1).getInetAddress("native_address"); - 
verify(peer1).getString("data_center"); - } - - @Test - public void should_refresh_node_list_from_local_and_peers() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", Collections.emptyMap(), null, true), - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2))); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> { - Iterator iterator = infos.iterator(); - NodeInfo info1 = iterator.next(); - assertThat(info1.getEndPoint()).isEqualTo(node1.getEndPoint()); - assertThat(info1.getDatacenter()).isEqualTo("dc1"); - NodeInfo info3 = iterator.next(); - assertThat(info3.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.3", 9042)); - assertThat(info3.getDatacenter()).isEqualTo("dc3"); - NodeInfo info2 = iterator.next(); - assertThat(info2.getEndPoint()).isEqualTo(node2.getEndPoint()); - assertThat(info2.getDatacenter()).isEqualTo("dc2"); - }); - } - - @Test - @UseDataProvider("columnsToCheckV1") - public void should_skip_invalid_peers_row(String columnToCheck) { - // Given - topologyMonitor.isSchemaV2 = false; - node2.broadcastAddress = ADDRESS2; - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - when(peer2.isNull(columnToCheck)).thenReturn(true); - topologyMonitor.stubQueries( - new StubbedQuery( - "SELECT * FROM system.peers WHERE peer = :address", - ImmutableMap.of("address", ADDRESS2.getAddress()), - mockResult(peer2))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo).isSuccess(maybeInfo -> assertThat(maybeInfo).isEmpty()); - 
assertThat(node2.broadcastAddress).isNotNull().isEqualTo(ADDRESS2); - assertLog( - Level.WARN, - "[null] Found invalid row in system.peers for peer: /127.0.0.2. " - + "This is likely a gossip or snitch issue, this node will be ignored."); - } - - @Test - @UseDataProvider("columnsToCheckV2") - public void should_skip_invalid_peers_row_v2(String columnToCheck) { - // Given - topologyMonitor.isSchemaV2 = true; - node2.broadcastAddress = ADDRESS2; - AdminRow peer2 = mockPeersV2Row(2, node2.getHostId()); - when(peer2.isNull(columnToCheck)).thenReturn(true); - topologyMonitor.stubQueries( - new StubbedQuery( - "SELECT * FROM system.peers_v2 WHERE peer = :address and peer_port = :port", - ImmutableMap.of("address", ADDRESS2.getAddress(), "port", 9042), - mockResult(peer2))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo).isSuccess(maybeInfo -> assertThat(maybeInfo).isEmpty()); - assertThat(node2.broadcastAddress).isNotNull().isEqualTo(ADDRESS2); - assertLog( - Level.WARN, - "[null] Found invalid row in system.peers_v2 for peer: /127.0.0.2. 
" - + "This is likely a gossip or snitch issue, this node will be ignored."); - } - - @Test - public void should_stop_executing_queries_once_closed() { - // Given - topologyMonitor.close(); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isFailed(error -> assertThat(error).isInstanceOf(IllegalStateException.class)); - } - - @Test - public void should_warn_when_control_host_found_in_system_peers() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer1 = mockPeersRow(1, node2.getHostId()); // invalid - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", Collections.emptyMap(), null, true), - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2, peer1))); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> - assertThat(infos) - .hasSize(3) - .extractingResultOf("getEndPoint") - .containsOnlyOnce(node1.getEndPoint())); - assertLogContains( - Level.WARN, - "[null] Control node /127.0.0.1:9042 has an entry for itself in system.peers: " - + "this entry will be ignored. 
This is likely due to a misconfiguration; " - + "please verify your rpc_address configuration in cassandra.yaml on " - + "all nodes in your cluster."); - } - - @Test - public void should_warn_when_control_host_found_in_system_peers_v2() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - AdminRow peer1 = mockPeersRow(1, node2.getHostId()); // invalid - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer3, peer2, peer1))); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> - assertThat(infos) - .hasSize(3) - .extractingResultOf("getEndPoint") - .containsOnlyOnce(node1.getEndPoint())); - assertLogContains( - Level.WARN, - "[null] Control node /127.0.0.1:9042 has an entry for itself in system.peers_v2: " - + "this entry will be ignored. 
This is likely due to a misconfiguration; " - + "please verify your rpc_address configuration in cassandra.yaml on " - + "all nodes in your cluster."); - } - - // Confirm the base case of extracting peer info from peers_v2, no SSL involved - @Test - public void should_get_peer_address_info_peers_v2() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer2 = mockPeersV2Row(3, node2.getHostId()); - AdminRow peer1 = mockPeersV2Row(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer2, peer1))); - when(context.getSslEngineFactory()).thenReturn(Optional.empty()); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> { - Iterator iterator = infos.iterator(); - // First NodeInfo is for local, skip past that - iterator.next(); - NodeInfo peer2nodeInfo = iterator.next(); - assertThat(peer2nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.3", 9042)); - NodeInfo peer1nodeInfo = iterator.next(); - assertThat(peer1nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.2", 9042)); - }); - } - - // Confirm the base case of extracting peer info from DSE peers table, no SSL involved - @Test - public void should_get_peer_address_info_peers_dse() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer2 = mockPeersRowDse(3, node2.getHostId()); - AdminRow peer1 = mockPeersRowDse(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", Maps.newHashMap(), null, true), - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer2, peer1))); - 
when(context.getSslEngineFactory()).thenReturn(Optional.empty()); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> { - Iterator iterator = infos.iterator(); - // First NodeInfo is for local, skip past that - iterator.next(); - NodeInfo peer2nodeInfo = iterator.next(); - assertThat(peer2nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.3", 9042)); - NodeInfo peer1nodeInfo = iterator.next(); - assertThat(peer1nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.2", 9042)); - }); - } - - // Confirm the base case of extracting peer info from DSE peers table, this time with SSL - @Test - public void should_get_peer_address_info_peers_dse_with_ssl() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer2 = mockPeersRowDseWithSsl(3, node2.getHostId()); - AdminRow peer1 = mockPeersRowDseWithSsl(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", Maps.newHashMap(), null, true), - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer2, peer1))); - when(context.getSslEngineFactory()).thenReturn(Optional.of(sslEngineFactory)); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> { - Iterator iterator = infos.iterator(); - // First NodeInfo is for local, skip past that - iterator.next(); - NodeInfo peer2nodeInfo = iterator.next(); - assertThat(peer2nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.3", 9043)); - NodeInfo peer1nodeInfo = iterator.next(); - assertThat(peer1nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.2", 9043)); - }); - } - - @DataProvider - public static Object[][] 
columnsToCheckV1() { - return new Object[][] {{"rpc_address"}, {"host_id"}, {"data_center"}, {"rack"}, {"tokens"}}; - } - - @DataProvider - public static Object[][] columnsToCheckV2() { - return new Object[][] { - {"native_address"}, {"native_port"}, {"host_id"}, {"data_center"}, {"rack"}, {"tokens"} - }; - } - - /** Mocks the query execution logic. */ - private static class TestTopologyMonitor extends DefaultTopologyMonitor { - - private final Queue queries = new ArrayDeque<>(); - - private TestTopologyMonitor(InternalDriverContext context) { - super(context); - port = 9042; - } - - private void stubQueries(StubbedQuery... queries) { - this.queries.addAll(Arrays.asList(queries)); - } - - @Override - protected CompletionStage query( - DriverChannel channel, String queryString, Map parameters) { - StubbedQuery nextQuery = queries.poll(); - assertThat(nextQuery).isNotNull(); - assertThat(nextQuery.queryString).isEqualTo(queryString); - assertThat(nextQuery.parameters).isEqualTo(parameters); - if (nextQuery.error) { - Message error = - new Error( - ProtocolConstants.ErrorCode.SERVER_ERROR, - "Unknown keyspace/cf pair (system.peers_v2)"); - return CompletableFutures.failedFuture(new UnexpectedResponseException(queryString, error)); - } - return CompletableFuture.completedFuture(nextQuery.result); - } - } - - private static class StubbedQuery { - private final String queryString; - private final Map parameters; - private final AdminResult result; - private final boolean error; - - private StubbedQuery( - String queryString, Map parameters, AdminResult result, boolean error) { - this.queryString = queryString; - this.parameters = parameters; - this.result = result; - this.error = error; - } - - private StubbedQuery(String queryString, Map parameters, AdminResult result) { - this(queryString, parameters, result, false); - } - - private StubbedQuery(String queryString, AdminResult result) { - this(queryString, Collections.emptyMap(), result); - } - } - - private AdminRow 
mockLocalRow(int i, UUID hostId) { - try { - AdminRow row = mock(AdminRow.class); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.getInetAddress("broadcast_address")) - .thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.isNull("data_center")).thenReturn(false); - when(row.getString("data_center")).thenReturn("dc" + i); - when(row.getInetAddress("listen_address")).thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.isNull("rack")).thenReturn(false); - when(row.getString("rack")).thenReturn("rack" + i); - when(row.getString("release_version")).thenReturn("release_version" + i); - - // The driver should not use this column for the local row, because it can contain the - // non-broadcast RPC address. Simulate the bug to ensure it's handled correctly. - when(row.isNull("rpc_address")).thenReturn(false); - when(row.getInetAddress("rpc_address")).thenReturn(InetAddress.getByName("0.0.0.0")); - - when(row.isNull("tokens")).thenReturn(false); - when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); - when(row.contains("peer")).thenReturn(false); - return row; - } catch (UnknownHostException e) { - fail("unexpected", e); - return null; - } - } - - private AdminRow mockPeersRow(int i, UUID hostId) { - try { - AdminRow row = mock(AdminRow.class); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.getInetAddress("peer")).thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.isNull("data_center")).thenReturn(false); - when(row.getString("data_center")).thenReturn("dc" + i); - when(row.isNull("rack")).thenReturn(false); - when(row.getString("rack")).thenReturn("rack" + i); - when(row.getString("release_version")).thenReturn("release_version" + i); - when(row.isNull("rpc_address")).thenReturn(false); - when(row.getInetAddress("rpc_address")).thenReturn(InetAddress.getByName("127.0.0." 
+ i)); - when(row.isNull("tokens")).thenReturn(false); - when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); - when(row.contains("peer")).thenReturn(true); - - when(row.isNull("native_address")).thenReturn(true); - when(row.isNull("native_port")).thenReturn(true); - - return row; - } catch (UnknownHostException e) { - fail("unexpected", e); - return null; - } - } - - private AdminRow mockPeersV2Row(int i, UUID hostId) { - try { - AdminRow row = mock(AdminRow.class); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.getInetAddress("peer")).thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.getInteger("peer_port")).thenReturn(7000 + i); - when(row.isNull("data_center")).thenReturn(false); - when(row.getString("data_center")).thenReturn("dc" + i); - when(row.isNull("rack")).thenReturn(false); - when(row.getString("rack")).thenReturn("rack" + i); - when(row.getString("release_version")).thenReturn("release_version" + i); - when(row.isNull("native_address")).thenReturn(false); - when(row.getInetAddress("native_address")).thenReturn(InetAddress.getByName("127.0.0." 
+ i)); - when(row.isNull("native_port")).thenReturn(false); - when(row.getInteger("native_port")).thenReturn(9042); - when(row.isNull("tokens")).thenReturn(false); - when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); - when(row.contains("peer")).thenReturn(true); - when(row.contains("peer_port")).thenReturn(true); - when(row.contains("native_port")).thenReturn(true); - - when(row.isNull("rpc_address")).thenReturn(true); - return row; - } catch (UnknownHostException e) { - fail("unexpected", e); - return null; - } - } - - // Mock row for DSE ~6.8 - private AdminRow mockPeersRowDse(int i, UUID hostId) { - try { - AdminRow row = mock(AdminRow.class); - when(row.contains("peer")).thenReturn(true); - when(row.isNull("data_center")).thenReturn(false); - when(row.getString("data_center")).thenReturn("dc" + i); - when(row.getString("dse_version")).thenReturn("6.8.30"); - when(row.contains("graph")).thenReturn(true); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.getInetAddress("peer")).thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.isNull("rack")).thenReturn(false); - when(row.getString("rack")).thenReturn("rack" + i); - when(row.isNull("native_transport_address")).thenReturn(false); - when(row.getInetAddress("native_transport_address")) - .thenReturn(InetAddress.getByName("127.0.0." 
+ i)); - when(row.isNull("native_transport_port")).thenReturn(false); - when(row.getInteger("native_transport_port")).thenReturn(9042); - when(row.isNull("tokens")).thenReturn(false); - when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); - when(row.isNull("rpc_address")).thenReturn(false); - - return row; - } catch (UnknownHostException e) { - fail("unexpected", e); - return null; - } - } - - private AdminRow mockPeersRowDseWithSsl(int i, UUID hostId) { - AdminRow row = mockPeersRowDse(i, hostId); - when(row.isNull("native_transport_port_ssl")).thenReturn(false); - when(row.getInteger("native_transport_port_ssl")).thenReturn(9043); - return row; - } - - private AdminResult mockResult(AdminRow... rows) { - AdminResult result = mock(AdminResult.class); - when(result.iterator()).thenReturn(Iterators.forArray(rows)); - return result; - } - - private void assertLog(Level level, String message) { - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable logs = - filter(loggingEventCaptor.getAllValues()).with("level", level).get(); - assertThat(logs).hasSize(1); - assertThat(logs.iterator().next().getFormattedMessage()).contains(message); - } - - private void assertLogContains(Level level, String message) { - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable logs = - filter(loggingEventCaptor.getAllValues()).with("level", level).get(); - assertThat( - Streams.stream(logs).map(ILoggingEvent::getFormattedMessage).anyMatch(message::contains)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java deleted file mode 100644 index 679ec1be037..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class FullNodeListRefreshTest { - - @Mock private InternalDriverContext context; - @Mock protected MetricsFactory metricsFactory; - @Mock private ChannelFactory channelFactory; - - private DefaultNode node1; - private DefaultNode node2; - private EndPoint endPoint3; - private UUID hostId3; - - @Before - public void setup() { - 
when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(context.getChannelFactory()).thenReturn(channelFactory); - - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - - endPoint3 = TestNodeFactory.newEndPoint(3); - hostId3 = UUID.randomUUID(); - } - - @Test - public void should_add_and_remove_nodes() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), - Collections.emptyMap(), - null, - null); - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(node2.getEndPoint()) - .withHostId(node2.getHostId()) - .build(), - DefaultNodeInfo.builder().withEndPoint(endPoint3).withHostId(hostId3).build()); - FullNodeListRefresh refresh = new FullNodeListRefresh(newInfos); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()).containsOnlyKeys(node2.getHostId(), hostId3); - DefaultNode node3 = (DefaultNode) result.newMetadata.getNodes().get(hostId3); - assertThat(result.events) - .containsOnly(NodeStateEvent.removed(node1), NodeStateEvent.added(node3)); - } - - @Test - public void should_update_existing_nodes() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), - Collections.emptyMap(), - null, - null); - - UUID schemaVersion1 = Uuids.random(); - UUID schemaVersion2 = Uuids.random(); - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(node1.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack1") - .withHostId(node1.getHostId()) - .withSchemaVersion(schemaVersion1) - .build(), - DefaultNodeInfo.builder() - .withEndPoint(node2.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack2") - .withHostId(node2.getHostId()) - .withSchemaVersion(schemaVersion2) - .build()); - FullNodeListRefresh 
refresh = new FullNodeListRefresh(newInfos); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()) - .containsOnlyKeys(node1.getHostId(), node2.getHostId()); - assertThat(node1.getDatacenter()).isEqualTo("dc1"); - assertThat(node1.getRack()).isEqualTo("rack1"); - assertThat(node1.getSchemaVersion()).isEqualTo(schemaVersion1); - assertThat(node2.getDatacenter()).isEqualTo("dc1"); - assertThat(node2.getRack()).isEqualTo("rack2"); - assertThat(node2.getSchemaVersion()).isEqualTo(schemaVersion2); - assertThat(result.events).isEmpty(); - } - - @Test - public void should_ignore_duplicate_host_ids() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), - Collections.emptyMap(), - null, - null); - - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(node1.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack1") - .withHostId(node1.getHostId()) - .build(), - DefaultNodeInfo.builder() - .withEndPoint(node2.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack2") - .withHostId(node2.getHostId()) - .build(), - // Duplicate host id for node 2, should be ignored: - DefaultNodeInfo.builder() - .withEndPoint(node2.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack3") - .withHostId(node2.getHostId()) - .build()); - FullNodeListRefresh refresh = new FullNodeListRefresh(newInfos); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()) - .containsOnlyKeys(node1.getHostId(), node2.getHostId()); - assertThat(node1.getDatacenter()).isEqualTo("dc1"); - assertThat(node1.getRack()).isEqualTo("rack1"); - assertThat(node2.getDatacenter()).isEqualTo("dc1"); - assertThat(node2.getRack()).isEqualTo("rack2"); - assertThat(result.events).isEmpty(); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefreshTest.java deleted file mode 100644 index 3787bf8fe10..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefreshTest.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Map; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class InitialNodeListRefreshTest { - - @Mock private InternalDriverContext context; - @Mock protected MetricsFactory metricsFactory; - @Mock private ChannelFactory channelFactory; - - private DefaultNode contactPoint1; - private DefaultNode contactPoint2; - private EndPoint endPoint3; - private UUID hostId1; - private UUID hostId2; - private UUID hostId3; - private UUID hostId4; - private UUID hostId5; - - @Before - public void setup() { - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(context.getChannelFactory()).thenReturn(channelFactory); - - contactPoint1 = TestNodeFactory.newContactPoint(1, context); - contactPoint2 = TestNodeFactory.newContactPoint(2, context); - - endPoint3 = TestNodeFactory.newEndPoint(3); - hostId1 = UUID.randomUUID(); - hostId2 = UUID.randomUUID(); - hostId3 = UUID.randomUUID(); - hostId4 = UUID.randomUUID(); - hostId5 = UUID.randomUUID(); - } - - @Test - public void should_copy_contact_points_on_first_endpoint_match_only() { - // Given - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - 
.withEndPoint(contactPoint1.getEndPoint()) - // in practice there are more fields, but hostId is enough to validate the logic - .withHostId(hostId1) - .build(), - DefaultNodeInfo.builder() - .withEndPoint(contactPoint2.getEndPoint()) - .withHostId(hostId2) - .build(), - DefaultNodeInfo.builder().withEndPoint(endPoint3).withHostId(hostId3).build(), - DefaultNodeInfo.builder() - // address translator can translate node addresses to the same endpoints - .withEndPoint(contactPoint2.getEndPoint()) - .withHostId(hostId4) - .build(), - DefaultNodeInfo.builder() - // address translator can translate node addresses to the same endpoints - .withEndPoint(endPoint3) - .withHostId(hostId5) - .build()); - InitialNodeListRefresh refresh = - new InitialNodeListRefresh(newInfos, ImmutableSet.of(contactPoint1, contactPoint2)); - - // When - MetadataRefresh.Result result = refresh.compute(DefaultMetadata.EMPTY, false, context); - - // Then - // contact points have been copied to the metadata, and completed with missing information - Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).containsOnlyKeys(hostId1, hostId2, hostId3, hostId4, hostId5); - assertThat(newNodes.get(hostId1)).isEqualTo(contactPoint1); - assertThat(contactPoint1.getHostId()).isEqualTo(hostId1); - assertThat(newNodes.get(hostId2)).isEqualTo(contactPoint2); - assertThat(contactPoint2.getHostId()).isEqualTo(hostId2); - // And - // node has been added for the new endpoint - assertThat(newNodes.get(hostId3).getEndPoint()).isEqualTo(endPoint3); - assertThat(newNodes.get(hostId3).getHostId()).isEqualTo(hostId3); - // And - // nodes have been added for duplicated endpoints - assertThat(newNodes.get(hostId4).getEndPoint()).isEqualTo(contactPoint2.getEndPoint()); - assertThat(newNodes.get(hostId4).getHostId()).isEqualTo(hostId4); - assertThat(newNodes.get(hostId5).getEndPoint()).isEqualTo(endPoint3); - assertThat(newNodes.get(hostId5).getHostId()).isEqualTo(hostId5); - assertThat(result.events) - 
.containsExactlyInAnyOrder( - NodeStateEvent.added((DefaultNode) newNodes.get(hostId3)), - NodeStateEvent.added((DefaultNode) newNodes.get(hostId4)), - NodeStateEvent.added((DefaultNode) newNodes.get(hostId5))); - } - - @Test - public void should_add_other_nodes() { - // Given - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(contactPoint1.getEndPoint()) - // in practice there are more fields, but hostId is enough to validate the logic - .withHostId(hostId1) - .build(), - DefaultNodeInfo.builder() - .withEndPoint(contactPoint2.getEndPoint()) - .withHostId(hostId2) - .build(), - DefaultNodeInfo.builder().withEndPoint(endPoint3).withHostId(hostId3).build()); - InitialNodeListRefresh refresh = - new InitialNodeListRefresh(newInfos, ImmutableSet.of(contactPoint1, contactPoint2)); - - // When - MetadataRefresh.Result result = refresh.compute(DefaultMetadata.EMPTY, false, context); - - // Then - // new node created in addition to the contact points - Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).containsOnlyKeys(hostId1, hostId2, hostId3); - Node node3 = newNodes.get(hostId3); - assertThat(node3.getEndPoint()).isEqualTo(endPoint3); - assertThat(node3.getHostId()).isEqualTo(hostId3); - } - - @Test - public void should_ignore_duplicate_host_ids() { - // Given - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(contactPoint1.getEndPoint()) - // in practice there are more fields, but hostId is enough to validate the logic - .withHostId(hostId1) - .withDatacenter("dc1") - .build(), - DefaultNodeInfo.builder() - .withEndPoint(contactPoint1.getEndPoint()) - .withDatacenter("dc2") - .withHostId(hostId1) - .build()); - InitialNodeListRefresh refresh = - new InitialNodeListRefresh(newInfos, ImmutableSet.of(contactPoint1)); - - // When - MetadataRefresh.Result result = refresh.compute(DefaultMetadata.EMPTY, false, context); - - // Then - // only the first nodeInfo should have been copied - 
Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).containsOnlyKeys(hostId1); - assertThat(newNodes.get(hostId1)).isEqualTo(contactPoint1); - assertThat(contactPoint1.getHostId()).isEqualTo(hostId1); - assertThat(contactPoint1.getDatacenter()).isEqualTo("dc1"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java deleted file mode 100644 index 1a0292e3947..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy.DistanceReporter; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Map; -import java.util.Objects; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class 
LoadBalancingPolicyWrapperTest { - - private DefaultNode node1; - private DefaultNode node2; - private DefaultNode node3; - - private Set contactPoints; - private Queue defaultPolicyQueryPlan; - - @Mock private InternalDriverContext context; - @Mock private LoadBalancingPolicy policy1; - @Mock private LoadBalancingPolicy policy2; - @Mock private LoadBalancingPolicy policy3; - private EventBus eventBus; - @Mock private MetadataManager metadataManager; - @Mock private Metadata metadata; - @Mock protected MetricsFactory metricsFactory; - @Captor private ArgumentCaptor> initNodesCaptor; - - private LoadBalancingPolicyWrapper wrapper; - - @Before - public void setup() { - when(context.getMetricsFactory()).thenReturn(metricsFactory); - - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - node3 = TestNodeFactory.newNode(3, context); - - contactPoints = ImmutableSet.of(node1, node2); - Map allNodes = - ImmutableMap.of( - Objects.requireNonNull(node1.getHostId()), node1, - Objects.requireNonNull(node2.getHostId()), node2, - Objects.requireNonNull(node3.getHostId()), node3); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(metadata.getNodes()).thenReturn(allNodes); - when(metadataManager.getContactPoints()).thenReturn(contactPoints); - when(context.getMetadataManager()).thenReturn(metadataManager); - - defaultPolicyQueryPlan = Lists.newLinkedList(ImmutableList.of(node3, node2, node1)); - when(policy1.newQueryPlan(null, null)).thenReturn(defaultPolicyQueryPlan); - - eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - - wrapper = - new LoadBalancingPolicyWrapper( - context, - ImmutableMap.of( - DriverExecutionProfile.DEFAULT_NAME, - policy1, - "profile1", - policy1, - "profile2", - policy2, - "profile3", - policy3)); - } - - @Test - public void should_build_query_plan_from_contact_points_before_init() { - // When - Queue queryPlan = wrapper.newQueryPlan(); - - // Then - for 
(LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy, never()).newQueryPlan(null, null); - } - assertThat(queryPlan).hasSameElementsAs(contactPoints); - } - - @Test - public void should_fetch_query_plan_from_policy_after_init() { - // Given - wrapper.init(); - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy).init(anyMap(), any(DistanceReporter.class)); - } - - // When - Queue queryPlan = wrapper.newQueryPlan(); - - // Then - // no-arg newQueryPlan() uses the default profile - verify(policy1).newQueryPlan(null, null); - assertThat(queryPlan).isEqualTo(defaultPolicyQueryPlan); - } - - @Test - public void should_init_policies_with_all_nodes() { - // Given - node1.state = NodeState.UP; - node2.state = NodeState.UNKNOWN; - node3.state = NodeState.DOWN; - - // When - wrapper.init(); - - // Then - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy).init(initNodesCaptor.capture(), any(DistanceReporter.class)); - Map initNodes = initNodesCaptor.getValue(); - assertThat(initNodes.values()).containsOnly(node1, node2, node3); - } - } - - @Test - public void should_propagate_distances_from_policies() { - // Given - wrapper.init(); - ArgumentCaptor captor1 = ArgumentCaptor.forClass(DistanceReporter.class); - verify(policy1).init(anyMap(), captor1.capture()); - DistanceReporter distanceReporter1 = captor1.getValue(); - ArgumentCaptor captor2 = ArgumentCaptor.forClass(DistanceReporter.class); - verify(policy2).init(anyMap(), captor2.capture()); - DistanceReporter distanceReporter2 = captor1.getValue(); - ArgumentCaptor captor3 = ArgumentCaptor.forClass(DistanceReporter.class); - verify(policy3).init(anyMap(), captor3.capture()); - DistanceReporter distanceReporter3 = captor3.getValue(); - - InOrder inOrder = inOrder(eventBus); - - // When - distanceReporter1.setDistance(node1, NodeDistance.REMOTE); - - // Then - // first event defines the distance 
- inOrder.verify(eventBus).fire(new DistanceEvent(NodeDistance.REMOTE, node1)); - - // When - distanceReporter2.setDistance(node1, NodeDistance.REMOTE); - - // Then - // event is ignored if the node is already at this distance - inOrder.verify(eventBus, times(0)).fire(any(DistanceEvent.class)); - - // When - distanceReporter2.setDistance(node1, NodeDistance.LOCAL); - - // Then - // event is applied if it sets a smaller distance - inOrder.verify(eventBus).fire(new DistanceEvent(NodeDistance.LOCAL, node1)); - - // When - distanceReporter3.setDistance(node1, NodeDistance.IGNORED); - - // Then - // event is ignored if the node is already at a closer distance - inOrder.verify(eventBus, times(0)).fire(any(DistanceEvent.class)); - } - - @Test - public void should_not_propagate_node_states_to_policies_until_init() { - // When - eventBus.fire(NodeStateEvent.changed(NodeState.UNKNOWN, NodeState.UP, node1)); - - // Then - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy, never()).onUp(node1); - } - } - - @Test - public void should_propagate_node_states_to_policies_after_init() { - // Given - wrapper.init(); - - // When - eventBus.fire(NodeStateEvent.changed(NodeState.UNKNOWN, NodeState.UP, node1)); - - // Then - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy).onUp(node1); - } - } - - @Test - public void should_accumulate_events_during_init_and_replay() throws InterruptedException { - // Given - // Hack to obtain concurrency: the main thread releases another thread and blocks; then the - // other thread fires an event on the bus and unblocks the main thread. 
- CountDownLatch eventLatch = new CountDownLatch(1); - CountDownLatch initLatch = new CountDownLatch(1); - - // When - Runnable runnable = - () -> { - try { - eventLatch.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - eventBus.fire(NodeStateEvent.changed(NodeState.UNKNOWN, NodeState.DOWN, node1)); - initLatch.countDown(); - }; - Thread thread = new Thread(runnable); - thread.start(); - wrapper.init(); - - // Then - // unblock the thread that will fire the event, and waits until it finishes - eventLatch.countDown(); - boolean ok = initLatch.await(500, TimeUnit.MILLISECONDS); - assertThat(ok).isTrue(); - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy).onDown(node1); - } - thread.join(500); - assertThat(thread.isAlive()).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java deleted file mode 100644 index f9a909400f9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import io.netty.channel.DefaultEventLoopGroup; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import 
java.util.concurrent.TimeUnit; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class MetadataManagerTest { - - // Don't use 1 because that's the default when no contact points are provided - private static final EndPoint END_POINT2 = TestNodeFactory.newEndPoint(2); - private static final EndPoint END_POINT3 = TestNodeFactory.newEndPoint(3); - - @Mock private InternalDriverContext context; - @Mock private NettyOptions nettyOptions; - @Mock private ControlConnection controlConnection; - @Mock private TopologyMonitor topologyMonitor; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private EventBus eventBus; - @Mock private SchemaQueriesFactory schemaQueriesFactory; - @Mock private SchemaParserFactory schemaParserFactory; - @Mock protected MetricsFactory metricsFactory; - - private DefaultEventLoopGroup adminEventLoopGroup; - - private TestMetadataManager metadataManager; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - adminEventLoopGroup = new DefaultEventLoopGroup(1); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - when(context.getNettyOptions()).thenReturn(nettyOptions); - - when(context.getTopologyMonitor()).thenReturn(topologyMonitor); - when(context.getControlConnection()).thenReturn(controlConnection); - - when(defaultProfile.getDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW)) - .thenReturn(Duration.ZERO); - when(defaultProfile.getInt(DefaultDriverOption.METADATA_SCHEMA_MAX_EVENTS)).thenReturn(1); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - when(context.getEventBus()).thenReturn(eventBus); - when(context.getSchemaQueriesFactory()).thenReturn(schemaQueriesFactory); - when(context.getSchemaParserFactory()).thenReturn(schemaParserFactory); - - 
when(context.getMetricsFactory()).thenReturn(metricsFactory); - - metadataManager = new TestMetadataManager(context); - } - - @After - public void teardown() { - adminEventLoopGroup.shutdownGracefully(100, 200, TimeUnit.MILLISECONDS); - } - - @Test - public void should_add_contact_points() { - // When - metadataManager.addContactPoints(ImmutableSet.of(END_POINT2)); - - // Then - assertThat(metadataManager.getContactPoints()) - .extracting(Node::getEndPoint) - .containsOnly(END_POINT2); - assertThat(metadataManager.wasImplicitContactPoint()).isFalse(); - } - - @Test - public void should_use_default_if_no_contact_points_provided() { - // When - metadataManager.addContactPoints(Collections.emptySet()); - - // Then - assertThat(metadataManager.getContactPoints()) - .extracting(Node::getEndPoint) - .containsOnly(MetadataManager.DEFAULT_CONTACT_POINT); - assertThat(metadataManager.wasImplicitContactPoint()).isTrue(); - } - - @Test - public void should_copy_contact_points_on_refresh_of_all_nodes() { - // Given - // Run previous scenario to trigger the addition of the default contact point: - should_use_default_if_no_contact_points_provided(); - - NodeInfo info1 = mock(NodeInfo.class); - NodeInfo info2 = mock(NodeInfo.class); - List infos = ImmutableList.of(info1, info2); - when(topologyMonitor.refreshNodeList()).thenReturn(CompletableFuture.completedFuture(infos)); - - // When - CompletionStage refreshNodesFuture = metadataManager.refreshNodes(); - waitForPendingAdminTasks(() -> metadataManager.refreshes.size() == 1); - - // Then - assertThatStage(refreshNodesFuture).isSuccess(); - assertThat(metadataManager.refreshes).hasSize(1); - InitialNodeListRefresh refresh = (InitialNodeListRefresh) metadataManager.refreshes.get(0); - assertThat(refresh.contactPoints) - .extracting(Node::getEndPoint) - .containsOnly(MetadataManager.DEFAULT_CONTACT_POINT); - assertThat(refresh.nodeInfos).containsExactlyInAnyOrder(info1, info2); - } - - @Test - public void should_refresh_all_nodes() 
{ - // Given - // Run previous scenario to trigger the addition of the default contact point and a first - // refresh: - should_copy_contact_points_on_refresh_of_all_nodes(); - // Discard that first refresh, we don't really care about it in the context of this test, only - // that the next one won't be the first - metadataManager.refreshes.clear(); - - NodeInfo info1 = mock(NodeInfo.class); - NodeInfo info2 = mock(NodeInfo.class); - List infos = ImmutableList.of(info1, info2); - when(topologyMonitor.refreshNodeList()).thenReturn(CompletableFuture.completedFuture(infos)); - - // When - CompletionStage refreshNodesFuture = metadataManager.refreshNodes(); - waitForPendingAdminTasks(() -> metadataManager.refreshes.size() == 1); - - // Then - assertThatStage(refreshNodesFuture).isSuccess(); - assertThat(metadataManager.refreshes).hasSize(1); - FullNodeListRefresh refresh = (FullNodeListRefresh) metadataManager.refreshes.get(0); - assertThat(refresh.nodeInfos).containsExactlyInAnyOrder(info1, info2); - } - - @Test - public void should_refresh_single_node() { - // Given - Node node = TestNodeFactory.newNode(2, context); - NodeInfo info = mock(NodeInfo.class); - when(info.getDatacenter()).thenReturn("dc1"); - when(info.getHostId()).thenReturn(UUID.randomUUID()); - when(info.getEndPoint()).thenReturn(node.getEndPoint()); - when(topologyMonitor.refreshNode(node)) - .thenReturn(CompletableFuture.completedFuture(Optional.of(info))); - - // When - CompletionStage refreshNodeFuture = metadataManager.refreshNode(node); - - // Then - // the info should have been copied to the node - assertThatStage(refreshNodeFuture).isSuccess(); - verify(info, timeout(500)).getDatacenter(); - assertThat(node.getDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_ignore_node_refresh_if_topology_monitor_does_not_have_info() { - // Given - Node node = mock(Node.class); - when(topologyMonitor.refreshNode(node)) - .thenReturn(CompletableFuture.completedFuture(Optional.empty())); - - // 
When - CompletionStage refreshNodeFuture = metadataManager.refreshNode(node); - - // Then - assertThatStage(refreshNodeFuture).isSuccess(); - } - - @Test - public void should_add_node() { - // Given - InetSocketAddress broadcastRpcAddress = ((InetSocketAddress) END_POINT2.resolve()); - NodeInfo info = mock(NodeInfo.class); - when(info.getBroadcastRpcAddress()).thenReturn(Optional.of(broadcastRpcAddress)); - when(topologyMonitor.getNewNodeInfo(broadcastRpcAddress)) - .thenReturn(CompletableFuture.completedFuture(Optional.of(info))); - - // When - metadataManager.addNode(broadcastRpcAddress); - waitForPendingAdminTasks(() -> metadataManager.addNodeCount == 1); - - // Then - assertThat(metadataManager.refreshes).hasSize(1); - AddNodeRefresh refresh = (AddNodeRefresh) metadataManager.refreshes.get(0); - assertThat(refresh.newNodeInfo).isEqualTo(info); - } - - @Test - public void should_not_add_node_if_broadcast_rpc_address_does_not_match() { - // Given - InetSocketAddress broadcastRpcAddress2 = ((InetSocketAddress) END_POINT2.resolve()); - InetSocketAddress broadcastRpcAddress3 = ((InetSocketAddress) END_POINT3.resolve()); - NodeInfo info = mock(NodeInfo.class); - when(topologyMonitor.getNewNodeInfo(broadcastRpcAddress2)) - .thenReturn(CompletableFuture.completedFuture(Optional.of(info))); - when(info.getBroadcastRpcAddress()) - .thenReturn( - Optional.of(broadcastRpcAddress3) // Does not match the address we got the info with - ); - - // When - metadataManager.addNode(broadcastRpcAddress2); - waitForPendingAdminTasks(() -> metadataManager.addNodeCount == 1); - - // Then - assertThat(metadataManager.refreshes).isEmpty(); - } - - @Test - public void should_not_add_node_if_topology_monitor_does_not_have_info() { - // Given - InetSocketAddress broadcastRpcAddress2 = ((InetSocketAddress) END_POINT2.resolve()); - when(topologyMonitor.getNewNodeInfo(broadcastRpcAddress2)) - .thenReturn(CompletableFuture.completedFuture(Optional.empty())); - - // When - 
metadataManager.addNode(broadcastRpcAddress2); - waitForPendingAdminTasks(() -> metadataManager.addNodeCount == 1); - - // Then - assertThat(metadataManager.refreshes).isEmpty(); - } - - @Test - public void should_remove_node() { - // Given - InetSocketAddress broadcastRpcAddress2 = ((InetSocketAddress) END_POINT2.resolve()); - - // When - metadataManager.removeNode(broadcastRpcAddress2); - waitForPendingAdminTasks(() -> metadataManager.removeNodeCount == 1); - - // Then - assertThat(metadataManager.refreshes).hasSize(1); - RemoveNodeRefresh refresh = (RemoveNodeRefresh) metadataManager.refreshes.get(0); - assertThat(refresh.broadcastRpcAddressToRemove).isEqualTo(broadcastRpcAddress2); - } - - @Test - public void refreshSchema_should_work() { - // Given - IllegalStateException expectedException = new IllegalStateException("Error we're testing"); - when(schemaQueriesFactory.newInstance()).thenThrow(expectedException); - when(topologyMonitor.refreshNodeList()) - .thenReturn(CompletableFuture.completedFuture(ImmutableList.of(mock(NodeInfo.class)))); - when(topologyMonitor.checkSchemaAgreement()) - .thenReturn(CompletableFuture.completedFuture(Boolean.TRUE)); - when(controlConnection.init(anyBoolean(), anyBoolean(), anyBoolean())) - .thenReturn(CompletableFuture.completedFuture(null)); - metadataManager.refreshNodes(); // required internal state setup for this - waitForPendingAdminTasks(() -> metadataManager.refreshes.size() == 1); // sanity check - - // When - CompletionStage result = - metadataManager.refreshSchema("foo", true, true); - - // Then - waitForPendingAdminTasks(() -> result.toCompletableFuture().isDone()); - assertThatStage(result).isFailed(t -> assertThat(t).isEqualTo(expectedException)); - } - - private static class TestMetadataManager extends MetadataManager { - - private List refreshes = new CopyOnWriteArrayList<>(); - private volatile int addNodeCount = 0; - private volatile int removeNodeCount = 0; - - public 
TestMetadataManager(InternalDriverContext context) { - super(context); - } - - @Override - Void apply(MetadataRefresh refresh) { - // Do not execute refreshes, just store them for inspection in the test - refreshes.add(refresh); - return null; - } - - @Override - public void addNode(InetSocketAddress broadcastRpcAddress) { - // Keep track of addNode calls for condition checking - synchronized (this) { - ++addNodeCount; - } - super.addNode(broadcastRpcAddress); - } - - @Override - public void removeNode(InetSocketAddress broadcastRpcAddress) { - // Keep track of removeNode calls for condition checking - synchronized (this) { - ++removeNodeCount; - } - super.removeNode(broadcastRpcAddress); - } - } - - // Wait for all the tasks on the pool's admin executor to complete. - private void waitForPendingAdminTasks(Callable condition) { - await().atMost(500, TimeUnit.MILLISECONDS).until(condition); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListenerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListenerTest.java deleted file mode 100644 index 8e9f591510a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListenerTest.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.session.Session; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class MultiplexingNodeStateListenerTest { - - @Mock private NodeStateListener child1; - @Mock private NodeStateListener child2; - @Mock private Node node; - @Mock private Session session; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - private Logger logger; - private Level initialLogLevel; - - @Before - public void addAppenders() { - logger = (Logger) LoggerFactory.getLogger(MultiplexingNodeStateListener.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - } - - @After - public void removeAppenders() { - logger.detachAppender(appender); - 
logger.setLevel(initialLogLevel); - } - - @Test - public void should_register() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(); - // when - listener.register(child1); - listener.register(child2); - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_listener_via_constructor() { - // given - MultiplexingNodeStateListener listener = - new MultiplexingNodeStateListener(new MultiplexingNodeStateListener(child1, child2)); - // when - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_listener_via_register() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(); - // when - listener.register(new MultiplexingNodeStateListener(child1, child2)); - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_notify_onUp() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onUp(node); - // when - listener.onUp(node); - // then - verify(child1).onUp(node); - verify(child2).onUp(node); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onUp event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onDown() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onDown(node); - // when - listener.onDown(node); - // then - verify(child1).onDown(node); - verify(child2).onDown(node); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onDown event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onAdd() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onAdd(node); - // when - listener.onAdd(node); - // then - verify(child1).onAdd(node); - verify(child2).onAdd(node); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onAdd event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onRemove() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onRemove(node); - // when - listener.onRemove(node); - // then - verify(child1).onRemove(node); - verify(child2).onRemove(node); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onRemove event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onSessionReady() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onSessionReady(session); - // when - listener.onSessionReady(session); - // then - verify(child1).onSessionReady(session); - verify(child2).onSessionReady(session); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onSessionReady event. (NullPointerException: null)"); - } - - @Test - public void should_notify_close() throws Exception { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - Exception child1Error = new NullPointerException(); - willThrow(child1Error).given(child1).close(); - // when - listener.close(); - // then - verify(child1).close(); - verify(child2).close(); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while closing node state listener child1. (NullPointerException: null)"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java deleted file mode 100644 index d99b06a33ae..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java +++ /dev/null @@ -1,546 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import 
com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.util.concurrent.Future; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.Collections; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class NodeStateManagerTest { - private static final InetSocketAddress NEW_ADDRESS = new InetSocketAddress("127.0.0.3", 9042); - - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private NettyOptions nettyOptions; - @Mock private MetadataManager metadataManager; - @Mock protected MetricsFactory metricsFactory; - private DefaultNode node1, node2; - private EventBus eventBus; - private DefaultEventLoopGroup adminEventLoopGroup; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - // Disable debouncing by default, tests that need it will override - when(defaultProfile.getDuration(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW)) - .thenReturn(Duration.ofSeconds(0)); - when(defaultProfile.getInt(DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS)).thenReturn(1); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - this.eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - - adminEventLoopGroup = new DefaultEventLoopGroup(1, new BlockingOperation.SafeThreadFactory()); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - when(context.getNettyOptions()).thenReturn(nettyOptions); - - 
when(context.getMetricsFactory()).thenReturn(metricsFactory); - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - ImmutableMap nodes = - ImmutableMap.builder() - .put(node1.getHostId(), node1) - .put(node2.getHostId(), node2) - .build(); - Metadata metadata = new DefaultMetadata(nodes, Collections.emptyMap(), null, null); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(metadataManager.refreshNode(any(Node.class))) - .thenReturn(CompletableFuture.completedFuture(null)); - when(context.getMetadataManager()).thenReturn(metadataManager); - } - - @After - public void teardown() { - adminEventLoopGroup.shutdownGracefully(100, 200, TimeUnit.MILLISECONDS); - } - - @Test - public void should_ignore_up_event_if_node_is_already_up_or_forced_down() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.UP, NodeState.FORCED_DOWN)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.suggestUp(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(oldState); - } - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_apply_up_event_if_node_is_unknown_or_down() { - new NodeStateManager(context); - - int i = 0; - for (NodeState oldState : ImmutableList.of(NodeState.UNKNOWN, NodeState.DOWN)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.suggestUp(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - if (oldState != NodeState.UNKNOWN) { - verify(metadataManager, times(++i)).refreshNode(node1); - } - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.UP, node1)); - } - } - - @Test - public void should_refresh_node_list_if_up_event_and_not_in_metadata() { - // Given - new NodeStateManager(context); - - // When - 
eventBus.fire(TopologyEvent.suggestUp(NEW_ADDRESS)); - waitForPendingAdminTasks(); - - // Then - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - verify(metadataManager).refreshNodes(); - } - - @Test - public void should_ignore_down_event_if_node_is_down_or_forced_down() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.DOWN, NodeState.FORCED_DOWN)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(oldState); - } - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_ignore_down_event_if_node_has_active_connections() { - new NodeStateManager(context); - node1.state = NodeState.UP; - eventBus.fire(ChannelEvent.channelOpened(node1)); - waitForPendingAdminTasks(); - assertThat(node1.openConnections).isEqualTo(1); - - // When - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_apply_down_event_if_node_has_no_active_connections() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.UP, NodeState.UNKNOWN)) { - // Given - node1.state = oldState; - assertThat(node1.openConnections).isEqualTo(0); - - // When - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.DOWN); - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.DOWN, node1)); - } - } - - @Test - public void should_ignore_down_event_if_not_in_metadata() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.suggestDown(NEW_ADDRESS)); - 
waitForPendingAdminTasks(); - - // Then - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - verify(metadataManager, never()).addNode(NEW_ADDRESS); - } - - @Test - public void should_ignore_force_down_event_if_already_forced_down() { - // Given - new NodeStateManager(context); - node1.state = NodeState.FORCED_DOWN; - - // When - eventBus.fire(TopologyEvent.forceDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.FORCED_DOWN); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_apply_force_down_event_over_any_other_state() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.UNKNOWN, NodeState.DOWN, NodeState.UP)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.forceDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.FORCED_DOWN); - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.FORCED_DOWN, node1)); - } - } - - @Test - public void should_ignore_force_down_event_if_not_in_metadata() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.forceDown(NEW_ADDRESS)); - waitForPendingAdminTasks(); - - // Then - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - verify(metadataManager, never()).addNode(NEW_ADDRESS); - } - - @Test - public void should_ignore_force_up_event_if_node_is_already_up() { - // Given - new NodeStateManager(context); - node1.state = NodeState.UP; - - // When - eventBus.fire(TopologyEvent.forceUp(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_apply_force_up_event_if_node_is_not_up() { - new NodeStateManager(context); - - int i = 
0; - for (NodeState oldState : - ImmutableList.of(NodeState.UNKNOWN, NodeState.DOWN, NodeState.FORCED_DOWN)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.forceUp(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.UP, node1)); - if (oldState != NodeState.UNKNOWN) { - verify(metadataManager, times(++i)).refreshNode(node1); - } - } - } - - @Test - public void should_add_node_if_force_up_and_not_in_metadata() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.forceUp(NEW_ADDRESS)); - waitForPendingAdminTasks(); - - // Then - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - verify(metadataManager).addNode(NEW_ADDRESS); - } - - @Test - public void should_notify_metadata_of_node_addition() { - // Given - new NodeStateManager(context); - InetSocketAddress newAddress = NEW_ADDRESS; - - // When - eventBus.fire(TopologyEvent.suggestAdded(newAddress)); - waitForPendingAdminTasks(); - - // Then - verify(metadataManager).addNode(newAddress); - } - - @Test - public void should_ignore_addition_of_existing_node() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.suggestAdded(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - verify(metadataManager, never()).addNode(any(InetSocketAddress.class)); - } - - @Test - public void should_notify_metadata_of_node_removal() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.suggestRemoved(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - verify(metadataManager).removeNode(node1.getBroadcastRpcAddress().get()); - } - - @Test - public void should_ignore_removal_of_nonexistent_node() { - // Given - new NodeStateManager(context); - InetSocketAddress newAddress = NEW_ADDRESS; - 
- // When - eventBus.fire(TopologyEvent.suggestRemoved(newAddress)); - waitForPendingAdminTasks(); - - // Then - verify(metadataManager, never()).removeNode(any(InetSocketAddress.class)); - } - - @Test - public void should_coalesce_topology_events() { - // Given - when(defaultProfile.getDuration(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW)) - .thenReturn(Duration.ofDays(1)); - when(defaultProfile.getInt(DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS)).thenReturn(5); - new NodeStateManager(context); - node1.state = NodeState.FORCED_DOWN; - node2.state = NodeState.DOWN; - - // When - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - eventBus.fire(TopologyEvent.forceUp(node1.getBroadcastRpcAddress().get())); - eventBus.fire(TopologyEvent.suggestDown(node2.getBroadcastRpcAddress().get())); - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - eventBus.fire(TopologyEvent.suggestUp(node2.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - // down / forceUp / down => keep the last forced event => forceUp - assertThat(node1.state).isEqualTo(NodeState.UP); - // down / up => keep the last => up - assertThat(node2.state).isEqualTo(NodeState.UP); - } - - @Test - public void should_track_open_connections() { - new NodeStateManager(context); - - assertThat(node1.openConnections).isEqualTo(0); - - eventBus.fire(ChannelEvent.channelOpened(node1)); - eventBus.fire(ChannelEvent.channelOpened(node1)); - waitForPendingAdminTasks(); - assertThat(node1.openConnections).isEqualTo(2); - - eventBus.fire(ChannelEvent.channelClosed(node1)); - waitForPendingAdminTasks(); - assertThat(node1.openConnections).isEqualTo(1); - } - - @Test - public void should_mark_node_up_if_down_or_unknown_and_connection_opened() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.DOWN, NodeState.UNKNOWN)) { - // Given - node1.state = oldState; - - // When - 
eventBus.fire(ChannelEvent.channelOpened(node1)); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.UP, node1)); - } - } - - @Test - public void should_not_mark_node_up_if_forced_down_and_connection_opened() { - // Given - new NodeStateManager(context); - node1.state = NodeState.FORCED_DOWN; - - // When - eventBus.fire(ChannelEvent.channelOpened(node1)); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.FORCED_DOWN); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_track_reconnections() { - new NodeStateManager(context); - - assertThat(node1.reconnections).isEqualTo(0); - - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - waitForPendingAdminTasks(); - assertThat(node1.reconnections).isEqualTo(2); - - eventBus.fire(ChannelEvent.reconnectionStopped(node1)); - waitForPendingAdminTasks(); - assertThat(node1.reconnections).isEqualTo(1); - } - - @Test - public void should_mark_node_down_if_reconnection_starts_with_no_connections() { - new NodeStateManager(context); - - node1.state = NodeState.UP; - node1.openConnections = 1; - - eventBus.fire(ChannelEvent.channelClosed(node1)); - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - waitForPendingAdminTasks(); - - assertThat(node1.state).isEqualTo(NodeState.DOWN); - verify(eventBus).fire(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, node1)); - } - - @Test - public void should_mark_node_down_if_no_connections_and_reconnection_already_started() { - new NodeStateManager(context); - - node1.state = NodeState.UP; - node1.openConnections = 1; - - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - eventBus.fire(ChannelEvent.channelClosed(node1)); - waitForPendingAdminTasks(); - - assertThat(node1.state).isEqualTo(NodeState.DOWN); - 
verify(eventBus).fire(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, node1)); - } - - @Test - public void should_keep_node_up_if_reconnection_starts_with_some_connections() { - new NodeStateManager(context); - - node1.state = NodeState.UP; - node1.openConnections = 2; - - eventBus.fire(ChannelEvent.channelClosed(node1)); - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - waitForPendingAdminTasks(); - - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_ignore_events_when_closed() throws Exception { - NodeStateManager manager = new NodeStateManager(context); - assertThat(node1.reconnections).isEqualTo(0); - - manager.close(); - - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - waitForPendingAdminTasks(); - - assertThat(node1.reconnections).isEqualTo(0); - } - - // Wait for all the tasks on the pool's admin executor to complete. - private void waitForPendingAdminTasks() { - // This works because the event loop group is single-threaded - Future f = adminEventLoopGroup.schedule(() -> null, 5, TimeUnit.NANOSECONDS); - try { - Uninterruptibles.getUninterruptibly(f, 100, TimeUnit.MILLISECONDS); - } catch (ExecutionException e) { - fail("unexpected error", e.getCause()); - } catch (TimeoutException e) { - fail("timed out while waiting for admin tasks to complete", e); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidatorTest.java deleted file mode 100644 index c1a189259d7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidatorTest.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PeerRowValidatorTest { - - @DataProvider - public static Object[][] nullColumnsV1() { - return new Object[][] { - {"rpc_address"}, {"host_id"}, {"data_center"}, {"rack"}, {"tokens"}, {"schema_version"} - }; - } - - @DataProvider - public static Object[][] nullColumnsV2() { - return new Object[][] { - {"native_address"}, - {"native_port"}, - {"host_id"}, - {"data_center"}, - {"rack"}, - {"tokens"}, - {"schema_version"} - }; - } - - @Test - @UseDataProvider("nullColumnsV1") - public void should_fail_for_invalid_peer_v1(String nullColumn) { - assertThat(PeerRowValidator.isValid(mockRowV1(nullColumn))).isFalse(); - } - - @Test - @UseDataProvider("nullColumnsV2") - public void should_fail_for_invalid_peer_v2(String 
nullColumn) { - assertThat(PeerRowValidator.isValid(mockRowV2(nullColumn))).isFalse(); - } - - @Test - public void should_succeed_for_valid_peer_v1() { - AdminRow peerRow = mock(AdminRow.class); - when(peerRow.isNull("host_id")).thenReturn(false); - when(peerRow.isNull("rpc_address")).thenReturn(false); - when(peerRow.isNull("native_address")).thenReturn(true); - when(peerRow.isNull("native_port")).thenReturn(true); - when(peerRow.isNull("data_center")).thenReturn(false); - when(peerRow.isNull("rack")).thenReturn(false); - when(peerRow.isNull("tokens")).thenReturn(false); - when(peerRow.isNull("schema_version")).thenReturn(false); - - assertThat(PeerRowValidator.isValid(peerRow)).isTrue(); - } - - @Test - public void should_succeed_for_valid_peer_v2() { - AdminRow peerRow = mock(AdminRow.class); - when(peerRow.isNull("host_id")).thenReturn(false); - when(peerRow.isNull("rpc_address")).thenReturn(true); - when(peerRow.isNull("native_address")).thenReturn(false); - when(peerRow.isNull("native_port")).thenReturn(false); - when(peerRow.isNull("data_center")).thenReturn(false); - when(peerRow.isNull("rack")).thenReturn(false); - when(peerRow.isNull("tokens")).thenReturn(false); - when(peerRow.isNull("schema_version")).thenReturn(false); - - assertThat(PeerRowValidator.isValid(peerRow)).isTrue(); - } - - private AdminRow mockRowV1(String nullColumn) { - AdminRow peerRow = mock(AdminRow.class); - when(peerRow.isNull("host_id")).thenReturn(nullColumn.equals("host_id")); - when(peerRow.isNull("rpc_address")).thenReturn(nullColumn.equals("rpc_address")); - when(peerRow.isNull("native_address")).thenReturn(true); - when(peerRow.isNull("native_port")).thenReturn(true); - when(peerRow.isNull("data_center")).thenReturn(nullColumn.equals("data_center")); - when(peerRow.isNull("rack")).thenReturn(nullColumn.equals("rack")); - when(peerRow.isNull("tokens")).thenReturn(nullColumn.equals("tokens")); - 
when(peerRow.isNull("schema_version")).thenReturn(nullColumn.equals("schema_version")); - - return peerRow; - } - - private AdminRow mockRowV2(String nullColumn) { - AdminRow peerRow = mock(AdminRow.class); - when(peerRow.isNull("host_id")).thenReturn(nullColumn.equals("host_id")); - when(peerRow.isNull("native_address")).thenReturn(nullColumn.equals("native_address")); - when(peerRow.isNull("native_port")).thenReturn(nullColumn.equals("native_port")); - when(peerRow.isNull("rpc_address")).thenReturn(true); - when(peerRow.isNull("data_center")).thenReturn(nullColumn.equals("data_center")); - when(peerRow.isNull("rack")).thenReturn(nullColumn.equals("rack")); - when(peerRow.isNull("tokens")).thenReturn(nullColumn.equals("tokens")); - when(peerRow.isNull("schema_version")).thenReturn(nullColumn.equals("schema_version")); - - return peerRow; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java deleted file mode 100644 index f2a4b36a3c3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class RemoveNodeRefreshTest { - - @Mock private InternalDriverContext context; - @Mock protected MetricsFactory metricsFactory; - @Mock private ChannelFactory channelFactory; - - private DefaultNode node1; - private DefaultNode node2; - - @Before - public void setup() { - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(context.getChannelFactory()).thenReturn(channelFactory); - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - } - - @Test - public void should_remove_existing_node() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), - Collections.emptyMap(), - null, - null); - RemoveNodeRefresh refresh = new RemoveNodeRefresh(node2.getBroadcastRpcAddress().get()); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()).containsOnlyKeys(node1.getHostId()); - assertThat(result.events).containsExactly(NodeStateEvent.removed(node2)); - } - - @Test - public void should_not_remove_nonexistent_node() { - // Given - DefaultMetadata oldMetadata = - 
new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); - RemoveNodeRefresh refresh = new RemoveNodeRefresh(node2.getBroadcastRpcAddress().get()); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()).containsOnlyKeys(node1.getHostId()); - assertThat(result.events).isEmpty(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java deleted file mode 100644 index 5e0dfbd7802..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.netty.channel.EventLoop; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Arrays; -import java.util.Map; -import java.util.Objects; -import java.util.Queue; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class 
SchemaAgreementCheckerTest { - - private static final UUID VERSION1 = UUID.randomUUID(); - private static final UUID VERSION2 = UUID.randomUUID(); - - private static final UUID NODE_2_HOST_ID = UUID.randomUUID(); - - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultConfig; - @Mock private DriverChannel channel; - @Mock private EventLoop eventLoop; - @Mock private MetadataManager metadataManager; - @Mock private MetricsFactory metricsFactory; - @Mock private Metadata metadata; - @Mock private DefaultNode node1; - @Mock private DefaultNode node2; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(context.getMetricsFactory()).thenReturn(metricsFactory); - - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, NODE_2_HOST_ID, context); - - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT)) - .thenReturn(Duration.ofSeconds(1)); - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL)) - .thenReturn(Duration.ofMillis(200)); - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT)) - .thenReturn(Duration.ofSeconds(10)); - when(defaultConfig.getBoolean(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN)) - .thenReturn(true); - when(config.getDefaultProfile()).thenReturn(defaultConfig); - when(context.getConfig()).thenReturn(config); - - Map nodes = - ImmutableMap.of( - Objects.requireNonNull(node1.getHostId()), - node1, - Objects.requireNonNull(node2.getHostId()), - node2); - when(metadata.getNodes()).thenReturn(nodes); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(context.getMetadataManager()).thenReturn(metadataManager); - - node2.state = NodeState.UP; - - when(eventLoop.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class))) - .thenAnswer( - invocation -> { // Ignore delay and run immediately: - Runnable 
task = invocation.getArgument(0); - task.run(); - return null; - }); - when(channel.eventLoop()).thenReturn(eventLoop); - } - - @Test - public void should_skip_if_timeout_is_zero() { - // Given - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT)) - .thenReturn(Duration.ZERO); - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isFalse()); - } - - @Test - public void should_succeed_if_only_one_node() { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(/*empty*/ ))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @Test - public void should_succeed_if_versions_match_on_first_try() { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION1)))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @Test - public void should_ignore_down_peers() { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - node2.state = NodeState.DOWN; - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", 
mockResult(mockValidPeerRow(VERSION2)))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @DataProvider - public static Object[][] malformedPeer() { - return new Object[][] { - // missing host id - {mockPeerRow(null, VERSION2, true, true, true, true)}, - // missing schema version - {mockPeerRow(NODE_2_HOST_ID, null, true, true, true, true)}, - // missing datacenter - {mockPeerRow(NODE_2_HOST_ID, VERSION2, false, true, true, true)}, - // missing rack - {mockPeerRow(NODE_2_HOST_ID, VERSION2, true, false, true, true)}, - // missing RPC address - {mockPeerRow(NODE_2_HOST_ID, VERSION2, true, true, false, true)}, - // missing tokens - {mockPeerRow(NODE_2_HOST_ID, VERSION2, true, true, true, false)}, - }; - } - - @Test - @UseDataProvider("malformedPeer") - public void should_ignore_malformed_rows(AdminRow malformedPeer) { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(malformedPeer))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @Test - public void should_reschedule_if_versions_do_not_match_on_first_try() { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - // First round - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION2))), - - // Second round - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM 
system.peers", mockResult(mockValidPeerRow(VERSION1)))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @Test - public void should_fail_if_versions_do_not_match_after_timeout() { - // Given - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT)) - .thenReturn(Duration.ofNanos(10)); - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION1)))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isFalse()); - } - - /** Extend to mock the query execution logic. */ - private static class TestSchemaAgreementChecker extends SchemaAgreementChecker { - - private final Queue queries = new ArrayDeque<>(); - - TestSchemaAgreementChecker(DriverChannel channel, InternalDriverContext context) { - super(channel, context, "test"); - } - - private void stubQueries(StubbedQuery... 
queries) { - this.queries.addAll(Arrays.asList(queries)); - } - - @Override - protected CompletionStage query(String queryString) { - StubbedQuery nextQuery = queries.poll(); - assertThat(nextQuery).isNotNull(); - assertThat(queryString).isEqualTo(nextQuery.queryString); - return CompletableFuture.completedFuture(nextQuery.result); - } - } - - private static class StubbedQuery { - private final String queryString; - private final AdminResult result; - - private StubbedQuery(String queryString, AdminResult result) { - this.queryString = queryString; - this.result = result; - } - } - - private AdminRow mockLocalRow(@SuppressWarnings("SameParameterValue") UUID schemaVersion) { - AdminRow row = mock(AdminRow.class); - when(row.getUuid("host_id")).thenReturn(node1.getHostId()); - when(row.getUuid("schema_version")).thenReturn(schemaVersion); - return row; - } - - private AdminRow mockValidPeerRow(UUID schemaVersion) { - return mockPeerRow(node2.getHostId(), schemaVersion, true, true, true, true); - } - - private static AdminRow mockPeerRow( - UUID hostId, - UUID schemaVersion, - boolean hasDatacenter, - boolean hasRack, - boolean hasRpcAddress, - boolean hasTokens) { - AdminRow row = mock(AdminRow.class); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("schema_version")).thenReturn(schemaVersion); - when(row.isNull("schema_version")).thenReturn(schemaVersion == null); - when(row.isNull("data_center")).thenReturn(!hasDatacenter); - when(row.isNull("rack")).thenReturn(!hasRack); - when(row.isNull("tokens")).thenReturn(!hasTokens); - when(row.isNull("rpc_address")).thenReturn(!hasRpcAddress); - when(row.isNull("native_address")).thenReturn(true); - when(row.isNull("native_port")).thenReturn(true); - return row; - } - - private AdminResult mockResult(AdminRow... 
rows) { - AdminResult result = mock(AdminResult.class); - when(result.iterator()).thenReturn(Iterators.forArray(rows)); - return result; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java deleted file mode 100644 index 7986834bca2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.net.InetSocketAddress; -import java.util.UUID; - -public class TestNodeFactory { - - public static DefaultNode newNode(int lastIpByte, InternalDriverContext context) { - DefaultNode node = newContactPoint(lastIpByte, context); - node.hostId = UUID.randomUUID(); - node.broadcastRpcAddress = ((InetSocketAddress) node.getEndPoint().resolve()); - return node; - } - - public static DefaultNode newNode(int lastIpByte, UUID hostId, InternalDriverContext context) { - DefaultNode node = newContactPoint(lastIpByte, context); - node.hostId = hostId; - node.broadcastRpcAddress = ((InetSocketAddress) node.getEndPoint().resolve()); - return node; - } - - public static DefaultNode newContactPoint(int lastIpByte, InternalDriverContext context) { - DefaultEndPoint endPoint = newEndPoint(lastIpByte); - return new DefaultNode(endPoint, context); - } - - public static DefaultEndPoint newEndPoint(int lastByteOfIp) { - return new DefaultEndPoint(new InetSocketAddress("127.0.0." + lastByteOfIp, 9042)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/IndexMetadataTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/IndexMetadataTest.java deleted file mode 100644 index b772d243976..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/IndexMetadataTest.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import org.junit.Test; - -public class IndexMetadataTest { - - @Test - public void should_describe_custom_index_class_correctly() { - IndexMetadata indexMetadata = - new DefaultIndexMetadata( - CqlIdentifier.fromCql("ks1"), - CqlIdentifier.fromCql("myTable"), - CqlIdentifier.fromCql("myName"), - IndexKind.CUSTOM, - "myTarget", - ImmutableMap.of("class_name", "com.datastax.MyClass")); - String describe = indexMetadata.describe(true); - assertThat(describe) - .contains( - "CREATE CUSTOM INDEX myname ON ks1.mytable (myTarget)\n" - + "USING 'com.datastax.MyClass'"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListenerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListenerTest.java deleted file mode 100644 index a7dee02f5e3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListenerTest.java +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class 
MultiplexingSchemaChangeListenerTest { - - @Mock private SchemaChangeListener child1; - @Mock private SchemaChangeListener child2; - @Mock private Session session; - @Mock private KeyspaceMetadata keyspace1, keyspace2; - @Mock private TableMetadata table1, table2; - @Mock private UserDefinedType userDefinedType1, userDefinedType2; - @Mock private FunctionMetadata function1, function2; - @Mock private AggregateMetadata aggregate1, aggregate2; - @Mock private ViewMetadata view1, view2; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - private Logger logger; - private Level initialLogLevel; - - @Before - public void addAppenders() { - logger = (Logger) LoggerFactory.getLogger(MultiplexingSchemaChangeListener.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - } - - @After - public void removeAppenders() { - logger.detachAppender(appender); - logger.setLevel(initialLogLevel); - } - - @Test - public void should_register() { - // given - MultiplexingSchemaChangeListener listener = new MultiplexingSchemaChangeListener(); - // when - listener.register(child1); - listener.register(child2); - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_listener_via_constructor() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(new MultiplexingSchemaChangeListener(child1, child2)); - // when - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_listener_via_register() { - // given - MultiplexingSchemaChangeListener listener = new MultiplexingSchemaChangeListener(); - // when - listener.register(new MultiplexingSchemaChangeListener(child1, child2)); - // then - 
assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_notify_onKeyspaceCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onKeyspaceCreated(keyspace1); - // when - listener.onKeyspaceCreated(keyspace1); - // then - verify(child1).onKeyspaceCreated(keyspace1); - verify(child2).onKeyspaceCreated(keyspace1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onKeyspaceCreated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onKeyspaceDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onKeyspaceDropped(keyspace1); - // when - listener.onKeyspaceDropped(keyspace1); - // then - verify(child1).onKeyspaceDropped(keyspace1); - verify(child2).onKeyspaceDropped(keyspace1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onKeyspaceDropped event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onKeyspaceUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onKeyspaceUpdated(keyspace1, keyspace2); - // when - listener.onKeyspaceUpdated(keyspace1, keyspace2); - // then - verify(child1).onKeyspaceUpdated(keyspace1, keyspace2); - verify(child2).onKeyspaceUpdated(keyspace1, keyspace2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onKeyspaceUpdated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onTableCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onTableCreated(table1); - // when - listener.onTableCreated(table1); - // then - verify(child1).onTableCreated(table1); - verify(child2).onTableCreated(table1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onTableCreated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onTableDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onTableDropped(table1); - // when - listener.onTableDropped(table1); - // then - verify(child1).onTableDropped(table1); - verify(child2).onTableDropped(table1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onTableDropped event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onTableUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onTableUpdated(table1, table2); - // when - listener.onTableUpdated(table1, table2); - // then - verify(child1).onTableUpdated(table1, table2); - verify(child2).onTableUpdated(table1, table2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onTableUpdated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onUserDefinedTypeCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onUserDefinedTypeCreated(userDefinedType1); - // when - listener.onUserDefinedTypeCreated(userDefinedType1); - // then - verify(child1).onUserDefinedTypeCreated(userDefinedType1); - verify(child2).onUserDefinedTypeCreated(userDefinedType1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onUserDefinedTypeCreated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onUserDefinedTypeDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onUserDefinedTypeDropped(userDefinedType1); - // when - listener.onUserDefinedTypeDropped(userDefinedType1); - // then - verify(child1).onUserDefinedTypeDropped(userDefinedType1); - verify(child2).onUserDefinedTypeDropped(userDefinedType1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onUserDefinedTypeDropped event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onUserDefinedTypeUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); - // when - listener.onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); - // then - verify(child1).onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); - verify(child2).onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onUserDefinedTypeUpdated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onFunctionCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onFunctionCreated(function1); - // when - listener.onFunctionCreated(function1); - // then - verify(child1).onFunctionCreated(function1); - verify(child2).onFunctionCreated(function1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onFunctionCreated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onFunctionDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onFunctionDropped(function1); - // when - listener.onFunctionDropped(function1); - // then - verify(child1).onFunctionDropped(function1); - verify(child2).onFunctionDropped(function1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onFunctionDropped event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onFunctionUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onFunctionUpdated(function1, function2); - // when - listener.onFunctionUpdated(function1, function2); - // then - verify(child1).onFunctionUpdated(function1, function2); - verify(child2).onFunctionUpdated(function1, function2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onFunctionUpdated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onAggregateCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onAggregateCreated(aggregate1); - // when - listener.onAggregateCreated(aggregate1); - // then - verify(child1).onAggregateCreated(aggregate1); - verify(child2).onAggregateCreated(aggregate1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onAggregateCreated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onAggregateDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onAggregateDropped(aggregate1); - // when - listener.onAggregateDropped(aggregate1); - // then - verify(child1).onAggregateDropped(aggregate1); - verify(child2).onAggregateDropped(aggregate1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onAggregateDropped event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onAggregateUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onAggregateUpdated(aggregate1, aggregate2); - // when - listener.onAggregateUpdated(aggregate1, aggregate2); - // then - verify(child1).onAggregateUpdated(aggregate1, aggregate2); - verify(child2).onAggregateUpdated(aggregate1, aggregate2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onAggregateUpdated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onViewCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onViewCreated(view1); - // when - listener.onViewCreated(view1); - // then - verify(child1).onViewCreated(view1); - verify(child2).onViewCreated(view1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onViewCreated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onViewDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onViewDropped(view1); - // when - listener.onViewDropped(view1); - // then - verify(child1).onViewDropped(view1); - verify(child2).onViewDropped(view1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onViewDropped event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onViewUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onViewUpdated(view1, view2); - // when - listener.onViewUpdated(view1, view2); - // then - verify(child1).onViewUpdated(view1, view2); - verify(child2).onViewUpdated(view1, view2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onViewUpdated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onSessionReady() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onSessionReady(session); - // when - listener.onSessionReady(session); - // then - verify(child1).onSessionReady(session); - verify(child2).onSessionReady(session); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onSessionReady event. (NullPointerException: null)"); - } - - @Test - public void should_notify_close() throws Exception { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - Exception child1Error = new NullPointerException(); - willThrow(child1Error).given(child1).close(); - // when - listener.close(); - // then - verify(child1).close(); - verify(child2).close(); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while closing schema change listener child1. (NullPointerException: null)"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/TableMetadataTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/TableMetadataTest.java deleted file mode 100644 index 03d63230992..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/TableMetadataTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import static com.datastax.oss.driver.api.core.CqlIdentifier.fromCql; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.PrimitiveType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.ProtocolConstants.DataType; -import com.google.common.collect.ImmutableList; -import java.util.UUID; -import org.junit.Test; - -public class TableMetadataTest { - - /** Tests CASSJAVA-2 */ - @Test - public void should_describe_table_with_vector_correctly() { - TableMetadata tableMetadata = - new DefaultTableMetadata( - fromCql("ks"), - fromCql("tb"), - UUID.randomUUID(), - false, - false, - ImmutableList.of( - new DefaultColumnMetadata( - fromCql("ks"), - fromCql("ks"), - fromCql("tb"), - new PrimitiveType(DataType.ASCII), - false)), - ImmutableMap.of(), - ImmutableMap.of( - fromCql("a"), - new DefaultColumnMetadata( - fromCql("ks"), - fromCql("ks"), - fromCql("tb"), - new DefaultVectorType(new PrimitiveType(DataType.INT), 3), - false)), - ImmutableMap.of(), - ImmutableMap.of()); - - String describe1 = tableMetadata.describe(true); - - 
assertThat(describe1).contains("vector,"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java deleted file mode 100644 index 9cf5ba60983..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.Collections; -import java.util.Optional; -import org.junit.Before; -import org.junit.Test; - -public class AggregateParserTest extends SchemaParserTestBase { - - private static final AdminRow SUM_AND_TO_STRING_ROW_2_2 = - mockAggregateRow( - "ks", - "sum_and_to_string", - ImmutableList.of("org.apache.cassandra.db.marshal.Int32Type"), - "plus", - "org.apache.cassandra.db.marshal.Int32Type", - "to_string", - "org.apache.cassandra.db.marshal.UTF8Type", - Bytes.fromHexString("0x00000000")); - - static final AdminRow SUM_AND_TO_STRING_ROW_3_0 = - mockAggregateRow( - "ks", - "sum_and_to_string", - ImmutableList.of("int"), - "plus", - "int", - "to_string", - "text", - "0"); - - @Before - @Override - public void setup() { - super.setup(); - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(context.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); - } - - @Test - public void should_parse_modern_table() { - AggregateParser parser = new AggregateParser(new DataTypeCqlNameParser(), context); - AggregateMetadata aggregate = - parser.parseAggregate(SUM_AND_TO_STRING_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(aggregate.getKeyspace().asInternal()).isEqualTo("ks"); - 
assertThat(aggregate.getSignature().getName().asInternal()).isEqualTo("sum_and_to_string"); - assertThat(aggregate.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - - FunctionSignature stateFuncSignature = aggregate.getStateFuncSignature(); - assertThat(stateFuncSignature.getName().asInternal()).isEqualTo("plus"); - assertThat(stateFuncSignature.getParameterTypes()) - .containsExactly(DataTypes.INT, DataTypes.INT); - assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); - - Optional finalFuncSignature = aggregate.getFinalFuncSignature(); - assertThat(finalFuncSignature).isPresent(); - assertThat(finalFuncSignature) - .hasValueSatisfying( - signature -> { - assertThat(signature.getName().asInternal()).isEqualTo("to_string"); - assertThat(signature.getParameterTypes()).containsExactly(DataTypes.INT); - }); - assertThat(aggregate.getReturnType()).isEqualTo(DataTypes.TEXT); - - assertThat(aggregate.getInitCond().get()).isInstanceOf(Integer.class).isEqualTo(0); - } - - @Test - public void should_parse_legacy_table() { - AggregateParser parser = new AggregateParser(new DataTypeClassNameParser(), context); - AggregateMetadata aggregate = - parser.parseAggregate(SUM_AND_TO_STRING_ROW_2_2, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(aggregate.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(aggregate.getSignature().getName().asInternal()).isEqualTo("sum_and_to_string"); - assertThat(aggregate.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - - FunctionSignature stateFuncSignature = aggregate.getStateFuncSignature(); - assertThat(stateFuncSignature.getName().asInternal()).isEqualTo("plus"); - assertThat(stateFuncSignature.getParameterTypes()) - .containsExactly(DataTypes.INT, DataTypes.INT); - assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); - - Optional finalFuncSignature = aggregate.getFinalFuncSignature(); - assertThat(finalFuncSignature).isPresent(); - assertThat(finalFuncSignature) - 
.hasValueSatisfying( - signature -> { - assertThat(signature.getName().asInternal()).isEqualTo("to_string"); - assertThat(signature.getParameterTypes()).containsExactly(DataTypes.INT); - }); - assertThat(aggregate.getReturnType()).isEqualTo(DataTypes.TEXT); - - assertThat(aggregate.getInitCond().get()).isInstanceOf(Integer.class).isEqualTo(0); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java deleted file mode 100644 index 84f5c09317f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class DataTypeClassNameParserTest { - - private static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromInternal("ks"); - - @Mock private InternalDriverContext context; - private DataTypeClassNameParser parser; - - @Before - public void setUp() throws Exception { - MockitoAnnotations.initMocks(this); - parser = new DataTypeClassNameParser(); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_native_types(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - for (Map.Entry entry : - DataTypeClassNameParser.NATIVE_TYPES_BY_CLASS_NAME.entrySet()) { - - String className = entry.getKey(); - DataType expectedType = entry.getValue(); - - assertThat(parse(className)).isEqualTo(expectedType); - } - } finally { - Locale.setDefault(def); - } - } - - 
@Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_collection_types(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat( - parse( - "org.apache.cassandra.db.marshal.ListType(" - + "org.apache.cassandra.db.marshal.UTF8Type)")) - .isEqualTo(DataTypes.listOf(DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.FrozenType(" - + ("org.apache.cassandra.db.marshal.ListType(" - + "org.apache.cassandra.db.marshal.UTF8Type))"))) - .isEqualTo(DataTypes.frozenListOf(DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.SetType(" - + "org.apache.cassandra.db.marshal.UTF8Type)")) - .isEqualTo(DataTypes.setOf(DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.MapType(" - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.UTF8Type)")) - .isEqualTo(DataTypes.mapOf(DataTypes.TEXT, DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.MapType(" - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.FrozenType(" - + ("org.apache.cassandra.db.marshal.MapType(" - + "org.apache.cassandra.db.marshal.Int32Type," - + "org.apache.cassandra.db.marshal.Int32Type)))"))) - .isEqualTo( - DataTypes.mapOf(DataTypes.TEXT, DataTypes.frozenMapOf(DataTypes.INT, DataTypes.INT))); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_user_type_when_definition_not_already_available(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - UserDefinedType addressType = - (UserDefinedType) - parse( - "org.apache.cassandra.db.marshal.UserType(" - + "foo,61646472657373," - + ("737472656574:org.apache.cassandra.db.marshal.UTF8Type," - + "7a6970636f6465:org.apache.cassandra.db.marshal.Int32Type," - + 
("70686f6e6573:org.apache.cassandra.db.marshal.SetType(" - + "org.apache.cassandra.db.marshal.UserType(foo,70686f6e65," - + "6e616d65:org.apache.cassandra.db.marshal.UTF8Type," - + "6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)") - + "))")); - - assertThat(addressType.getKeyspace().asInternal()).isEqualTo("foo"); - assertThat(addressType.getName().asInternal()).isEqualTo("address"); - assertThat(addressType.isFrozen()).isTrue(); - assertThat(addressType.getFieldNames().size()).isEqualTo(3); - - assertThat(addressType.getFieldNames().get(0).asInternal()).isEqualTo("street"); - assertThat(addressType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - - assertThat(addressType.getFieldNames().get(1).asInternal()).isEqualTo("zipcode"); - assertThat(addressType.getFieldTypes().get(1)).isEqualTo(DataTypes.INT); - - assertThat(addressType.getFieldNames().get(2).asInternal()).isEqualTo("phones"); - DataType phonesType = addressType.getFieldTypes().get(2); - assertThat(phonesType).isInstanceOf(SetType.class); - UserDefinedType phoneType = ((UserDefinedType) ((SetType) phonesType).getElementType()); - - assertThat(phoneType.getKeyspace().asInternal()).isEqualTo("foo"); - assertThat(phoneType.getName().asInternal()).isEqualTo("phone"); - assertThat(phoneType.isFrozen()).isTrue(); - assertThat(phoneType.getFieldNames().size()).isEqualTo(2); - - assertThat(phoneType.getFieldNames().get(0).asInternal()).isEqualTo("name"); - assertThat(phoneType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - - assertThat(phoneType.getFieldNames().get(1).asInternal()).isEqualTo("number"); - assertThat(phoneType.getFieldTypes().get(1)).isEqualTo(DataTypes.TEXT); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_make_a_frozen_copy_user_type_when_definition_already_available(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - UserDefinedType 
existing = mock(UserDefinedType.class); - - parse( - "org.apache.cassandra.db.marshal.UserType(foo,70686f6e65," - + "6e616d65:org.apache.cassandra.db.marshal.UTF8Type," - + "6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)", - ImmutableMap.of(CqlIdentifier.fromInternal("phone"), existing)); - - verify(existing).copy(true); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_tuple(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - TupleType tupleType = - (TupleType) - parse( - "org.apache.cassandra.db.marshal.TupleType(" - + "org.apache.cassandra.db.marshal.Int32Type," - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.FloatType)"); - - assertThat(tupleType.getComponentTypes().size()).isEqualTo(3); - assertThat(tupleType.getComponentTypes().get(0)).isEqualTo(DataTypes.INT); - assertThat(tupleType.getComponentTypes().get(1)).isEqualTo(DataTypes.TEXT); - assertThat(tupleType.getComponentTypes().get(2)).isEqualTo(DataTypes.FLOAT); - } finally { - Locale.setDefault(def); - } - } - - private DataType parse(String toParse) { - return parse(toParse, null); - } - - private DataType parse(String toParse, Map existingTypes) { - return parser.parse(KEYSPACE_ID, toParse, existingTypes, context); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java deleted file mode 100644 index 04ebaf4d68a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; - -public class DataTypeCqlNameParserTest { - - private static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromInternal("ks"); - - @Mock private InternalDriverContext context; - private DataTypeCqlNameParser parser; - - @Before - public void setUp() throws Exception { - parser = new DataTypeCqlNameParser(); - } - - @Test - public 
void should_parse_native_types() { - for (Map.Entry entry : - DataTypeCqlNameParser.NATIVE_TYPES_BY_NAME.entrySet()) { - - String className = entry.getKey(); - DataType expectedType = entry.getValue(); - - assertThat(parse(className)).isEqualTo(expectedType); - } - } - - @Test - public void should_parse_collection_types() { - assertThat(parse("list")).isEqualTo(DataTypes.listOf(DataTypes.TEXT)); - assertThat(parse("frozen>")).isEqualTo(DataTypes.frozenListOf(DataTypes.TEXT)); - assertThat(parse("set")).isEqualTo(DataTypes.setOf(DataTypes.TEXT)); - assertThat(parse("map")).isEqualTo(DataTypes.mapOf(DataTypes.TEXT, DataTypes.TEXT)); - assertThat(parse("map>>")) - .isEqualTo( - DataTypes.mapOf(DataTypes.TEXT, DataTypes.frozenMapOf(DataTypes.INT, DataTypes.INT))); - } - - @Test - public void should_parse_top_level_user_type_as_shallow() { - UserDefinedType addressType = (UserDefinedType) parse("address"); - assertThat(addressType).isInstanceOf(ShallowUserDefinedType.class); - assertThat(addressType.getKeyspace()).isEqualTo(KEYSPACE_ID); - assertThat(addressType.getName().asInternal()).isEqualTo("address"); - assertThat(addressType.isFrozen()).isFalse(); - - UserDefinedType frozenAddressType = (UserDefinedType) parse("frozen
"); - assertThat(frozenAddressType).isInstanceOf(ShallowUserDefinedType.class); - assertThat(frozenAddressType.getKeyspace()).isEqualTo(KEYSPACE_ID); - assertThat(frozenAddressType.getName().asInternal()).isEqualTo("address"); - assertThat(frozenAddressType.isFrozen()).isTrue(); - } - - @Test - public void should_reuse_existing_user_type_when_not_top_level() { - UserDefinedType addressType = mock(UserDefinedType.class); - UserDefinedType frozenAddressType = mock(UserDefinedType.class); - when(addressType.copy(false)).thenReturn(addressType); - when(addressType.copy(true)).thenReturn(frozenAddressType); - - ImmutableMap existingTypes = - ImmutableMap.of(CqlIdentifier.fromInternal("address"), addressType); - - ListType listOfAddress = (ListType) parse("list
", existingTypes); - assertThat(listOfAddress.getElementType()).isEqualTo(addressType); - - ListType listOfFrozenAddress = (ListType) parse("list>", existingTypes); - assertThat(listOfFrozenAddress.getElementType()).isEqualTo(frozenAddressType); - } - - @Test - public void should_parse_tuple() { - TupleType tupleType = (TupleType) parse("tuple"); - - assertThat(tupleType.getComponentTypes().size()).isEqualTo(3); - assertThat(tupleType.getComponentTypes().get(0)).isEqualTo(DataTypes.INT); - assertThat(tupleType.getComponentTypes().get(1)).isEqualTo(DataTypes.TEXT); - assertThat(tupleType.getComponentTypes().get(2)).isEqualTo(DataTypes.FLOAT); - } - - @Test - public void should_parse_udt_named_like_collection_type() { - // Those are all valid UDT names! - assertThat(parse("tuple")).isInstanceOf(UserDefinedType.class); - assertThat(parse("list")).isInstanceOf(UserDefinedType.class); - assertThat(parse("map")).isInstanceOf(UserDefinedType.class); - assertThat(parse("frozen")).isInstanceOf(UserDefinedType.class); - - MapType mapType = (MapType) parse("map"); - assertThat(mapType.getKeyType()).isInstanceOf(UserDefinedType.class); - assertThat(mapType.getValueType()).isInstanceOf(UserDefinedType.class); - } - - private DataType parse(String toParse) { - return parse(toParse, null); - } - - private DataType parse(String toParse, Map existingTypes) { - return parser.parse(KEYSPACE_ID, toParse, existingTypes, context); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java deleted file mode 100644 index ab2d2e725ea..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collections; -import org.junit.Test; - -public class FunctionParserTest extends SchemaParserTestBase { - - private static final AdminRow ID_ROW_2_2 = - mockFunctionRow( - "ks", - "id", - ImmutableList.of("i"), - ImmutableList.of("org.apache.cassandra.db.marshal.Int32Type"), - "return i;", - false, - "java", - "org.apache.cassandra.db.marshal.Int32Type"); - - static final AdminRow ID_ROW_3_0 = - mockFunctionRow( - "ks", - "id", - ImmutableList.of("i"), - ImmutableList.of("int"), - "return i;", - false, - "java", - "int"); - - @Test - public void should_parse_modern_table() { - FunctionParser parser = new FunctionParser(new DataTypeCqlNameParser(), context); - FunctionMetadata function = - parser.parseFunction(ID_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - 
assertThat(function.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(function.getSignature().getName().asInternal()).isEqualTo("id"); - assertThat(function.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - assertThat(function.getParameterNames()).containsExactly(CqlIdentifier.fromInternal("i")); - assertThat(function.getBody()).isEqualTo("return i;"); - assertThat(function.isCalledOnNullInput()).isFalse(); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - } - - @Test - public void should_parse_legacy_table() { - FunctionParser parser = new FunctionParser(new DataTypeClassNameParser(), context); - FunctionMetadata function = - parser.parseFunction(ID_ROW_2_2, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(function.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(function.getSignature().getName().asInternal()).isEqualTo("id"); - assertThat(function.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - assertThat(function.getParameterNames()).containsExactly(CqlIdentifier.fromInternal("i")); - assertThat(function.getBody()).isEqualTo("return i;"); - assertThat(function.isCalledOnNullInput()).isFalse(); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java deleted file mode 100644 index a08a6cba838..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.CassandraSchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Map; -import java.util.function.Consumer; -import org.junit.Test; - -public class SchemaParserTest extends SchemaParserTestBase { - - @Test - public void should_parse_modern_keyspace_row() { - SchemaRefresh refresh = - (SchemaRefresh) - parse(rows -> rows.withKeyspaces(ImmutableList.of(mockModernKeyspaceRow("ks")))); - - 
assertThat(refresh.newKeyspaces).hasSize(1); - KeyspaceMetadata keyspace = refresh.newKeyspaces.values().iterator().next(); - checkKeyspace(keyspace); - } - - @Test - public void should_parse_legacy_keyspace_row() { - SchemaRefresh refresh = - (SchemaRefresh) - parse(rows -> rows.withKeyspaces(ImmutableList.of(mockLegacyKeyspaceRow("ks")))); - - assertThat(refresh.newKeyspaces).hasSize(1); - KeyspaceMetadata keyspace = refresh.newKeyspaces.values().iterator().next(); - checkKeyspace(keyspace); - } - - @Test - public void should_parse_keyspace_with_all_children() { - // Needed to parse the aggregate - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - - SchemaRefresh refresh = - (SchemaRefresh) - parse( - rows -> - rows.withKeyspaces(ImmutableList.of(mockModernKeyspaceRow("ks"))) - .withTypes( - ImmutableList.of( - mockTypeRow( - "ks", "t", ImmutableList.of("i"), ImmutableList.of("int")))) - .withTables(ImmutableList.of(TableParserTest.TABLE_ROW_3_0)) - .withColumns(TableParserTest.COLUMN_ROWS_3_0) - .withIndexes(TableParserTest.INDEX_ROWS_3_0) - .withViews(ImmutableList.of(ViewParserTest.VIEW_ROW_3_0)) - .withColumns(ViewParserTest.COLUMN_ROWS_3_0) - .withFunctions(ImmutableList.of(FunctionParserTest.ID_ROW_3_0)) - .withAggregates( - ImmutableList.of(AggregateParserTest.SUM_AND_TO_STRING_ROW_3_0))); - - assertThat(refresh.newKeyspaces).hasSize(1); - KeyspaceMetadata keyspace = refresh.newKeyspaces.values().iterator().next(); - checkKeyspace(keyspace); - - assertThat(keyspace.getUserDefinedTypes()) - .hasSize(1) - .containsKey(CqlIdentifier.fromInternal("t")); - assertThat(keyspace.getTables()).hasSize(1).containsKey(CqlIdentifier.fromInternal("foo")); - assertThat(keyspace.getViews()) - .hasSize(1) - .containsKey(CqlIdentifier.fromInternal("alltimehigh")); - assertThat(keyspace.getFunctions()) - .hasSize(1) - .containsKey(new FunctionSignature(CqlIdentifier.fromInternal("id"), DataTypes.INT)); - assertThat(keyspace.getAggregates()) - .hasSize(1) 
- .containsKey( - new FunctionSignature(CqlIdentifier.fromInternal("sum_and_to_string"), DataTypes.INT)); - } - - // Common assertions, the keyspace has the same info in all of our single keyspace examples - private void checkKeyspace(KeyspaceMetadata keyspace) { - assertThat(keyspace.getName().asInternal()).isEqualTo("ks"); - assertThat(keyspace.isDurableWrites()).isTrue(); - assertThat(keyspace.getReplication()) - .hasSize(2) - .containsEntry("class", "org.apache.cassandra.locator.SimpleStrategy") - .containsEntry("replication_factor", "1"); - } - - @Test - public void should_parse_multiple_keyspaces() { - SchemaRefresh refresh = - (SchemaRefresh) - parse( - rows -> - rows.withKeyspaces( - ImmutableList.of( - mockModernKeyspaceRow("ks1"), mockModernKeyspaceRow("ks2"))) - .withTypes( - ImmutableList.of( - mockTypeRow( - "ks1", "t1", ImmutableList.of("i"), ImmutableList.of("int")), - mockTypeRow( - "ks2", "t2", ImmutableList.of("i"), ImmutableList.of("int"))))); - - Map keyspaces = refresh.newKeyspaces; - assertThat(keyspaces).hasSize(2); - KeyspaceMetadata ks1 = keyspaces.get(CqlIdentifier.fromInternal("ks1")); - KeyspaceMetadata ks2 = keyspaces.get(CqlIdentifier.fromInternal("ks2")); - - assertThat(ks1.getName().asInternal()).isEqualTo("ks1"); - assertThat(ks1.getUserDefinedTypes()).hasSize(1).containsKey(CqlIdentifier.fromInternal("t1")); - assertThat(ks2.getName().asInternal()).isEqualTo("ks2"); - assertThat(ks2.getUserDefinedTypes()).hasSize(1).containsKey(CqlIdentifier.fromInternal("t2")); - } - - private MetadataRefresh parse(Consumer builderConfig) { - CassandraSchemaRows.Builder builder = - new CassandraSchemaRows.Builder(NODE_3_0, keyspaceFilter, "test"); - builderConfig.accept(builder); - SchemaRows rows = builder.build(); - return new CassandraSchemaParser(rows, context).parse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java deleted file mode 100644 index e5f0c732f7a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.KeyspaceFilter; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Silent.class) -public abstract class SchemaParserTestBase { - - protected static final Node NODE_2_2 = mockNode(Version.V2_2_0); - protected static final Node NODE_3_0 = mockNode(Version.V3_0_0); - protected static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromInternal("ks"); - @Mock protected DefaultMetadata currentMetadata; - @Mock protected InternalDriverContext context; - @Mock protected KeyspaceFilter keyspaceFilter; - - @Before - public void setup() { - when(keyspaceFilter.includes(anyString())).thenReturn(true); - } - - protected static AdminRow mockFunctionRow( - String keyspace, - String name, - List argumentNames, - List argumentTypes, - String body, - boolean calledOnNullInput, - String language, - String returnType) { - - AdminRow row = mock(AdminRow.class); - - when(row.contains("keyspace_name")).thenReturn(true); - 
when(row.contains("function_name")).thenReturn(true); - when(row.contains("argument_names")).thenReturn(true); - when(row.contains("argument_types")).thenReturn(true); - when(row.contains("body")).thenReturn(true); - when(row.contains("called_on_null_input")).thenReturn(true); - when(row.contains("language")).thenReturn(true); - when(row.contains("return_type")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("function_name")).thenReturn(name); - when(row.getListOfString("argument_names")).thenReturn(argumentNames); - when(row.getListOfString("argument_types")).thenReturn(argumentTypes); - when(row.getString("body")).thenReturn(body); - when(row.getBoolean("called_on_null_input")).thenReturn(calledOnNullInput); - when(row.getString("language")).thenReturn(language); - when(row.getString("return_type")).thenReturn(returnType); - - return row; - } - - protected static AdminRow mockAggregateRow( - String keyspace, - String name, - List argumentTypes, - String stateFunc, - String stateType, - String finalFunc, - String returnType, - Object initCond) { - - AdminRow row = mock(AdminRow.class); - - when(row.contains("keyspace_name")).thenReturn(true); - when(row.contains("aggregate_name")).thenReturn(true); - when(row.contains("argument_types")).thenReturn(true); - when(row.contains("state_func")).thenReturn(true); - when(row.contains("state_type")).thenReturn(true); - when(row.contains("final_func")).thenReturn(true); - when(row.contains("return_type")).thenReturn(true); - when(row.contains("initcond")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("aggregate_name")).thenReturn(name); - when(row.getListOfString("argument_types")).thenReturn(argumentTypes); - when(row.getString("state_func")).thenReturn(stateFunc); - when(row.getString("state_type")).thenReturn(stateType); - when(row.getString("final_func")).thenReturn(finalFunc); - 
when(row.getString("return_type")).thenReturn(returnType); - - if (initCond instanceof ByteBuffer) { - when(row.isString("initcond")).thenReturn(false); - when(row.getByteBuffer("initcond")).thenReturn(((ByteBuffer) initCond)); - } else if (initCond instanceof String) { - when(row.isString("initcond")).thenReturn(true); - when(row.getString("initcond")).thenReturn(((String) initCond)); - } else { - fail("Unsupported initcond type" + initCond.getClass()); - } - - return row; - } - - protected static AdminRow mockTypeRow( - String keyspace, String name, List fieldNames, List fieldTypes) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("type_name")).thenReturn(name); - when(row.getListOfString("field_names")).thenReturn(fieldNames); - when(row.getListOfString("field_types")).thenReturn(fieldTypes); - - return row; - } - - protected static AdminRow mockLegacyTableRow(String keyspace, String name, String comparator) { - AdminRow row = mock(AdminRow.class); - - when(row.contains("table_name")).thenReturn(false); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("columnfamily_name")).thenReturn(name); - when(row.getBoolean("is_dense")).thenReturn(false); - when(row.getString("comparator")).thenReturn(comparator); - when(row.isString("caching")).thenReturn(true); - when(row.getString("caching")) - .thenReturn("{\"keys\":\"ALL\", \"rows_per_partition\":\"NONE\"}"); - when(row.getString("compaction_strategy_class")) - .thenReturn("org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy"); - when(row.getString("compaction_strategy_options")).thenReturn("{\"mock_option\":\"1\"}"); - - return row; - } - - protected static AdminRow mockLegacyColumnRow( - String keyspaceName, - String tableName, - String name, - String kind, - String dataType, - Integer position) { - return mockLegacyColumnRow( - keyspaceName, tableName, name, kind, dataType, position, null, null, 
null); - } - - protected static AdminRow mockLegacyColumnRow( - String keyspaceName, - String tableName, - String name, - String kind, - String dataType, - int position, - String indexName, - String indexType, - String indexOptions) { - AdminRow row = mock(AdminRow.class); - - when(row.contains("validator")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getString("columnfamily_name")).thenReturn(tableName); - when(row.getString("column_name")).thenReturn(name); - when(row.getString("type")).thenReturn(kind); - when(row.getString("validator")).thenReturn(dataType); - when(row.getInteger("component_index")).thenReturn(position); - when(row.getString("index_name")).thenReturn(indexName); - when(row.getString("index_type")).thenReturn(indexType); - when(row.getString("index_options")).thenReturn(indexOptions); - - return row; - } - - protected static AdminRow mockModernTableRow(String keyspace, String name) { - AdminRow row = mock(AdminRow.class); - - when(row.contains("flags")).thenReturn(true); - when(row.contains("table_name")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("table_name")).thenReturn(name); - when(row.getSetOfString("flags")).thenReturn(ImmutableSet.of("compound")); - when(row.isString("caching")).thenReturn(false); - when(row.get("caching", RelationParser.MAP_OF_TEXT_TO_TEXT)) - .thenReturn(ImmutableMap.of("keys", "ALL", "rows_per_partition", "NONE")); - when(row.get("compaction", RelationParser.MAP_OF_TEXT_TO_TEXT)) - .thenReturn( - ImmutableMap.of( - "class", - "org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy", - "mock_option", - "1")); - - return row; - } - - protected static AdminRow mockModernColumnRow( - String keyspaceName, - String tableName, - String name, - String kind, - String dataType, - String clusteringOrder, - Integer position) { - AdminRow row = mock(AdminRow.class); - - 
when(row.contains("kind")).thenReturn(true); - when(row.contains("position")).thenReturn(true); - when(row.contains("clustering_order")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getString("table_name")).thenReturn(tableName); - when(row.getString("column_name")).thenReturn(name); - when(row.getString("kind")).thenReturn(kind); - when(row.getString("type")).thenReturn(dataType); - when(row.getInteger("position")).thenReturn(position); - when(row.getString("clustering_order")).thenReturn(clusteringOrder); - - return row; - } - - protected static AdminRow mockIndexRow( - String keyspaceName, - String tableName, - String name, - String kind, - ImmutableMap options) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getString("table_name")).thenReturn(tableName); - when(row.getString("index_name")).thenReturn(name); - when(row.getString("kind")).thenReturn(kind); - when(row.getMapOfStringToString("options")).thenReturn(options); - - return row; - } - - protected static AdminRow mockViewRow( - String keyspaceName, - String viewName, - String baseTableName, - boolean includeAllColumns, - String whereClause) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getString("view_name")).thenReturn(viewName); - when(row.getString("base_table_name")).thenReturn(baseTableName); - when(row.getBoolean("include_all_columns")).thenReturn(includeAllColumns); - when(row.getString("where_clause")).thenReturn(whereClause); - - return row; - } - - protected static AdminRow mockModernKeyspaceRow(String keyspaceName) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getBoolean("durable_writes")).thenReturn(true); - - when(row.contains("strategy_class")).thenReturn(false); - when(row.getMapOfStringToString("replication")) - .thenReturn( 
- ImmutableMap.of( - "class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")); - - return row; - } - - protected static AdminRow mockLegacyKeyspaceRow(String keyspaceName) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getBoolean("durable_writes")).thenReturn(true); - - when(row.contains("strategy_class")).thenReturn(true); - when(row.getString("strategy_class")).thenReturn("org.apache.cassandra.locator.SimpleStrategy"); - when(row.getString("strategy_options")).thenReturn("{\"replication_factor\":\"1\"}"); - - return row; - } - - private static Node mockNode(Version version) { - Node node = mock(Node.class); - when(node.getExtras()).thenReturn(Collections.emptyMap()); - when(node.getCassandraVersion()).thenReturn(version); - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java deleted file mode 100644 index a316473d071..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.CassandraSchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import java.util.Iterator; -import java.util.Map; -import org.junit.Test; - -public class TableParserTest extends SchemaParserTestBase { - - private static final AdminRow TABLE_ROW_2_2 = - mockLegacyTableRow( - "ks", - "foo", - "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.UTF8Type)"); - private static final ImmutableList COLUMN_ROWS_2_2 = - ImmutableList.of( - mockLegacyColumnRow( - "ks", "foo", "k2", "partition_key", "org.apache.cassandra.db.marshal.UTF8Type", 1), - mockLegacyColumnRow( - "ks", "foo", "k1", "partition_key", "org.apache.cassandra.db.marshal.Int32Type", 
0), - mockLegacyColumnRow( - "ks", "foo", "cc1", "clustering_key", "org.apache.cassandra.db.marshal.Int32Type", 0), - mockLegacyColumnRow( - "ks", - "foo", - "cc2", - "clustering_key", - "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.Int32Type)", - 1), - mockLegacyColumnRow( - "ks", - "foo", - "v", - "regular", - "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.Int32Type)", - -1, - "foo_v_idx", - "COMPOSITES", - "{}")); - - static final AdminRow TABLE_ROW_3_0 = mockModernTableRow("ks", "foo"); - static final ImmutableList COLUMN_ROWS_3_0 = - ImmutableList.of( - mockModernColumnRow("ks", "foo", "k2", "partition_key", "text", "none", 1), - mockModernColumnRow("ks", "foo", "k1", "partition_key", "int", "none", 0), - mockModernColumnRow("ks", "foo", "cc1", "clustering", "int", "asc", 0), - mockModernColumnRow("ks", "foo", "cc2", "clustering", "int", "desc", 1), - mockModernColumnRow("ks", "foo", "v", "regular", "int", "none", -1)); - static final ImmutableList INDEX_ROWS_3_0 = - ImmutableList.of( - mockIndexRow("ks", "foo", "foo_v_idx", "COMPOSITES", ImmutableMap.of("target", "v"))); - - @Test - public void should_skip_when_no_column_rows() { - SchemaRows rows = legacyRows(TABLE_ROW_2_2, Collections.emptyList()); - TableParser parser = new TableParser(rows, context); - TableMetadata table = parser.parseTable(TABLE_ROW_2_2, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(table).isNull(); - } - - @Test - public void should_parse_legacy_tables() { - SchemaRows rows = legacyRows(TABLE_ROW_2_2, COLUMN_ROWS_2_2); - TableParser parser = new TableParser(rows, context); - TableMetadata table = parser.parseTable(TABLE_ROW_2_2, KEYSPACE_ID, Collections.emptyMap()); - - checkTable(table); - - assertThat(table.getOptions().get(CqlIdentifier.fromInternal("caching"))) - .isEqualTo("{\"keys\":\"ALL\", \"rows_per_partition\":\"NONE\"}"); - } - - @Test - public void should_parse_modern_tables() { - SchemaRows rows = 
modernRows(TABLE_ROW_3_0, COLUMN_ROWS_3_0, INDEX_ROWS_3_0); - TableParser parser = new TableParser(rows, context); - TableMetadata table = parser.parseTable(TABLE_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - checkTable(table); - - @SuppressWarnings("unchecked") - Map caching = - (Map) table.getOptions().get(CqlIdentifier.fromInternal("caching")); - assertThat(caching) - .hasSize(2) - .containsEntry("keys", "ALL") - .containsEntry("rows_per_partition", "NONE"); - } - - /** Covers two additional Cassandra 4.0 options added in JAVA-2090. */ - @Test - public void should_parse_read_repair_and_additional_write_policy() { - AdminRow tableRow40 = mockModernTableRow("ks", "foo"); - when(tableRow40.get("read_repair", TypeCodecs.TEXT)).thenReturn("NONE"); - when(tableRow40.get("additional_write_policy", TypeCodecs.TEXT)).thenReturn("40p"); - - SchemaRows rows = modernRows(tableRow40, COLUMN_ROWS_3_0, INDEX_ROWS_3_0); - TableParser parser = new TableParser(rows, context); - TableMetadata table = parser.parseTable(tableRow40, KEYSPACE_ID, Collections.emptyMap()); - - checkTable(table); - - assertThat(table.getOptions()) - .containsEntry(CqlIdentifier.fromInternal("read_repair"), "NONE") - .containsEntry(CqlIdentifier.fromInternal("additional_write_policy"), "40p"); - } - - // Shared between 2.2 and 3.0 tests, all expected values are the same except the 'caching' option - private void checkTable(TableMetadata table) { - assertThat(table.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(table.getName().asInternal()).isEqualTo("foo"); - - assertThat(table.getPartitionKey()).hasSize(2); - ColumnMetadata pk0 = table.getPartitionKey().get(0); - assertThat(pk0.getName().asInternal()).isEqualTo("k1"); - assertThat(pk0.getType()).isEqualTo(DataTypes.INT); - ColumnMetadata pk1 = table.getPartitionKey().get(1); - assertThat(pk1.getName().asInternal()).isEqualTo("k2"); - assertThat(pk1.getType()).isEqualTo(DataTypes.TEXT); - - 
assertThat(table.getClusteringColumns().entrySet()).hasSize(2); - Iterator clusteringColumnsIterator = - table.getClusteringColumns().keySet().iterator(); - ColumnMetadata clusteringColumn1 = clusteringColumnsIterator.next(); - assertThat(clusteringColumn1.getName().asInternal()).isEqualTo("cc1"); - ColumnMetadata clusteringColumn2 = clusteringColumnsIterator.next(); - assertThat(clusteringColumn2.getName().asInternal()).isEqualTo("cc2"); - assertThat(table.getClusteringColumns().values()) - .containsExactly(ClusteringOrder.ASC, ClusteringOrder.DESC); - - assertThat(table.getColumns()) - .containsOnlyKeys( - CqlIdentifier.fromInternal("k1"), - CqlIdentifier.fromInternal("k2"), - CqlIdentifier.fromInternal("cc1"), - CqlIdentifier.fromInternal("cc2"), - CqlIdentifier.fromInternal("v")); - ColumnMetadata regularColumn = table.getColumns().get(CqlIdentifier.fromInternal("v")); - assertThat(regularColumn.getName().asInternal()).isEqualTo("v"); - assertThat(regularColumn.getType()).isEqualTo(DataTypes.INT); - - assertThat(table.getIndexes()).containsOnlyKeys(CqlIdentifier.fromInternal("foo_v_idx")); - IndexMetadata index = table.getIndexes().get(CqlIdentifier.fromInternal("foo_v_idx")); - assertThat(index.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(index.getTable().asInternal()).isEqualTo("foo"); - assertThat(index.getName().asInternal()).isEqualTo("foo_v_idx"); - assertThat(index.getClassName()).isNotPresent(); - assertThat(index.getKind()).isEqualTo(IndexKind.COMPOSITES); - assertThat(index.getTarget()).isEqualTo("v"); - - assertThat(table.getIndex("foo_v_idx")).hasValue(index); - - @SuppressWarnings("unchecked") - Map compaction = - (Map) table.getOptions().get(CqlIdentifier.fromInternal("compaction")); - assertThat(compaction) - .hasSize(2) - .containsEntry("class", "org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy") - .containsEntry("mock_option", "1"); - } - - private SchemaRows legacyRows(AdminRow tableRow, Iterable columnRows) { - 
return rows(tableRow, columnRows, null, NODE_2_2); - } - - private SchemaRows modernRows( - AdminRow tableRow, Iterable columnRows, Iterable indexesRows) { - return rows(tableRow, columnRows, indexesRows, NODE_3_0); - } - - private SchemaRows rows( - AdminRow tableRow, Iterable columnRows, Iterable indexesRows, Node node) { - CassandraSchemaRows.Builder builder = - new CassandraSchemaRows.Builder(node, keyspaceFilter, "test") - .withTables(ImmutableList.of(tableRow)) - .withColumns(columnRows); - if (indexesRows != null) { - builder.withIndexes(indexesRows); - } - return builder.build(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java deleted file mode 100644 index f90d07ebe6d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Map; -import org.junit.Test; - -public class UserDefinedTypeListParserTest extends SchemaParserTestBase { - - private static final AdminRow PERSON_ROW_2_2 = - mockTypeRow( - "ks", - "person", - ImmutableList.of("first_name", "last_name", "address"), - ImmutableList.of( - "org.apache.cassandra.db.marshal.UTF8Type", - "org.apache.cassandra.db.marshal.UTF8Type", - "org.apache.cassandra.db.marshal.UserType(" - + "ks,61646472657373," // address - + "737472656574:org.apache.cassandra.db.marshal.UTF8Type," // street - + "7a6970636f6465:org.apache.cassandra.db.marshal.Int32Type)")); // zipcode - - private static final AdminRow PERSON_ROW_3_0 = - mockTypeRow( - "ks", - "person", - ImmutableList.of("first_name", "last_name", "address"), - ImmutableList.of("text", "text", "address")); - - private static final AdminRow ADDRESS_ROW_3_0 = - mockTypeRow( - "ks", "address", ImmutableList.of("street", "zipcode"), ImmutableList.of("text", "int")); - - @Test - public void should_parse_modern_table() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse(KEYSPACE_ID, PERSON_ROW_3_0, ADDRESS_ROW_3_0); - - assertThat(types).hasSize(2); - UserDefinedType personType = types.get(CqlIdentifier.fromInternal("person")); - 
UserDefinedType addressType = types.get(CqlIdentifier.fromInternal("address")); - - assertThat(personType.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(personType.getName().asInternal()).isEqualTo("person"); - assertThat(personType.getFieldNames()) - .containsExactly( - CqlIdentifier.fromInternal("first_name"), - CqlIdentifier.fromInternal("last_name"), - CqlIdentifier.fromInternal("address")); - assertThat(personType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - assertThat(personType.getFieldTypes().get(1)).isEqualTo(DataTypes.TEXT); - assertThat(personType.getFieldTypes().get(2)).isSameAs(addressType); - } - - @Test - public void should_parse_legacy_table() { - UserDefinedTypeParser parser = - new UserDefinedTypeParser(new DataTypeClassNameParser(), context); - // no need to add a column for the address type, because in 2.2 UDTs are always fully redefined - // in column and field types (instead of referencing an existing type) - Map types = parser.parse(KEYSPACE_ID, PERSON_ROW_2_2); - - assertThat(types).hasSize(1); - UserDefinedType personType = types.get(CqlIdentifier.fromInternal("person")); - - assertThat(personType.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(personType.getName().asInternal()).isEqualTo("person"); - assertThat(personType.getFieldNames()) - .containsExactly( - CqlIdentifier.fromInternal("first_name"), - CqlIdentifier.fromInternal("last_name"), - CqlIdentifier.fromInternal("address")); - assertThat(personType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - assertThat(personType.getFieldTypes().get(1)).isEqualTo(DataTypes.TEXT); - UserDefinedType addressType = ((UserDefinedType) personType.getFieldTypes().get(2)); - assertThat(addressType.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(addressType.getName().asInternal()).isEqualTo("address"); - assertThat(addressType.getFieldNames()) - .containsExactly( - CqlIdentifier.fromInternal("street"), CqlIdentifier.fromInternal("zipcode")); - } - - @Test - 
public void should_parse_empty_list() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - assertThat(parser.parse(KEYSPACE_ID /* no types*/)).isEmpty(); - } - - @Test - public void should_parse_singleton_list() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, mockTypeRow("ks", "t", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(1); - UserDefinedType type = types.get(CqlIdentifier.fromInternal("t")); - assertThat(type.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(type.getName().asInternal()).isEqualTo("t"); - assertThat(type.getFieldNames()).containsExactly(CqlIdentifier.fromInternal("i")); - assertThat(type.getFieldTypes()).containsExactly(DataTypes.INT); - } - - @Test - public void should_resolve_list_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", "a", ImmutableList.of("bs"), ImmutableList.of("frozen>>")), - mockTypeRow("ks", "b", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(2); - UserDefinedType aType = types.get(CqlIdentifier.fromInternal("a")); - UserDefinedType bType = types.get(CqlIdentifier.fromInternal("b")); - assertThat(((ListType) aType.getFieldTypes().get(0)).getElementType()).isEqualTo(bType); - } - - @Test - public void should_resolve_set_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", "a", ImmutableList.of("bs"), ImmutableList.of("frozen>>")), - mockTypeRow("ks", "b", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(2); - UserDefinedType aType = types.get(CqlIdentifier.fromInternal("a")); - UserDefinedType bType = 
types.get(CqlIdentifier.fromInternal("b")); - assertThat(((SetType) aType.getFieldTypes().get(0)).getElementType()).isEqualTo(bType); - } - - @Test - public void should_resolve_map_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", - "a1", - ImmutableList.of("bs"), - ImmutableList.of("frozen>>")), - mockTypeRow( - "ks", - "a2", - ImmutableList.of("bs"), - ImmutableList.of("frozen, int>>")), - mockTypeRow("ks", "b", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(3); - UserDefinedType a1Type = types.get(CqlIdentifier.fromInternal("a1")); - UserDefinedType a2Type = types.get(CqlIdentifier.fromInternal("a2")); - UserDefinedType bType = types.get(CqlIdentifier.fromInternal("b")); - assertThat(((MapType) a1Type.getFieldTypes().get(0)).getValueType()).isEqualTo(bType); - assertThat(((MapType) a2Type.getFieldTypes().get(0)).getKeyType()).isEqualTo(bType); - } - - @Test - public void should_resolve_tuple_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", - "a", - ImmutableList.of("b"), - ImmutableList.of("frozen>>")), - mockTypeRow("ks", "b", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(2); - UserDefinedType aType = types.get(CqlIdentifier.fromInternal("a")); - UserDefinedType bType = types.get(CqlIdentifier.fromInternal("b")); - assertThat(((TupleType) aType.getFieldTypes().get(0)).getComponentTypes().get(1)) - .isEqualTo(bType); - } - - @Test - public void should_resolve_nested_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", - "a", - ImmutableList.of("bs"), - ImmutableList.of("frozen>>>>")), - mockTypeRow("ks", "b", 
ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(2); - UserDefinedType aType = types.get(CqlIdentifier.fromInternal("a")); - UserDefinedType bType = types.get(CqlIdentifier.fromInternal("b")); - TupleType tupleType = (TupleType) aType.getFieldTypes().get(0); - ListType listType = (ListType) tupleType.getComponentTypes().get(1); - assertThat(listType.getElementType()).isEqualTo(bType); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java deleted file mode 100644 index 1ba471e08f5..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.CassandraSchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collections; -import java.util.Iterator; -import org.junit.Test; - -public class ViewParserTest extends SchemaParserTestBase { - - static final AdminRow VIEW_ROW_3_0 = - mockViewRow("ks", "alltimehigh", "scores", false, "game IS NOT NULL"); - static final ImmutableList COLUMN_ROWS_3_0 = - ImmutableList.of( - mockModernColumnRow("ks", "alltimehigh", "game", "partition_key", "text", "none", 0), - mockModernColumnRow("ks", "alltimehigh", "score", "clustering", "int", "desc", 0), - mockModernColumnRow("ks", "alltimehigh", "user", "clustering", "text", "asc", 1), - mockModernColumnRow("ks", "alltimehigh", "year", "clustering", "int", "asc", 2), - mockModernColumnRow("ks", "alltimehigh", "month", "clustering", "int", "asc", 3), - mockModernColumnRow("ks", "alltimehigh", "day", "clustering", "int", "asc", 4)); - - @Test - public void should_skip_when_no_column_rows() { - SchemaRows rows = rows(VIEW_ROW_3_0, Collections.emptyList()); - ViewParser parser = new ViewParser(rows, context); - ViewMetadata view = parser.parseView(VIEW_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(view).isNull(); - } - - @Test - public void should_parse_view() { - SchemaRows rows = rows(VIEW_ROW_3_0, COLUMN_ROWS_3_0); - ViewParser parser = new ViewParser(rows, 
context); - ViewMetadata view = parser.parseView(VIEW_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(view.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(view.getName().asInternal()).isEqualTo("alltimehigh"); - assertThat(view.getBaseTable().asInternal()).isEqualTo("scores"); - - assertThat(view.getPartitionKey()).hasSize(1); - ColumnMetadata pk0 = view.getPartitionKey().get(0); - assertThat(pk0.getName().asInternal()).isEqualTo("game"); - assertThat(pk0.getType()).isEqualTo(DataTypes.TEXT); - - assertThat(view.getClusteringColumns().entrySet()).hasSize(5); - Iterator clusteringColumnsIterator = - view.getClusteringColumns().keySet().iterator(); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("score"); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("user"); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("year"); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("month"); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("day"); - - assertThat(view.getColumns()) - .containsOnlyKeys( - CqlIdentifier.fromInternal("game"), - CqlIdentifier.fromInternal("score"), - CqlIdentifier.fromInternal("user"), - CqlIdentifier.fromInternal("year"), - CqlIdentifier.fromInternal("month"), - CqlIdentifier.fromInternal("day")); - } - - private SchemaRows rows(AdminRow viewRow, Iterable columnRows) { - return new CassandraSchemaRows.Builder(NODE_3_0, keyspaceFilter, "test") - .withViews(ImmutableList.of(viewRow)) - .withColumns(columnRows) - .build(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java deleted file mode 100644 index 2dd216474df..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Collections; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.LinkedBlockingDeque; -import org.junit.Test; - -// Note: we don't repeat the other tests in Cassandra3SchemaQueriesTest because the logic is -// shared, this class just validates the query strings. 
-public class Cassandra21SchemaQueriesTest extends SchemaQueriesTest { - - @Test - public void should_query() { - when(config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) - .thenReturn(Collections.emptyList()); - when(node.getCassandraVersion()).thenReturn(Version.V2_1_0); - - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - - CompletionStage result = queries.execute(); - - // Keyspace - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_keyspaces"); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1"), mockRow("keyspace_name", "ks2"))); - - // Types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_usertypes"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "type_name", "type"))); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_columnfamilies"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "columnfamily_name", "foo"))); - - // Columns - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_columns"); - call.result.complete( - mockResult( - mockRow("keyspace_name", "ks1", "columnfamily_name", "foo", "column_name", "k"))); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.getNode()).isEqualTo(node); - - // Keyspace - assertThat(rows.keyspaces()).hasSize(2); - assertThat(rows.keyspaces().get(0).getString("keyspace_name")).isEqualTo("ks1"); - assertThat(rows.keyspaces().get(1).getString("keyspace_name")).isEqualTo("ks2"); - - // Types - assertThat(rows.types().keySet()).containsOnly(KS1_ID); - assertThat(rows.types().get(KS1_ID)).hasSize(1); - assertThat(rows.types().get(KS1_ID).iterator().next().getString("type_name")) - .isEqualTo("type"); - - // 
Tables - assertThat(rows.tables().keySet()).containsOnly(KS1_ID); - assertThat(rows.tables().get(KS1_ID)).hasSize(1); - assertThat(rows.tables().get(KS1_ID).iterator().next().getString("columnfamily_name")) - .isEqualTo("foo"); - - // Rows - assertThat(rows.columns().keySet()).containsOnly(KS1_ID); - assertThat(rows.columns().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.columns() - .get(KS1_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("column_name")) - .isEqualTo("k"); - - // No views, functions or aggregates in this version - assertThat(rows.views().keySet()).isEmpty(); - assertThat(rows.functions().keySet()).isEmpty(); - assertThat(rows.aggregates().keySet()).isEmpty(); - }); - } - - /** Extends the class under test to mock the query execution logic. */ - static class SchemaQueriesWithMockedChannel extends Cassandra21SchemaQueries { - - final Queue calls = new LinkedBlockingDeque<>(); - - SchemaQueriesWithMockedChannel( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected CompletionStage query(String query) { - Call call = new Call(query); - calls.add(call); - return call.result; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java deleted file mode 100644 index fd28be59120..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Collections; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.LinkedBlockingDeque; -import org.junit.Test; - -// Note: we don't repeat the other tests in Cassandra3SchemaQueriesTest because the logic is -// shared, this class just validates the query strings. 
-public class Cassandra22SchemaQueriesTest extends SchemaQueriesTest { - - @Test - public void should_query() { - when(config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) - .thenReturn(Collections.emptyList()); - when(node.getCassandraVersion()).thenReturn(Version.V2_2_0); - - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, null, config, "test"); - - CompletionStage result = queries.execute(); - - // Keyspace - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_keyspaces"); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1"), mockRow("keyspace_name", "ks2"))); - - // Types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_usertypes"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "type_name", "type"))); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_columnfamilies"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "columnfamily_name", "foo"))); - - // Columns - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_columns"); - call.result.complete( - mockResult( - mockRow("keyspace_name", "ks1", "columnfamily_name", "foo", "column_name", "k"))); - - // Functions - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_functions"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "function_name", "add"))); - - // Aggregates - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_aggregates"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "aggregate_name", "add"))); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.getNode()).isEqualTo(node); - - // 
Keyspace - assertThat(rows.keyspaces()).hasSize(2); - assertThat(rows.keyspaces().get(0).getString("keyspace_name")).isEqualTo("ks1"); - assertThat(rows.keyspaces().get(1).getString("keyspace_name")).isEqualTo("ks2"); - - // Types - assertThat(rows.types().keySet()).containsOnly(KS1_ID); - assertThat(rows.types().get(KS1_ID)).hasSize(1); - assertThat(rows.types().get(KS1_ID).iterator().next().getString("type_name")) - .isEqualTo("type"); - - // Tables - assertThat(rows.tables().keySet()).containsOnly(KS1_ID); - assertThat(rows.tables().get(KS1_ID)).hasSize(1); - assertThat(rows.tables().get(KS1_ID).iterator().next().getString("columnfamily_name")) - .isEqualTo("foo"); - - // Rows - assertThat(rows.columns().keySet()).containsOnly(KS1_ID); - assertThat(rows.columns().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.columns() - .get(KS1_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("column_name")) - .isEqualTo("k"); - - // Functions - assertThat(rows.functions().keySet()).containsOnly(KS2_ID); - assertThat(rows.functions().get(KS2_ID)).hasSize(1); - assertThat(rows.functions().get(KS2_ID).iterator().next().getString("function_name")) - .isEqualTo("add"); - - // Aggregates - assertThat(rows.aggregates().keySet()).containsOnly(KS2_ID); - assertThat(rows.aggregates().get(KS2_ID)).hasSize(1); - assertThat( - rows.aggregates().get(KS2_ID).iterator().next().getString("aggregate_name")) - .isEqualTo("add"); - - // No views in this version - assertThat(rows.views().keySet()).isEmpty(); - }); - } - - /** Extends the class under test to mock the query execution logic. 
*/ - static class SchemaQueriesWithMockedChannel extends Cassandra22SchemaQueries { - - final Queue calls = new LinkedBlockingDeque<>(); - - SchemaQueriesWithMockedChannel( - DriverChannel channel, - Node node, - CompletableFuture refreshFuture, - DriverExecutionProfile config, - String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected CompletionStage query(String query) { - Call call = new Call(query); - calls.add(call); - return call.result; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java deleted file mode 100644 index 3b533e89ed5..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java +++ /dev/null @@ -1,365 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collections; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.LinkedBlockingDeque; -import org.junit.Before; -import org.junit.Test; - -public class Cassandra3SchemaQueriesTest extends SchemaQueriesTest { - - @Before - @Override - public void setup() { - super.setup(); - - // By default, no keyspace filter - when(config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) - .thenReturn(Collections.emptyList()); - when(node.getCassandraVersion()).thenReturn(Version.V3_0_0); - } - - @Test - public void should_query_without_keyspace_filter() { - should_query_with_where_clause(""); - } - - @Test - public void should_query_with_keyspace_filter() { - when(config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) - .thenReturn(ImmutableList.of("ks1", "ks2")); - - should_query_with_where_clause(" WHERE keyspace_name IN ('ks1','ks2')"); - } - - private void should_query_with_where_clause(String whereClause) { - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - CompletionStage result = queries.execute(); - - // Keyspace - Call call = 
queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.keyspaces" + whereClause); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1"), mockRow("keyspace_name", "ks2"))); - - // Types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.types" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "type_name", "type"))); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.tables" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo"))); - - // Columns - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.columns" + whereClause); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "k"))); - - // Indexes - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.indexes" + whereClause); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo", "index_name", "index"))); - - // Views - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.views" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "view_name", "foo"))); - - // Functions - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.functions" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "function_name", "add"))); - - // Aggregates - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.aggregates" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "aggregate_name", "add"))); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.getNode()).isEqualTo(node); - - // Keyspace - 
assertThat(rows.keyspaces()).hasSize(2); - assertThat(rows.keyspaces().get(0).getString("keyspace_name")).isEqualTo("ks1"); - assertThat(rows.keyspaces().get(1).getString("keyspace_name")).isEqualTo("ks2"); - - // Types - assertThat(rows.types().keySet()).containsOnly(KS1_ID); - assertThat(rows.types().get(KS1_ID)).hasSize(1); - assertThat(rows.types().get(KS1_ID).iterator().next().getString("type_name")) - .isEqualTo("type"); - - // Tables - assertThat(rows.tables().keySet()).containsOnly(KS1_ID); - assertThat(rows.tables().get(KS1_ID)).hasSize(1); - assertThat(rows.tables().get(KS1_ID).iterator().next().getString("table_name")) - .isEqualTo("foo"); - - // Columns - assertThat(rows.columns().keySet()).containsOnly(KS1_ID); - assertThat(rows.columns().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.columns() - .get(KS1_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("column_name")) - .isEqualTo("k"); - - // Indexes - assertThat(rows.indexes().keySet()).containsOnly(KS1_ID); - assertThat(rows.indexes().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.indexes() - .get(KS1_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("index_name")) - .isEqualTo("index"); - - // Views - assertThat(rows.views().keySet()).containsOnly(KS2_ID); - assertThat(rows.views().get(KS2_ID)).hasSize(1); - assertThat(rows.views().get(KS2_ID).iterator().next().getString("view_name")) - .isEqualTo("foo"); - - // Functions - assertThat(rows.functions().keySet()).containsOnly(KS2_ID); - assertThat(rows.functions().get(KS2_ID)).hasSize(1); - assertThat(rows.functions().get(KS2_ID).iterator().next().getString("function_name")) - .isEqualTo("add"); - - // Aggregates - assertThat(rows.aggregates().keySet()).containsOnly(KS2_ID); - assertThat(rows.aggregates().get(KS2_ID)).hasSize(1); - assertThat( - rows.aggregates().get(KS2_ID).iterator().next().getString("aggregate_name")) - .isEqualTo("add"); - }); - } - - @Test - public void should_query_with_paging() 
{ - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - CompletionStage result = queries.execute(); - - // Keyspace - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.keyspaces"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1"))); - - // No types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.types"); - call.result.complete(mockResult(/*empty*/ )); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.tables"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo"))); - - // Columns: paged - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.columns"); - - AdminResult page2 = - mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "v")); - AdminResult page1 = - mockResult(page2, mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "k")); - call.result.complete(page1); - - // No indexes - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.indexes"); - call.result.complete(mockResult(/*empty*/ )); - - // No views - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.views"); - call.result.complete(mockResult(/*empty*/ )); - - // No functions - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.functions"); - call.result.complete(mockResult(/*empty*/ )); - - // No aggregates - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.aggregates"); - call.result.complete(mockResult(/*empty*/ )); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.columns().keySet()).containsOnly(KS1_ID); - 
assertThat(rows.columns().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat(rows.columns().get(KS1_ID).get(FOO_ID)) - .extracting(r -> r.getString("column_name")) - .containsExactly("k", "v"); - }); - } - - @Test - public void should_ignore_malformed_rows() { - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - CompletionStage result = queries.execute(); - - // Keyspace - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.keyspaces"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1"))); - - // No types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.types"); - call.result.complete(mockResult(/*empty*/ )); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.tables"); - call.result.complete( - mockResult( - mockRow("keyspace_name", "ks", "table_name", "foo"), - // Missing keyspace name: - mockRow("table_name", "foo"))); - - // Columns - call = queries.calls.poll(); - call.result.complete( - mockResult( - mockRow("keyspace_name", "ks", "table_name", "foo", "column_name", "k"), - // Missing keyspace name: - mockRow("table_name", "foo", "column_name", "k"), - // Missing table name: - mockRow("keyspace_name", "ks", "column_name", "k"))); - - AdminResult page2 = - mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "v")); - AdminResult page1 = - mockResult(page2, mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "k")); - call.result.complete(page1); - - // No indexes - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.indexes"); - call.result.complete(mockResult(/*empty*/ )); - - // No views - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.views"); - call.result.complete(mockResult(/*empty*/ )); - - // 
No functions - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.functions"); - call.result.complete(mockResult(/*empty*/ )); - - // No aggregates - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.aggregates"); - call.result.complete(mockResult(/*empty*/ )); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.tables().keySet()).containsOnly(KS_ID); - assertThat(rows.tables().get(KS_ID)).hasSize(1); - assertThat(rows.tables().get(KS_ID).iterator().next().getString("table_name")) - .isEqualTo("foo"); - - assertThat(rows.columns().keySet()).containsOnly(KS_ID); - assertThat(rows.columns().get(KS_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.columns() - .get(KS_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("column_name")) - .isEqualTo("k"); - }); - } - - @Test - public void should_abort_if_query_fails() { - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - CompletionStage result = queries.execute(); - - Exception mockQueryError = new Exception("mock query error"); - - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.keyspaces"); - call.result.completeExceptionally(mockQueryError); - - channel.runPendingTasks(); - - assertThatStage(result).isFailed(throwable -> assertThat(throwable).isEqualTo(mockQueryError)); - } - - /** Extends the class under test to mock the query execution logic. 
*/ - static class SchemaQueriesWithMockedChannel extends Cassandra3SchemaQueries { - - final Queue calls = new LinkedBlockingDeque<>(); - - SchemaQueriesWithMockedChannel( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected CompletionStage query(String query) { - Call call = new Call(query); - calls.add(call); - return call.result; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactoryTest.java deleted file mode 100644 index f9ac6c05576..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactoryTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Optional; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DefaultSchemaQueriesFactoryTest { - - enum Expected { - CASS_21(Cassandra21SchemaQueries.class), - CASS_22(Cassandra22SchemaQueries.class), - CASS_3(Cassandra3SchemaQueries.class), - CASS_4(Cassandra4SchemaQueries.class), - DSE_6_8(Dse68SchemaQueries.class); - - final Class clz; - - Expected(Class clz) { - this.clz = clz; - } - - public Class getClz() { - return clz; - } - } - - private static ImmutableList> cassandraVersions = - ImmutableList.>builder() - .add(ImmutableList.of("2.1.0", Optional.empty(), Expected.CASS_21)) - .add(ImmutableList.of("2.2.0", Optional.empty(), Expected.CASS_22)) - .add(ImmutableList.of("2.2.1", Optional.empty(), Expected.CASS_22)) - // Not a real version, just documenting behaviour of existing impl - .add(ImmutableList.of("2.3.0", Optional.empty(), Expected.CASS_22)) - // We now return you to real versions - 
.add(ImmutableList.of("3.0.0", Optional.empty(), Expected.CASS_3)) - .add(ImmutableList.of("3.0.1", Optional.empty(), Expected.CASS_3)) - .add(ImmutableList.of("3.1.0", Optional.empty(), Expected.CASS_3)) - .add(ImmutableList.of("4.0.0", Optional.empty(), Expected.CASS_4)) - .add(ImmutableList.of("4.0.1", Optional.empty(), Expected.CASS_4)) - .add(ImmutableList.of("4.1.0", Optional.empty(), Expected.CASS_4)) - .build(); - - private static ImmutableList> dseVersions = - ImmutableList.>builder() - // DSE 6.0.0 - .add(ImmutableList.of("4.0.0.2284", Optional.of("6.0.0"), Expected.CASS_3)) - // DSE 6.0.1 - .add(ImmutableList.of("4.0.0.2349", Optional.of("6.0.1"), Expected.CASS_3)) - // DSE 6.0.2 moved to DSE version (minus dots) in an extra element - .add(ImmutableList.of("4.0.0.602", Optional.of("6.0.2"), Expected.CASS_3)) - // DSE 6.7.0 continued with the same idea - .add(ImmutableList.of("4.0.0.670", Optional.of("6.7.0"), Expected.CASS_4)) - // DSE 6.8.0 does the same - .add(ImmutableList.of("4.0.0.680", Optional.of("6.8.0"), Expected.DSE_6_8)) - .build(); - - private static ImmutableList> allVersions = - ImmutableList.>builder() - .addAll(cassandraVersions) - .addAll(dseVersions) - .build(); - - @DataProvider(format = "%m %p[1] => %p[0]") - public static Iterable expected() { - - return allVersions; - } - - @Test - @UseDataProvider("expected") - public void should_return_correct_schema_queries_impl( - String cassandraVersion, Optional dseVersion, Expected expected) { - - final Node mockNode = mock(Node.class); - when(mockNode.getCassandraVersion()).thenReturn(Version.parse(cassandraVersion)); - dseVersion.ifPresent( - versionStr -> { - when(mockNode.getExtras()) - .thenReturn( - ImmutableMap.of( - DseNodeProperties.DSE_VERSION, Version.parse(versionStr))); - }); - - DefaultSchemaQueriesFactory factory = buildFactory(); - - @SuppressWarnings("unchecked") - SchemaQueries queries = factory.newInstance(mockNode, mock(DriverChannel.class)); - - 
assertThat(queries.getClass()).isEqualTo(expected.getClz()); - } - - private DefaultSchemaQueriesFactory buildFactory() { - - final DriverExecutionProfile mockProfile = mock(DriverExecutionProfile.class); - final DriverConfig mockConfig = mock(DriverConfig.class); - when(mockConfig.getDefaultProfile()).thenReturn(mockProfile); - final InternalDriverContext mockInternalCtx = mock(InternalDriverContext.class); - when(mockInternalCtx.getConfig()).thenReturn(mockConfig); - - return new DefaultSchemaQueriesFactory(mockInternalCtx); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilterTest.java deleted file mode 100644 index 7e2f6219eac..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilterTest.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Arrays; -import java.util.Set; -import java.util.stream.Collectors; -import org.junit.Test; - -public class KeyspaceFilterTest { - - private static final ImmutableSet KEYSPACES = - ImmutableSet.of( - "system", "inventory_test", "inventory_prod", "customers_test", "customers_prod"); - - @Test - public void should_not_filter_when_no_rules() { - KeyspaceFilter filter = KeyspaceFilter.newInstance("test", Arrays.asList()); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).isEqualTo(KEYSPACES); - } - - @Test - public void should_filter_on_server_when_only_exact_rules() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance( - "test", Arrays.asList("inventory_test", "customers_test", "!system")); - // Note that exact excludes are redundant in this case: either they match an include and will be - // ignored, or they don't and the keyspace is already ignored. - // We let it slide, but a warning is logged. 
- assertThat(filter.getWhereClause()) - .isEqualTo(" WHERE keyspace_name IN ('inventory_test','customers_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "customers_test"); - } - - @Test - public void should_ignore_exact_exclude_that_collides_with_exact_include() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("inventory_test", "!inventory_test")); - assertThat(filter.getWhereClause()).isEqualTo(" WHERE keyspace_name IN ('inventory_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test"); - - // Order does not matter - filter = KeyspaceFilter.newInstance("test", Arrays.asList("!inventory_test", "inventory_test")); - assertThat(filter.getWhereClause()).isEqualTo(" WHERE keyspace_name IN ('inventory_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test"); - } - - @Test - public void should_apply_disjoint_exact_and_regex_rules() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("inventory_test", "/^customers.*/")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)) - .containsOnly("inventory_test", "customers_test", "customers_prod"); - - filter = KeyspaceFilter.newInstance("test", Arrays.asList("!system", "!/^inventory.*/")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("customers_test", "customers_prod"); - - // The remaining cases could be simplified, but they are supported nevertheless: - /*redundant:*/ - filter = KeyspaceFilter.newInstance("test", Arrays.asList("!/^customers.*/", "inventory_test")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "inventory_prod", "system"); - - /*redundant:*/ - filter = KeyspaceFilter.newInstance("test", Arrays.asList("/^customers.*/", "!system")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, 
KEYSPACES)).containsOnly("customers_test", "customers_prod"); - } - - @Test - public void should_apply_intersecting_exact_and_regex_rules() { - // Include all customer keyspaces except one: - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("/^customers.*/", "!customers_test")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("customers_prod"); - - // Exclude all customer keyspaces except one (also implies include every other keyspace): - filter = KeyspaceFilter.newInstance("test", Arrays.asList("!/^customers.*/", "customers_test")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)) - .containsOnly("customers_test", "inventory_test", "inventory_prod", "system"); - } - - @Test - public void should_apply_intersecting_regex_rules() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("/^customers.*/", "!/.*test$/")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("customers_prod"); - - // Throwing an exact name in the mix doesn't change the other rules - filter = - KeyspaceFilter.newInstance( - "test", Arrays.asList("inventory_prod", "/^customers.*/", "!/.*test$/")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_prod", "customers_prod"); - } - - @Test - public void should_skip_malformed_rule() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("inventory_test", "customers_test", "//")); - assertThat(filter.getWhereClause()) - .isEqualTo(" WHERE keyspace_name IN ('inventory_test','customers_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "customers_test"); - } - - @Test - public void should_skip_invalid_regex() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance( - "test", Arrays.asList("inventory_test", "customers_test", "/*/")); - 
assertThat(filter.getWhereClause()) - .isEqualTo(" WHERE keyspace_name IN ('inventory_test','customers_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "customers_test"); - } - - private static Set apply(KeyspaceFilter filter, Set keyspaces) { - return keyspaces.stream().filter(filter::includes).collect(Collectors.toSet()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java deleted file mode 100644 index e0da405993b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import io.netty.channel.embedded.EmbeddedChannel; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public abstract class SchemaQueriesTest { - - protected static final CqlIdentifier KS_ID = CqlIdentifier.fromInternal("ks"); - protected static final CqlIdentifier KS1_ID = CqlIdentifier.fromInternal("ks1"); - protected static final CqlIdentifier KS2_ID = CqlIdentifier.fromInternal("ks2"); - protected static final CqlIdentifier FOO_ID = CqlIdentifier.fromInternal("foo"); - - @Mock protected Node node; - @Mock protected DriverExecutionProfile config; - @Mock protected DriverChannel driverChannel; - protected EmbeddedChannel channel; - - @Before - public void setup() { - // Whatever, not actually used because the requests are mocked - when(config.getDuration(DefaultDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT)) - .thenReturn(Duration.ZERO); - when(config.getInt(DefaultDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE)).thenReturn(5000); - - channel = new EmbeddedChannel(); - driverChannel = mock(DriverChannel.class); - 
when(driverChannel.eventLoop()).thenReturn(channel.eventLoop()); - } - - protected static AdminRow mockRow(String... values) { - AdminRow row = mock(AdminRow.class); - assertThat(values.length % 2).as("Expecting an even number of parameters").isZero(); - for (int i = 0; i < values.length / 2; i++) { - when(row.getString(values[i * 2])).thenReturn(values[i * 2 + 1]); - } - return row; - } - - protected static AdminResult mockResult(AdminRow... rows) { - return mockResult(null, rows); - } - - protected static AdminResult mockResult(AdminResult next, AdminRow... rows) { - AdminResult result = mock(AdminResult.class); - if (next == null) { - when(result.hasNextPage()).thenReturn(false); - } else { - when(result.hasNextPage()).thenReturn(true); - when(result.nextPage()).thenReturn(CompletableFuture.completedFuture(next)); - } - when(result.iterator()).thenReturn(Iterators.forArray(rows)); - return result; - } - - protected static class Call { - final String query; - final CompletableFuture result; - - Call(String query) { - this.query = query; - this.result = new CompletableFuture<>(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java deleted file mode 100644 index 4f124d2c4a0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.refresh; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultKeyspaceMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.events.KeyspaceChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class SchemaRefreshTest { - - private static final UserDefinedType OLD_T1 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t1")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .build(); - private static final 
UserDefinedType OLD_T2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t2")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .build(); - private static final DefaultKeyspaceMetadata OLD_KS1 = newKeyspace("ks1", true, OLD_T1, OLD_T2); - - @Mock private InternalDriverContext context; - @Mock private ChannelFactory channelFactory; - private DefaultMetadata oldMetadata; - - @Before - public void setup() { - when(context.getChannelFactory()).thenReturn(channelFactory); - oldMetadata = - DefaultMetadata.EMPTY.withSchema( - ImmutableMap.of(OLD_KS1.getName(), OLD_KS1), false, context); - } - - @Test - public void should_detect_dropped_keyspace() { - SchemaRefresh refresh = new SchemaRefresh(Collections.emptyMap()); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).isEmpty(); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.dropped(OLD_KS1)); - } - - @Test - public void should_detect_created_keyspace() { - DefaultKeyspaceMetadata ks2 = newKeyspace("ks2", true); - SchemaRefresh refresh = - new SchemaRefresh(ImmutableMap.of(OLD_KS1.getName(), OLD_KS1, ks2.getName(), ks2)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.created(ks2)); - } - - @Test - public void should_detect_top_level_update_in_keyspace() { - // Change only one top-level option (durable writes) - DefaultKeyspaceMetadata newKs1 = newKeyspace("ks1", false, OLD_T1, OLD_T2); - SchemaRefresh refresh = new SchemaRefresh(ImmutableMap.of(OLD_KS1.getName(), newKs1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(1); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.updated(OLD_KS1, newKs1)); - } - - @Test - 
public void should_detect_updated_children_in_keyspace() { - // Drop one type, modify the other and add a third one - UserDefinedType newT2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t2")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.TEXT) - .build(); - UserDefinedType t3 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t3")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .build(); - DefaultKeyspaceMetadata newKs1 = newKeyspace("ks1", true, newT2, t3); - - SchemaRefresh refresh = new SchemaRefresh(ImmutableMap.of(OLD_KS1.getName(), newKs1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces().get(OLD_KS1.getName())).isEqualTo(newKs1); - assertThat(result.events) - .containsExactly( - TypeChangeEvent.dropped(OLD_T1), - TypeChangeEvent.updated(OLD_T2, newT2), - TypeChangeEvent.created(t3)); - } - - @Test - public void should_detect_top_level_change_and_children_changes() { - // Drop one type, modify the other and add a third one - UserDefinedType newT2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t2")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.TEXT) - .build(); - UserDefinedType t3 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t3")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .build(); - // Also disable durable writes - DefaultKeyspaceMetadata newKs1 = newKeyspace("ks1", false, newT2, t3); - - SchemaRefresh refresh = new SchemaRefresh(ImmutableMap.of(OLD_KS1.getName(), newKs1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces().get(OLD_KS1.getName())).isEqualTo(newKs1); - assertThat(result.events) - .containsExactly( - KeyspaceChangeEvent.updated(OLD_KS1, 
newKs1), - TypeChangeEvent.dropped(OLD_T1), - TypeChangeEvent.updated(OLD_T2, newT2), - TypeChangeEvent.created(t3)); - } - - private static DefaultKeyspaceMetadata newKeyspace( - String name, boolean durableWrites, UserDefinedType... userTypes) { - ImmutableMap.Builder typesMapBuilder = ImmutableMap.builder(); - for (UserDefinedType type : userTypes) { - typesMapBuilder.put(type.getName(), type); - } - return new DefaultKeyspaceMetadata( - CqlIdentifier.fromInternal(name), - durableWrites, - false, - Collections.emptyMap(), - typesMapBuilder.build(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java deleted file mode 100644 index 238f4e0687a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.protocol.internal.util.Bytes; -import org.junit.Test; - -/** @see TokenRangeTest */ -public class ByteOrderedTokenRangeTest { - - private static final String MIN = "0x"; - - @Test - public void should_split_range() { - assertThat(range("0x0a", "0x0d").splitEvenly(3)) - .containsExactly(range("0x0a", "0x0b"), range("0x0b", "0x0c"), range("0x0c", "0x0d")); - } - - @Test - public void should_split_range_producing_empty_splits_near_ring_end() { - // 0x00 is the first token following min. - // This is an edge case where we want to make sure we don't accidentally generate the ]min,min] - // range (which is the whole ring): - assertThat(range(MIN, "0x00").splitEvenly(3)) - .containsExactly(range(MIN, "0x00"), range("0x00", "0x00"), range("0x00", "0x00")); - } - - @Test - public void should_split_range_when_padding_produces_same_token() { - // To compute the ranges, we pad with trailing zeroes until the range is big enough for the - // number of splits. - // But in this case padding produces the same token 0x1100, so adding more zeroes wouldn't help. 
- assertThat(range("0x11", "0x1100").splitEvenly(3)) - .containsExactly( - range("0x11", "0x1100"), range("0x1100", "0x1100"), range("0x1100", "0x1100")); - } - - @Test - public void should_split_range_that_wraps_around_the_ring() { - assertThat(range("0x0d", "0x0a").splitEvenly(2)) - .containsExactly(range("0x0d", "0x8c"), range("0x8c", "0x0a")); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_split_whole_ring() { - range(MIN, MIN).splitEvenly(1); - } - - private ByteOrderedTokenRange range(String start, String end) { - return new ByteOrderedTokenRange( - new ByteOrderedToken(Bytes.fromHexString(start)), - new ByteOrderedToken(Bytes.fromHexString(end))); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java deleted file mode 100644 index 3170e2dd6b2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class DefaultTokenMapTest { - - private static final String DC1 = "DC1"; - private static final String DC2 = "DC2"; - private static final String RACK1 = "RACK1"; - private static final String RACK2 = "RACK2"; - - private static final CqlIdentifier KS1 = CqlIdentifier.fromInternal("ks1"); - private static final CqlIdentifier KS2 = CqlIdentifier.fromInternal("ks2"); - - private static final TokenFactory TOKEN_FACTORY = new Murmur3TokenFactory(); - - private static final String TOKEN1 = "-9000000000000000000"; - private static final String TOKEN2 = "-6000000000000000000"; - private static final String TOKEN3 = "4000000000000000000"; - private static final 
String TOKEN4 = "9000000000000000000"; - private static final TokenRange RANGE12 = range(TOKEN1, TOKEN2); - private static final TokenRange RANGE23 = range(TOKEN2, TOKEN3); - private static final TokenRange RANGE34 = range(TOKEN3, TOKEN4); - private static final TokenRange RANGE41 = range(TOKEN4, TOKEN1); - private static final TokenRange FULL_RING = - range(TOKEN_FACTORY.minToken(), TOKEN_FACTORY.minToken()); - - // Some random routing keys that land in the ranges above (they were generated manually) - private static ByteBuffer ROUTING_KEY12 = TypeCodecs.BIGINT.encode(2L, DefaultProtocolVersion.V3); - private static ByteBuffer ROUTING_KEY23 = TypeCodecs.BIGINT.encode(0L, DefaultProtocolVersion.V3); - private static ByteBuffer ROUTING_KEY34 = TypeCodecs.BIGINT.encode(1L, DefaultProtocolVersion.V3); - private static ByteBuffer ROUTING_KEY41 = - TypeCodecs.BIGINT.encode(99L, DefaultProtocolVersion.V3); - - private static final ImmutableMap REPLICATE_ON_BOTH_DCS = - ImmutableMap.of( - "class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DC1, "1", DC2, "1"); - private static final ImmutableMap REPLICATE_ON_DC1 = - ImmutableMap.of("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DC1, "1"); - - @Mock private InternalDriverContext context; - private ReplicationStrategyFactory replicationStrategyFactory; - - @Before - public void setup() { - replicationStrategyFactory = new DefaultReplicationStrategyFactory(context); - } - - @Test - public void should_build_token_map() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List keyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - - // When - DefaultTokenMap tokenMap = - 
DefaultTokenMap.build(nodes, keyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - - // Then - assertThat(tokenMap.getTokenRanges()).containsExactly(RANGE12, RANGE23, RANGE34, RANGE41); - - // For KS1, each node gets its primary range, plus the one of the previous node in the other DC - assertThat(tokenMap.getTokenRanges(KS1, node1)).containsOnly(RANGE41, RANGE34); - assertThat(tokenMap.getTokenRanges(KS1, node2)).containsOnly(RANGE12, RANGE41); - assertThat(tokenMap.getTokenRanges(KS1, node3)).containsOnly(RANGE23, RANGE12); - assertThat(tokenMap.getTokenRanges(KS1, node4)).containsOnly(RANGE34, RANGE23); - - assertThat(tokenMap.getReplicas(KS1, RANGE12)).containsOnly(node2, node3); - assertThat(tokenMap.getReplicas(KS1, RANGE23)).containsOnly(node3, node4); - assertThat(tokenMap.getReplicas(KS1, RANGE34)).containsOnly(node1, node4); - assertThat(tokenMap.getReplicas(KS1, RANGE41)).containsOnly(node1, node2); - - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY12)).containsOnly(node2, node3); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY23)).containsOnly(node3, node4); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY34)).containsOnly(node1, node4); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY41)).containsOnly(node1, node2); - - // KS2 is only replicated on DC1 - assertThat(tokenMap.getTokenRanges(KS2, node1)).containsOnly(RANGE41, RANGE34); - assertThat(tokenMap.getTokenRanges(KS2, node3)).containsOnly(RANGE23, RANGE12); - assertThat(tokenMap.getTokenRanges(KS2, node2)).isEmpty(); - assertThat(tokenMap.getTokenRanges(KS2, node4)).isEmpty(); - - assertThat(tokenMap.getReplicas(KS2, RANGE12)).containsOnly(node3); - assertThat(tokenMap.getReplicas(KS2, RANGE23)).containsOnly(node3); - assertThat(tokenMap.getReplicas(KS2, RANGE34)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, RANGE41)).containsOnly(node1); - - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY12)).containsOnly(node3); - assertThat(tokenMap.getReplicas(KS2, 
ROUTING_KEY23)).containsOnly(node3); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY34)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY41)).containsOnly(node1); - } - - @Test - public void should_build_token_map_with_single_node() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - List nodes = ImmutableList.of(node1); - List keyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - - // When - DefaultTokenMap tokenMap = - DefaultTokenMap.build(nodes, keyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - - // Then - assertThat(tokenMap.getTokenRanges()).containsExactly(FULL_RING); - - assertThat(tokenMap.getTokenRanges(KS1, node1)).containsOnly(FULL_RING); - assertThat(tokenMap.getReplicas(KS1, FULL_RING)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY12)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY23)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY34)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY41)).containsOnly(node1); - - assertThat(tokenMap.getTokenRanges(KS2, node1)).containsOnly(FULL_RING); - assertThat(tokenMap.getReplicas(KS2, FULL_RING)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY12)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY23)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY34)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY41)).containsOnly(node1); - } - - @Test - public void should_refresh_when_keyspace_replication_has_not_changed() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = 
ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - - // When - // The schema gets refreshed, but no keyspaces are created or dropped, and the replication - // settings do not change (since we mock everything it looks the same here, but it could be a - // new table, etc). - List newKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - // Nothing was recomputed - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.replicationConfigs).isSameAs(oldTokenMap.replicationConfigs); - assertThat(newTokenMap.keyspaceMaps).isSameAs(oldTokenMap.keyspaceMaps); - } - - @Test - public void should_refresh_when_new_keyspace_with_existing_replication() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of(mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - - // When - List newKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap newTokenMap = 
- oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).isEqualTo(oldTokenMap.keyspaceMaps); - assertThat(newTokenMap.replicationConfigs) - .hasSize(2) - .containsEntry(KS1, REPLICATE_ON_BOTH_DCS) - .containsEntry(KS2, REPLICATE_ON_BOTH_DCS); - } - - @Test - public void should_refresh_when_new_keyspace_with_new_replication() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of(mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - - // When - List newKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS, REPLICATE_ON_DC1); - assertThat(newTokenMap.replicationConfigs) - .hasSize(2) - .containsEntry(KS1, REPLICATE_ON_BOTH_DCS) - .containsEntry(KS2, REPLICATE_ON_DC1); - } - - @Test - public void should_refresh_when_dropped_keyspace_with_replication_still_used() { - // Given - Node node1 = mockNode(DC1, RACK1, 
ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - - // When - List newKeyspaces = - ImmutableList.of(mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - assertThat(newTokenMap.replicationConfigs).hasSize(1).containsEntry(KS1, REPLICATE_ON_BOTH_DCS); - } - - @Test - public void should_refresh_when_dropped_keyspace_with_replication_not_used_anymore() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS, REPLICATE_ON_DC1); - - // When - List newKeyspaces = - 
ImmutableList.of(mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - assertThat(newTokenMap.replicationConfigs).hasSize(1).containsEntry(KS1, REPLICATE_ON_BOTH_DCS); - } - - @Test - public void should_refresh_when_updated_keyspace_with_different_replication() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS, REPLICATE_ON_DC1); - - // When - List newKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - assertThat(newTokenMap.replicationConfigs) - .hasSize(2) - .containsEntry(KS1, REPLICATE_ON_BOTH_DCS) - .containsEntry(KS2, REPLICATE_ON_BOTH_DCS); - } - - private DefaultNode mockNode(String dc, String 
rack, Set tokens) { - DefaultNode node = mock(DefaultNode.class); - when(node.getDatacenter()).thenReturn(dc); - when(node.getRack()).thenReturn(rack); - when(node.getRawTokens()).thenReturn(tokens); - return node; - } - - private KeyspaceMetadata mockKeyspace(CqlIdentifier name, Map replicationConfig) { - KeyspaceMetadata keyspace = mock(KeyspaceMetadata.class); - when(keyspace.getName()).thenReturn(name); - when(keyspace.getReplication()).thenReturn(replicationConfig); - return keyspace; - } - - private static TokenRange range(String start, String end) { - return range(TOKEN_FACTORY.parse(start), TOKEN_FACTORY.parse(end)); - } - - private static TokenRange range(Token startToken, Token endToken) { - return new Murmur3TokenRange((Murmur3Token) startToken, (Murmur3Token) endToken); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java deleted file mode 100644 index e5c1a0fc47c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.junit.Test; - -/** @see TokenRangeTest */ -public class Murmur3TokenRangeTest { - - private static final long MIN = -9223372036854775808L; - private static final long MAX = 9223372036854775807L; - - @Test - public void should_split_range() { - assertThat(range(MIN, 4611686018427387904L).splitEvenly(3)) - .containsExactly( - range(MIN, -4611686018427387904L), - range(-4611686018427387904L, 0), - range(0, 4611686018427387904L)); - } - - @Test - public void should_split_range_that_wraps_around_the_ring() { - assertThat(range(4611686018427387904L, 0).splitEvenly(3)) - .containsExactly( - range(4611686018427387904L, -9223372036854775807L), - range(-9223372036854775807L, -4611686018427387903L), - range(-4611686018427387903L, 0)); - } - - @Test - public void should_split_range_when_division_not_integral() { - assertThat(range(0, 11).splitEvenly(3)).containsExactly(range(0, 4), range(4, 8), range(8, 11)); - } - - @Test - public void should_split_range_producing_empty_splits() { - assertThat(range(0, 2).splitEvenly(5)) - .containsExactly(range(0, 1), range(1, 2), range(2, 2), range(2, 2), range(2, 2)); - } - - @Test - public void should_split_range_producing_empty_splits_near_ring_end() { - // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] - // range (which is the whole ring) - assertThat(range(MAX, MIN).splitEvenly(3)) - .containsExactly(range(MAX, MAX), range(MAX, MAX), range(MAX, MIN)); - - assertThat(range(MIN, MIN + 1).splitEvenly(3)) - .containsExactly(range(MIN, MIN + 1), range(MIN + 1, MIN + 1), range(MIN + 1, MIN + 1)); - } - - @Test - public void should_split_whole_ring() { - assertThat(range(MIN, MIN).splitEvenly(3)) - .containsExactly( - range(MIN, 
-3074457345618258603L), - range(-3074457345618258603L, 3074457345618258602L), - range(3074457345618258602L, MIN)); - } - - private Murmur3TokenRange range(long start, long end) { - return new Murmur3TokenRange(new Murmur3Token(start), new Murmur3Token(end)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java deleted file mode 100644 index 42dc5e69199..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java +++ /dev/null @@ -1,674 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.class) -public class NetworkTopologyReplicationStrategyTest { - - private static final String DC1 = "DC1"; - private static final String DC2 = "DC2"; - private static final String DC3 = "DC3"; - private static final String RACK11 = "RACK11"; - private static final String RACK12 = "RACK12"; - private static final String RACK21 = "RACK21"; - private static final String RACK22 = "RACK22"; - private static final String RACK31 = "RACK31"; - - private static final Token TOKEN01 = new Murmur3Token(-9000000000000000000L); - private static final Token TOKEN02 = new Murmur3Token(-8000000000000000000L); - private static final Token TOKEN03 = new Murmur3Token(-7000000000000000000L); - private static final Token TOKEN04 = new Murmur3Token(-6000000000000000000L); - private static final 
Token TOKEN05 = new Murmur3Token(-5000000000000000000L); - private static final Token TOKEN06 = new Murmur3Token(-4000000000000000000L); - private static final Token TOKEN07 = new Murmur3Token(-3000000000000000000L); - private static final Token TOKEN08 = new Murmur3Token(-2000000000000000000L); - private static final Token TOKEN09 = new Murmur3Token(-1000000000000000000L); - private static final Token TOKEN10 = new Murmur3Token(0L); - private static final Token TOKEN11 = new Murmur3Token(1000000000000000000L); - private static final Token TOKEN12 = new Murmur3Token(2000000000000000000L); - private static final Token TOKEN13 = new Murmur3Token(3000000000000000000L); - private static final Token TOKEN14 = new Murmur3Token(4000000000000000000L); - private static final Token TOKEN15 = new Murmur3Token(5000000000000000000L); - private static final Token TOKEN16 = new Murmur3Token(6000000000000000000L); - private static final Token TOKEN17 = new Murmur3Token(7000000000000000000L); - private static final Token TOKEN18 = new Murmur3Token(8000000000000000000L); - private static final Token TOKEN19 = new Murmur3Token(9000000000000000000L); - - @Mock private Node node1, node2, node3, node4, node5, node6, node7, node8; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - /** 4 tokens, 2 nodes in 2 DCs, RF = 1 in each DC. 
*/ - @Test - public void should_compute_for_simple_layout() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN04, TOKEN14, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN04, node2, TOKEN14, node1, TOKEN19, node2); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "1", DC2, "1"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - // Note: this also asserts the iteration order of the sets (unlike containsEntry(token, set)) - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN04)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN04)); - } - - /** 8 tokens, 4 nodes in 2 DCs in the same racks, RF = 1 in each DC. 
*/ - @Test - public void should_compute_for_simple_layout_with_multiple_nodes_per_rack() { - // Given - List ring = - ImmutableList.of(TOKEN01, TOKEN03, TOKEN05, TOKEN07, TOKEN13, TOKEN15, TOKEN17, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN03, node2) - .put(TOKEN05, node3) - .put(TOKEN07, node4) - .put(TOKEN13, node1) - .put(TOKEN15, node2) - .put(TOKEN17, node3) - .put(TOKEN19, node4) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "1", DC2, "1"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN03)).containsExactly(node2, node3); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node3, node4); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node4, node1); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN07)); - } - - /** 6 tokens, 3 nodes in 3 DCs, RF = 1 in each DC. 
*/ - @Test - public void should_compute_for_simple_layout_with_3_dcs() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN05, TOKEN09, TOKEN11, TOKEN15, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC3, RACK31); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN05, node2) - .put(TOKEN09, node3) - .put(TOKEN11, node1) - .put(TOKEN15, node2) - .put(TOKEN19, node3) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy( - ImmutableMap.of(DC1, "1", DC2, "1", DC3, "1"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2, node3); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node2, node3, node1); - assertThat(replicasByToken.get(TOKEN09)).containsExactly(node3, node1, node2); - assertThat(replicasByToken.get(TOKEN11)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN09)); - } - - /** 10 tokens, 4 nodes in 2 DCs, RF = 2 in each DC, 1 node owns 4 tokens, the others only 2. 
*/ - @Test - public void should_compute_for_unbalanced_ring() { - // Given - List ring = - ImmutableList.of( - TOKEN01, TOKEN03, TOKEN05, TOKEN07, TOKEN09, TOKEN11, TOKEN13, TOKEN15, TOKEN17, - TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN03, node1) - .put(TOKEN05, node2) - .put(TOKEN07, node3) - .put(TOKEN09, node4) - .put(TOKEN11, node1) - .put(TOKEN13, node1) - .put(TOKEN15, node2) - .put(TOKEN17, node3) - .put(TOKEN19, node4) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "2", DC2, "2"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN03)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node2, node3, node4, node1); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node3, node4, node1, node2); - assertThat(replicasByToken.get(TOKEN09)).containsExactly(node4, node1, node2, node3); - assertThat(replicasByToken.get(TOKEN11)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN09)); - ; - } - - /** 16 tokens, 8 nodes in 2 DCs with 2 per rack, RF = 2 in each DC. 
*/ - @Test - public void should_compute_with_multiple_racks_per_dc() { - // Given - List ring = - ImmutableList.of( - TOKEN01, TOKEN02, TOKEN03, TOKEN04, TOKEN05, TOKEN06, TOKEN07, TOKEN08, TOKEN12, - TOKEN13, TOKEN14, TOKEN15, TOKEN16, TOKEN17, TOKEN18, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK12); - locate(node4, DC2, RACK22); - locate(node5, DC1, RACK11); - locate(node6, DC2, RACK21); - locate(node7, DC1, RACK12); - locate(node8, DC2, RACK22); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node2) - .put(TOKEN03, node3) - .put(TOKEN04, node4) - .put(TOKEN05, node5) - .put(TOKEN06, node6) - .put(TOKEN07, node7) - .put(TOKEN08, node8) - .put(TOKEN12, node1) - .put(TOKEN13, node2) - .put(TOKEN14, node3) - .put(TOKEN15, node4) - .put(TOKEN16, node5) - .put(TOKEN17, node6) - .put(TOKEN18, node7) - .put(TOKEN19, node8) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "2", DC2, "2"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN02)).containsExactly(node2, node3, node4, node5); - assertThat(replicasByToken.get(TOKEN03)).containsExactly(node3, node4, node5, node6); - assertThat(replicasByToken.get(TOKEN04)).containsExactly(node4, node5, node6, node7); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node5, node6, node7, node8); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node6, node7, node8, node1); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node7, node8, node1, node2); - assertThat(replicasByToken.get(TOKEN08)).containsExactly(node8, node1, node2, node3); - 
assertThat(replicasByToken.get(TOKEN12)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN02)); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN04)); - assertThat(replicasByToken.get(TOKEN16)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN06)); - assertThat(replicasByToken.get(TOKEN18)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN08)); - } - - /** - * 16 tokens, 8 nodes in 2 DCs with 2 per rack, RF = 3 in each DC. - * - *

The nodes that are in the same rack occupy consecutive positions on the ring. We want to - * reproduce the case where we hit the same rack when we look for the second replica of a DC; the - * expected behavior is to skip the node and go to the next rack, and come back to the first rack - * for the third replica. - */ - @Test - public void should_pick_dc_replicas_in_different_racks_first() { - // Given - List ring = - ImmutableList.of( - TOKEN01, TOKEN02, TOKEN03, TOKEN04, TOKEN05, TOKEN06, TOKEN07, TOKEN08, TOKEN12, - TOKEN13, TOKEN14, TOKEN15, TOKEN16, TOKEN17, TOKEN18, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - locate(node5, DC1, RACK12); - locate(node6, DC2, RACK22); - locate(node7, DC1, RACK12); - locate(node8, DC2, RACK22); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node2) - .put(TOKEN03, node3) - .put(TOKEN04, node4) - .put(TOKEN05, node5) - .put(TOKEN06, node6) - .put(TOKEN07, node7) - .put(TOKEN08, node8) - .put(TOKEN12, node1) - .put(TOKEN13, node2) - .put(TOKEN14, node3) - .put(TOKEN15, node4) - .put(TOKEN16, node5) - .put(TOKEN17, node6) - .put(TOKEN18, node7) - .put(TOKEN19, node8) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "3", DC2, "3"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)) - .containsExactly(node1, node2, node5, node3, node6, node4); - assertThat(replicasByToken.get(TOKEN02)) - .containsExactly(node2, node3, node5, node6, node4, node7); - assertThat(replicasByToken.get(TOKEN03)) - .containsExactly(node3, node4, node5, node6, node7, node8); - assertThat(replicasByToken.get(TOKEN04)) - .containsExactly(node4, node5, node6, node8, node1, node7); - 
assertThat(replicasByToken.get(TOKEN05)) - .containsExactly(node5, node6, node1, node7, node2, node8); - assertThat(replicasByToken.get(TOKEN06)) - .containsExactly(node6, node7, node1, node2, node8, node3); - assertThat(replicasByToken.get(TOKEN07)) - .containsExactly(node7, node8, node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN08)) - .containsExactly(node8, node1, node2, node4, node5, node3); - assertThat(replicasByToken.get(TOKEN12)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN02)); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN04)); - assertThat(replicasByToken.get(TOKEN16)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN06)); - assertThat(replicasByToken.get(TOKEN18)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN08)); - } - - /** - * 16 tokens, 8 nodes in 2 DCs with 2 per rack, RF = 3 in each DC. - * - *

This is the same scenario as {@link #should_pick_dc_replicas_in_different_racks_first()}, - * except that each node owns consecutive tokens on the ring. - */ - @Test - public void should_pick_dc_replicas_in_different_racks_first_when_nodes_own_consecutive_tokens() { - // When - Map> replicasByToken = computeWithDifferentRacksAndConsecutiveTokens(3); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(16); - assertThat(replicasByToken.get(TOKEN01)) - .containsExactly(node1, node5, node3, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)) - .containsExactly(node3, node5, node7, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN04)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN05)) - .containsExactly(node5, node2, node6, node4, node1, node7); - assertThat(replicasByToken.get(TOKEN06)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN07)) - .containsExactly(node7, node2, node6, node4, node1, node3); - assertThat(replicasByToken.get(TOKEN08)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN12)) - .containsExactly(node2, node6, node4, node1, node5, node3); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)) - .containsExactly(node4, node6, node8, node1, node5, node3); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN14)); - assertThat(replicasByToken.get(TOKEN16)) - .containsExactly(node6, node1, node5, node3, node2, node8); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN16)); - assertThat(replicasByToken.get(TOKEN18)) - .containsExactly(node8, node1, node5, node3, node2, node4); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN18)); - } - - /** - * 16 tokens, 8 nodes in 2 DCs with 2 per rack, RF = 4 in 
each DC. - * - *

This is the same test as {@link - * #should_pick_dc_replicas_in_different_racks_first_when_nodes_own_consecutive_tokens()}, except - * for the replication factors. - */ - @Test - public void should_pick_dc_replicas_in_different_racks_first_when_all_nodes_contain_all_data() { - // When - Map> replicasByToken = computeWithDifferentRacksAndConsecutiveTokens(4); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(16); - assertThat(replicasByToken.get(TOKEN01)) - .containsExactly(node1, node5, node3, node7, node2, node6, node4, node8); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)) - .containsExactly(node3, node5, node7, node2, node6, node4, node8, node1); - assertThat(replicasByToken.get(TOKEN04)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN05)) - .containsExactly(node5, node2, node6, node4, node8, node1, node7, node3); - assertThat(replicasByToken.get(TOKEN06)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN07)) - .containsExactly(node7, node2, node6, node4, node8, node1, node3, node5); - assertThat(replicasByToken.get(TOKEN08)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN12)) - .containsExactly(node2, node6, node4, node8, node1, node5, node3, node7); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)) - .containsExactly(node4, node6, node8, node1, node5, node3, node7, node2); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN14)); - assertThat(replicasByToken.get(TOKEN16)) - .containsExactly(node6, node1, node5, node3, node7, node2, node8, node4); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN16)); - assertThat(replicasByToken.get(TOKEN18)) - .containsExactly(node8, node1, node5, node3, node7, node2, node4, node6); - 
assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN18)); - } - - private Map> computeWithDifferentRacksAndConsecutiveTokens( - int replicationFactor) { - List ring = - ImmutableList.of( - TOKEN01, TOKEN02, TOKEN03, TOKEN04, TOKEN05, TOKEN06, TOKEN07, TOKEN08, TOKEN12, - TOKEN13, TOKEN14, TOKEN15, TOKEN16, TOKEN17, TOKEN18, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - locate(node5, DC1, RACK12); - locate(node6, DC2, RACK22); - locate(node7, DC1, RACK12); - locate(node8, DC2, RACK22); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node1) - .put(TOKEN03, node3) - .put(TOKEN04, node3) - .put(TOKEN05, node5) - .put(TOKEN06, node5) - .put(TOKEN07, node7) - .put(TOKEN08, node7) - .put(TOKEN12, node2) - .put(TOKEN13, node2) - .put(TOKEN14, node4) - .put(TOKEN15, node4) - .put(TOKEN16, node6) - .put(TOKEN17, node6) - .put(TOKEN18, node8) - .put(TOKEN19, node8) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy( - ImmutableMap.of( - DC1, Integer.toString(replicationFactor), DC2, Integer.toString(replicationFactor)), - "test"); - - return strategy.computeReplicasByToken(tokenToPrimary, ring); - } - - /** - * 18 tokens, 6 nodes in 2 DCs with 2 in rack 1 and 1 in rack 2, RF = 2 in each DC. - * - *

This is taken from a real-life cluster. - */ - @Test - public void should_compute_complex_layout() { - // When - Map> replicasByToken = computeComplexLayout(2); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(18); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node5, node2, node6); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)).containsExactly(node5, node3, node2, node6); - assertThat(replicasByToken.get(TOKEN04)).containsExactly(node3, node5, node2, node6); - assertThat(replicasByToken.get(TOKEN05)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node5, node2, node6, node3); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node2, node6, node3, node5); - assertThat(replicasByToken.get(TOKEN08)).containsExactly(node6, node3, node4, node5); - assertThat(replicasByToken.get(TOKEN09)).containsExactly(node3, node4, node5, node6); - assertThat(replicasByToken.get(TOKEN10)).containsExactly(node4, node5, node6, node3); - assertThat(replicasByToken.get(TOKEN11)).containsExactly(node5, node4, node6, node3); - assertThat(replicasByToken.get(TOKEN12)).containsExactly(node4, node6, node3, node5); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN15)).containsExactly(node6, node3, node2, node5); - assertThat(replicasByToken.get(TOKEN16)).containsExactly(node3, node2, node6, node5); - assertThat(replicasByToken.get(TOKEN17)).containsExactly(node2, node6, node1, node5); - assertThat(replicasByToken.get(TOKEN18)).containsExactly(node6, node1, node5, node2); - } - - /** - * 18 tokens, 6 nodes in 2 DCs with 2 in rack 1 and 1 in rack 2, RF = 4 in each DC. - * - *

This is the same test as {@link #should_compute_complex_layout()}, but with RF = 4, which is - * too high for this cluster (it would require 8 nodes). - */ - @Test - public void should_compute_complex_layout_with_rf_too_high() { - // When - Map> replicasByToken = computeComplexLayout(4); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(18); - assertThat(replicasByToken.get(TOKEN01)) - .containsExactly(node1, node5, node3, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)) - .containsExactly(node5, node3, node1, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN04)) - .containsExactly(node3, node5, node1, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN05)) - .containsExactly(node1, node5, node2, node6, node3, node4); - assertThat(replicasByToken.get(TOKEN06)) - .containsExactly(node5, node2, node6, node3, node4, node1); - assertThat(replicasByToken.get(TOKEN07)) - .containsExactly(node2, node6, node3, node4, node5, node1); - assertThat(replicasByToken.get(TOKEN08)) - .containsExactly(node6, node3, node4, node5, node2, node1); - assertThat(replicasByToken.get(TOKEN09)) - .containsExactly(node3, node4, node5, node6, node2, node1); - assertThat(replicasByToken.get(TOKEN10)) - .containsExactly(node4, node5, node6, node2, node3, node1); - assertThat(replicasByToken.get(TOKEN11)) - .containsExactly(node5, node4, node6, node2, node3, node1); - assertThat(replicasByToken.get(TOKEN12)) - .containsExactly(node4, node6, node2, node3, node5, node1); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)) - .containsExactly(node2, node6, node3, node5, node1, node4); - assertThat(replicasByToken.get(TOKEN15)) - .containsExactly(node6, node3, node2, node5, node1, node4); - assertThat(replicasByToken.get(TOKEN16)) - .containsExactly(node3, node2, node6, node5, node1, node4); - 
assertThat(replicasByToken.get(TOKEN17)) - .containsExactly(node2, node6, node1, node5, node3, node4); - assertThat(replicasByToken.get(TOKEN18)) - .containsExactly(node6, node1, node5, node3, node2, node4); - } - - private Map> computeComplexLayout(int replicationFactor) { - List ring = - ImmutableList.of( - TOKEN01, TOKEN02, TOKEN03, TOKEN04, TOKEN05, TOKEN06, TOKEN07, TOKEN08, TOKEN09, - TOKEN10, TOKEN11, TOKEN12, TOKEN13, TOKEN14, TOKEN15, TOKEN16, TOKEN17, TOKEN18); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - locate(node5, DC1, RACK12); - locate(node6, DC2, RACK22); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node1) - .put(TOKEN03, node5) - .put(TOKEN04, node3) - .put(TOKEN05, node1) - .put(TOKEN06, node5) - .put(TOKEN07, node2) - .put(TOKEN08, node6) - .put(TOKEN09, node3) - .put(TOKEN10, node4) - .put(TOKEN11, node5) - .put(TOKEN12, node4) - .put(TOKEN13, node4) - .put(TOKEN14, node2) - .put(TOKEN15, node6) - .put(TOKEN16, node3) - .put(TOKEN17, node2) - .put(TOKEN18, node6) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy( - ImmutableMap.of( - DC1, Integer.toString(replicationFactor), DC2, Integer.toString(replicationFactor)), - "test"); - - return strategy.computeReplicasByToken(tokenToPrimary, ring); - } - - /** - * When the replication factors are invalid (user error) and a datacenter has a replication factor - * that cannot be met, we want to quickly abort and move on to the next DC (instead of keeping - * scanning the ring in vain, which results in quadratic complexity). We also log a warning to - * give the user a chance to fix their settings. 
- * - * @see JAVA-702 - * @see JAVA-859 - */ - @Test - public void should_abort_early_and_log_when_bad_replication_factor_cannot_be_met() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN04, TOKEN14, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN04, node2, TOKEN14, node1, TOKEN19, node2); - Logger logger = (Logger) LoggerFactory.getLogger(NetworkTopologyReplicationStrategy.class); - logger.addAppender(appender); - - try { - // When - int traversedTokensForValidSettings = - countTraversedTokens(ring, tokenToPrimary, ImmutableMap.of(DC1, "1", DC2, "1")); - - // Then - // No logs: - verify(appender, never()).doAppend(any(ILoggingEvent.class)); - - // When - int traversedTokensForInvalidSettings = - countTraversedTokens(ring, tokenToPrimary, ImmutableMap.of(DC1, "1", DC2, "1", DC3, "1")); - // Did not take more steps than the valid settings - assertThat(traversedTokensForInvalidSettings).isEqualTo(traversedTokensForValidSettings); - // Did log: - verify(appender).doAppend(loggingEventCaptor.capture()); - ILoggingEvent log = loggingEventCaptor.getValue(); - assertThat(log.getLevel()).isEqualTo(Level.WARN); - assertThat(log.getMessage()).contains("could not achieve replication factor"); - } finally { - logger.detachAppender(appender); - } - } - - // Counts the number of steps on the ring for a particular computation - private int countTraversedTokens( - List ring, - Map tokenToPrimary, - ImmutableMap replicationConfig) { - AtomicInteger count = new AtomicInteger(); - List ringSpy = spy(ring); - when(ringSpy.get(anyInt())) - .thenAnswer( - invocation -> { - count.incrementAndGet(); - return invocation.callRealMethod(); - }); - new NetworkTopologyReplicationStrategy(replicationConfig, "test") - .computeReplicasByToken(tokenToPrimary, ringSpy); - return count.get(); - } - - private void locate(Node node, String dc, String rack) { - when(node.getDatacenter()).thenReturn(dc); - 
when(node.getRack()).thenReturn(rack); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java deleted file mode 100644 index 54ac8a99738..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.math.BigInteger; -import org.junit.Test; - -public class RandomTokenRangeTest { - - private static final String MIN = "-1"; - private static final String MAX = "170141183460469231731687303715884105728"; - - @Test - public void should_split_range() { - assertThat(range("0", "127605887595351923798765477786913079296").splitEvenly(3)) - .containsExactly( - range("0", "42535295865117307932921825928971026432"), - range( - "42535295865117307932921825928971026432", "85070591730234615865843651857942052864"), - range( - "85070591730234615865843651857942052864", - "127605887595351923798765477786913079296")); - } - - @Test - public void should_split_range_that_wraps_around_the_ring() { - assertThat( - range( - "127605887595351923798765477786913079296", - "85070591730234615865843651857942052864") - .splitEvenly(3)) - .containsExactly( - range("127605887595351923798765477786913079296", "0"), - range("0", "42535295865117307932921825928971026432"), - range( - "42535295865117307932921825928971026432", - "85070591730234615865843651857942052864")); - } - - @Test - public void should_split_range_producing_empty_splits_near_ring_end() { - // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] - // range (which is the whole ring) - assertThat(range(MAX, MIN).splitEvenly(3)) - .containsExactly(range(MAX, MAX), range(MAX, MAX), range(MAX, MIN)); - - assertThat(range(MIN, "0").splitEvenly(3)) - .containsExactly(range(MIN, "0"), range("0", "0"), range("0", "0")); - } - - @Test - public void should_split_whole_ring() { - assertThat(range(MIN, MIN).splitEvenly(3)) - .containsExactly( - range(MIN, "56713727820156410577229101238628035242"), - range( - "56713727820156410577229101238628035242", - "113427455640312821154458202477256070485"), - range("113427455640312821154458202477256070485", MIN)); - } - - private 
RandomTokenRange range(String start, String end) { - return new RandomTokenRange( - new RandomToken(new BigInteger(start)), new RandomToken(new BigInteger(end))); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactorTest.java deleted file mode 100644 index d58d13933c2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactorTest.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.junit.Test; - -public class ReplicationFactorTest { - @Test - public void should_parse_factor_from_string() { - ReplicationFactor transFactor = ReplicationFactor.fromString("3/1"); - assertThat(transFactor.fullReplicas()).isEqualTo(2); - assertThat(transFactor.hasTransientReplicas()).isTrue(); - assertThat(transFactor.transientReplicas()).isEqualTo(1); - - ReplicationFactor factor = ReplicationFactor.fromString("3"); - assertThat(factor.fullReplicas()).isEqualTo(3); - assertThat(factor.hasTransientReplicas()).isFalse(); - assertThat(factor.transientReplicas()).isEqualTo(0); - } - - @Test - public void should_create_string_from_factor() { - ReplicationFactor transFactor = new ReplicationFactor(3, 1); - assertThat(transFactor.toString()).isEqualTo("3/1"); - ReplicationFactor factor = new ReplicationFactor(3); - assertThat(factor.toString()).isEqualTo("3"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java deleted file mode 100644 index 517d8cfdb84..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class SimpleReplicationStrategyTest { - - private static final Token TOKEN01 = new Murmur3Token(-9000000000000000000L); - private static final Token TOKEN02 = new Murmur3Token(-8000000000000000000L); - private static final Token TOKEN03 = new Murmur3Token(-7000000000000000000L); - private static final Token TOKEN04 = new Murmur3Token(-6000000000000000000L); - private static final Token TOKEN05 = new Murmur3Token(-5000000000000000000L); - private static final Token TOKEN06 = new Murmur3Token(-4000000000000000000L); - private static final Token TOKEN07 = new Murmur3Token(-3000000000000000000L); - private static final Token TOKEN08 = new Murmur3Token(-2000000000000000000L); - private static final Token TOKEN09 = new Murmur3Token(-1000000000000000000L); - private static final Token TOKEN10 = new Murmur3Token(0L); - private static final Token TOKEN11 = new Murmur3Token(1000000000000000000L); - private static final Token 
TOKEN12 = new Murmur3Token(2000000000000000000L); - private static final Token TOKEN13 = new Murmur3Token(3000000000000000000L); - private static final Token TOKEN14 = new Murmur3Token(4000000000000000000L); - private static final Token TOKEN15 = new Murmur3Token(5000000000000000000L); - private static final Token TOKEN16 = new Murmur3Token(6000000000000000000L); - private static final Token TOKEN17 = new Murmur3Token(7000000000000000000L); - private static final Token TOKEN18 = new Murmur3Token(8000000000000000000L); - private static final Token TOKEN19 = new Murmur3Token(9000000000000000000L); - - @Mock private Node node1, node2, node3, node4, node5, node6; - - /** 4 tokens, 2 nodes, RF = 2. */ - @Test - public void should_compute_for_simple_layout() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN06, node2, TOKEN14, node1, TOKEN19, node2); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(2)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - // Note: this also asserts the iteration order of the sets (unlike containsEntry(token, set)) - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN06)); - } - - /** 4 tokens, 2 nodes owning 2 consecutive tokens each, RF = 2. 
*/ - @Test - public void should_compute_when_nodes_own_consecutive_tokens() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN06, node1, TOKEN14, node2, TOKEN19, node2); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(2)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN06)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN14)); - } - - /** 4 tokens, 1 node owns 3 of them, RF = 2. */ - @Test - public void should_compute_when_ring_unbalanced() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN06, node1, TOKEN14, node2, TOKEN19, node1); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(2)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node1, node2); - } - - /** 4 tokens, 2 nodes, RF = 6 (too large, should be <= number of nodes). 
*/ - @Test - public void should_compute_when_replication_factor_is_larger_than_cluster_size() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN06, node2, TOKEN14, node1, TOKEN19, node2); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(6)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN06)); - } - - @Test - public void should_compute_for_complex_layout() { - // Given - List ring = - ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN02) - .add(TOKEN03) - .add(TOKEN04) - .add(TOKEN05) - .add(TOKEN06) - .add(TOKEN07) - .add(TOKEN08) - .add(TOKEN09) - .add(TOKEN10) - .add(TOKEN11) - .add(TOKEN12) - .add(TOKEN13) - .add(TOKEN14) - .add(TOKEN15) - .add(TOKEN16) - .add(TOKEN17) - .add(TOKEN18) - .build(); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node1) - .put(TOKEN03, node5) - .put(TOKEN04, node3) - .put(TOKEN05, node1) - .put(TOKEN06, node5) - .put(TOKEN07, node2) - .put(TOKEN08, node6) - .put(TOKEN09, node3) - .put(TOKEN10, node4) - .put(TOKEN11, node5) - .put(TOKEN12, node4) - .put(TOKEN13, node4) - .put(TOKEN14, node2) - .put(TOKEN15, node6) - .put(TOKEN16, node3) - .put(TOKEN17, node2) - .put(TOKEN18, node6) - .build(); - - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(3)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - 
assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node5, node3); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)).containsExactly(node5, node3, node1); - assertThat(replicasByToken.get(TOKEN04)).containsExactly(node3, node1, node5); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node1, node5, node2); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node5, node2, node6); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node2, node6, node3); - assertThat(replicasByToken.get(TOKEN08)).containsExactly(node6, node3, node4); - assertThat(replicasByToken.get(TOKEN09)).containsExactly(node3, node4, node5); - assertThat(replicasByToken.get(TOKEN10)).containsExactly(node4, node5, node2); - assertThat(replicasByToken.get(TOKEN11)).containsExactly(node5, node4, node2); - assertThat(replicasByToken.get(TOKEN12)).containsExactly(node4, node2, node6); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN15)).containsExactly(node6, node3, node2); - assertThat(replicasByToken.get(TOKEN16)).containsExactly(node3, node2, node6); - assertThat(replicasByToken.get(TOKEN17)).containsExactly(node2, node6, node1); - assertThat(replicasByToken.get(TOKEN18)).containsExactly(node6, node1, node5); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java deleted file mode 100644 index 7fcd56ba86e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import java.util.List; -import org.assertj.core.api.AbstractAssert; - -public class TokenRangeAssert extends AbstractAssert { - - public TokenRangeAssert(TokenRange actual) { - super(actual, TokenRangeAssert.class); - } - - public TokenRangeAssert startsWith(Token token) { - assertThat(actual.getStart()).isEqualTo(token); - return this; - } - - public TokenRangeAssert endsWith(Token token) { - assertThat(actual.getEnd()).isEqualTo(token); - return this; - } - - public TokenRangeAssert isEmpty() { - assertThat(actual.isEmpty()).isTrue(); - return this; - } - - public TokenRangeAssert isNotEmpty() { - assertThat(actual.isEmpty()).isFalse(); - return this; - } - - public TokenRangeAssert isWrappedAround() { - assertThat(actual.isWrappedAround()).isTrue(); - - List unwrapped = actual.unwrap(); - assertThat(unwrapped.size()) - .as("%s should unwrap to two ranges, but unwrapped to %s", actual, unwrapped) - .isEqualTo(2); - - return this; - } - - public TokenRangeAssert 
isNotWrappedAround() { - assertThat(actual.isWrappedAround()).isFalse(); - assertThat(actual.unwrap()).containsExactly(actual); - return this; - } - - public TokenRangeAssert unwrapsTo(TokenRange... subRanges) { - assertThat(actual.unwrap()).containsExactly(subRanges); - return this; - } - - public TokenRangeAssert intersects(TokenRange that) { - assertThat(actual.intersects(that)).as("%s should intersect %s", actual, that).isTrue(); - assertThat(that.intersects(actual)).as("%s should intersect %s", that, actual).isTrue(); - return this; - } - - public TokenRangeAssert doesNotIntersect(TokenRange... that) { - for (TokenRange thatRange : that) { - assertThat(actual.intersects(thatRange)) - .as("%s should not intersect %s", actual, thatRange) - .isFalse(); - assertThat(thatRange.intersects(actual)) - .as("%s should not intersect %s", thatRange, actual) - .isFalse(); - } - return this; - } - - public TokenRangeAssert contains(Token token, boolean isStart) { - assertThat(((TokenRangeBase) actual).contains(actual, token, isStart)).isTrue(); - return this; - } - - public TokenRangeAssert doesNotContain(Token token, boolean isStart) { - assertThat(((TokenRangeBase) actual).contains(actual, token, isStart)).isFalse(); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java deleted file mode 100644 index 77cfbb30d77..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.junit.Assert.fail; - -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.List; -import org.junit.Test; - -/** - * Covers the methods that don't depend on the underlying factory (we use Murmur3 as the - * implementation here). 
- * - * @see Murmur3TokenRangeTest - * @see ByteOrderedTokenRangeTest - * @see RandomTokenRangeTest - */ -public class TokenRangeTest { - - private Murmur3Token min = Murmur3TokenFactory.MIN_TOKEN; - - @Test - public void should_check_intersection() { - // NB - to make the test more visual, we use watch face numbers - assertThat(range(3, 9)) - .doesNotIntersect(range(11, 1)) - .doesNotIntersect(range(1, 2)) - .doesNotIntersect(range(11, 3)) - .doesNotIntersect(range(2, 3)) - .doesNotIntersect(range(3, 3)) - .intersects(range(2, 6)) - .intersects(range(2, 10)) - .intersects(range(6, 10)) - .intersects(range(4, 8)) - .intersects(range(3, 9)) - .doesNotIntersect(range(9, 10)) - .doesNotIntersect(range(10, 11)); - assertThat(range(9, 3)) - .doesNotIntersect(range(5, 7)) - .doesNotIntersect(range(7, 8)) - .doesNotIntersect(range(5, 9)) - .doesNotIntersect(range(8, 9)) - .doesNotIntersect(range(9, 9)) - .intersects(range(8, 2)) - .intersects(range(8, 4)) - .intersects(range(2, 4)) - .intersects(range(10, 2)) - .intersects(range(9, 3)) - .doesNotIntersect(range(3, 4)) - .doesNotIntersect(range(4, 5)); - assertThat(range(3, 3)).doesNotIntersect(range(3, 3)); - - // Reminder: minToken serves as both lower and upper bound - assertThat(minTo(5)) - .doesNotIntersect(range(6, 7)) - .doesNotIntersect(toMax(6)) - .intersects(range(6, 4)) - .intersects(range(2, 4)) - .intersects(minTo(4)) - .intersects(minTo(5)); - - assertThat(toMax(5)) - .doesNotIntersect(range(3, 4)) - .doesNotIntersect(minTo(4)) - .intersects(range(6, 7)) - .intersects(range(4, 1)) - .intersects(toMax(6)) - .intersects(toMax(5)); - - assertThat(fullRing()) - .intersects(range(3, 4)) - .intersects(toMax(3)) - .intersects(minTo(3)) - .doesNotIntersect(range(3, 3)); - } - - @Test - public void should_compute_intersection() { - assertThat(range(3, 9).intersectWith(range(2, 4))).isEqualTo(ImmutableList.of(range(3, 4))); - assertThat(range(3, 9).intersectWith(range(3, 5))).isEqualTo(ImmutableList.of(range(3, 5))); - 
assertThat(range(3, 9).intersectWith(range(4, 6))).isEqualTo(ImmutableList.of(range(4, 6))); - assertThat(range(3, 9).intersectWith(range(7, 9))).isEqualTo(ImmutableList.of(range(7, 9))); - assertThat(range(3, 9).intersectWith(range(8, 10))).isEqualTo(ImmutableList.of(range(8, 9))); - } - - @Test - public void should_compute_intersection_with_ranges_around_ring() { - // If a range wraps the ring (like 10, -10 does) this will produce two separate intersected - // ranges. - assertThat(range(10, -10).intersectWith(range(-20, 20))) - .isEqualTo(ImmutableList.of(range(10, 20), range(-20, -10))); - assertThat(range(-20, 20).intersectWith(range(10, -10))) - .isEqualTo(ImmutableList.of(range(10, 20), range(-20, -10))); - - // If both ranges wrap the ring, they should be merged together wrapping across the range. - assertThat(range(10, -30).intersectWith(range(20, -20))) - .isEqualTo(ImmutableList.of(range(20, -30))); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_compute_intersection_when_ranges_dont_intersect() { - range(1, 2).intersectWith(range(2, 3)); - } - - @Test - public void should_merge_with_other_range() { - assertThat(range(3, 9).mergeWith(range(2, 3))).isEqualTo(range(2, 9)); - assertThat(range(3, 9).mergeWith(range(2, 4))).isEqualTo(range(2, 9)); - assertThat(range(3, 9).mergeWith(range(11, 3))).isEqualTo(range(11, 9)); - assertThat(range(3, 9).mergeWith(range(11, 4))).isEqualTo(range(11, 9)); - - assertThat(range(3, 9).mergeWith(range(4, 8))).isEqualTo(range(3, 9)); - assertThat(range(3, 9).mergeWith(range(3, 9))).isEqualTo(range(3, 9)); - assertThat(range(3, 9).mergeWith(range(3, 3))).isEqualTo(range(3, 9)); - assertThat(range(3, 3).mergeWith(range(3, 9))).isEqualTo(range(3, 9)); - - assertThat(range(3, 9).mergeWith(range(9, 11))).isEqualTo(range(3, 11)); - assertThat(range(3, 9).mergeWith(range(8, 11))).isEqualTo(range(3, 11)); - assertThat(range(3, 9).mergeWith(range(9, 1))).isEqualTo(range(3, 1)); - 
assertThat(range(3, 9).mergeWith(range(8, 1))).isEqualTo(range(3, 1)); - - assertThat(range(3, 9).mergeWith(range(9, 3))).isEqualTo(fullRing()); - assertThat(range(3, 9).mergeWith(range(9, 4))).isEqualTo(fullRing()); - assertThat(range(3, 10).mergeWith(range(9, 4))).isEqualTo(fullRing()); - - assertThat(range(9, 3).mergeWith(range(8, 9))).isEqualTo(range(8, 3)); - assertThat(range(9, 3).mergeWith(range(8, 10))).isEqualTo(range(8, 3)); - assertThat(range(9, 3).mergeWith(range(4, 9))).isEqualTo(range(4, 3)); - assertThat(range(9, 3).mergeWith(range(4, 10))).isEqualTo(range(4, 3)); - - assertThat(range(9, 3).mergeWith(range(10, 2))).isEqualTo(range(9, 3)); - assertThat(range(9, 3).mergeWith(range(9, 3))).isEqualTo(range(9, 3)); - assertThat(range(9, 3).mergeWith(range(9, 9))).isEqualTo(range(9, 3)); - assertThat(range(9, 9).mergeWith(range(9, 3))).isEqualTo(range(9, 3)); - - assertThat(range(9, 3).mergeWith(range(3, 5))).isEqualTo(range(9, 5)); - assertThat(range(9, 3).mergeWith(range(2, 5))).isEqualTo(range(9, 5)); - assertThat(range(9, 3).mergeWith(range(3, 7))).isEqualTo(range(9, 7)); - assertThat(range(9, 3).mergeWith(range(2, 7))).isEqualTo(range(9, 7)); - - assertThat(range(9, 3).mergeWith(range(3, 9))).isEqualTo(fullRing()); - assertThat(range(9, 3).mergeWith(range(3, 10))).isEqualTo(fullRing()); - - assertThat(range(3, 3).mergeWith(range(3, 3))).isEqualTo(range(3, 3)); - - assertThat(toMax(5).mergeWith(range(6, 7))).isEqualTo(toMax(5)); - assertThat(toMax(5).mergeWith(minTo(3))).isEqualTo(range(5, 3)); - assertThat(toMax(5).mergeWith(range(3, 5))).isEqualTo(toMax(3)); - - assertThat(minTo(5).mergeWith(range(2, 3))).isEqualTo(minTo(5)); - assertThat(minTo(5).mergeWith(toMax(7))).isEqualTo(range(7, 5)); - assertThat(minTo(5).mergeWith(range(5, 7))).isEqualTo(minTo(7)); - } - - @Test(expected = IllegalArgumentException.class) - public void should_not_merge_with_nonadjacent_and_disjoint_ranges() { - range(0, 5).mergeWith(range(7, 14)); - } - - @Test - public void 
should_return_non_empty_range_if_other_range_is_empty() { - assertThat(range(1, 5).mergeWith(range(5, 5))).isEqualTo(range(1, 5)); - } - - @Test - public void should_unwrap_to_non_wrapping_ranges() { - assertThat(range(9, 3)).unwrapsTo(toMax(9), minTo(3)); - assertThat(range(3, 9)).isNotWrappedAround(); - assertThat(toMax(3)).isNotWrappedAround(); - assertThat(minTo(3)).isNotWrappedAround(); - assertThat(range(3, 3)).isNotWrappedAround(); - assertThat(fullRing()).isNotWrappedAround(); - } - - @Test - public void should_split_evenly() { - // Simply exercise splitEvenly, split logic is exercised in the test of each TokenRange - // implementation - List splits = range(3, 9).splitEvenly(3); - - assertThat(splits).hasSize(3); - assertThat(splits).containsExactly(range(3, 5), range(5, 7), range(7, 9)); - } - - @Test - public void should_throw_error_with_less_than_1_splits() { - for (int i = -255; i < 1; i++) { - try { - range(0, 1).splitEvenly(i); - fail("Expected error when providing " + i + " splits."); - } catch (IllegalArgumentException e) { - // expected. 
- } - } - } - - @Test(expected = IllegalArgumentException.class) - public void should_not_split_empty_token_range() { - range(0, 0).splitEvenly(1); - } - - @Test - public void should_create_empty_token_ranges_if_too_many_splits() { - TokenRange range = range(0, 10); - - List ranges = range.splitEvenly(255); - assertThat(ranges).hasSize(255); - - for (int i = 0; i < ranges.size(); i++) { - TokenRange tr = ranges.get(i); - if (i < 10) { - assertThat(tr).isEqualTo(range(i, i + 1)); - } else { - assertThat(tr.isEmpty()); - } - } - } - - @Test - public void should_check_if_range_contains_token() { - // ]1,2] contains 2, but it does not contain the start of ]2,3] - assertThat(range(1, 2)) - .contains(new Murmur3Token(2), false) - .doesNotContain(new Murmur3Token(2), true); - // ]1,2] does not contain 1, but it contains the start of ]1,3] - assertThat(range(1, 2)) - .doesNotContain(new Murmur3Token(1), false) - .contains(new Murmur3Token(1), true); - - // ]2,1] contains the start of ]min,5] - assertThat(range(2, 1)).contains(min, true); - - // ]min, 1] does not contain min, but it contains the start of ]min, 2] - assertThat(minTo(1)).doesNotContain(min, false).contains(min, true); - // ]1, min] contains min, but not the start of ]min, 2] - assertThat(toMax(1)).contains(min, false).doesNotContain(min, true); - - // An empty range contains nothing - assertThat(range(1, 1)) - .doesNotContain(new Murmur3Token(1), true) - .doesNotContain(new Murmur3Token(1), false) - .doesNotContain(min, true) - .doesNotContain(min, false); - - // The whole ring contains everything - assertThat(fullRing()) - .contains(min, true) - .contains(min, false) - .contains(new Murmur3Token(1), true) - .contains(new Murmur3Token(1), false); - } - - private TokenRange range(long start, long end) { - return new Murmur3TokenRange(new Murmur3Token(start), new Murmur3Token(end)); - } - - private TokenRange minTo(long end) { - return new Murmur3TokenRange(min, new Murmur3Token(end)); - } - - private 
TokenRange toMax(long start) { - return new Murmur3TokenRange(new Murmur3Token(start), min); - } - - private TokenRange fullRing() { - return new Murmur3TokenRange(Murmur3TokenFactory.MIN_TOKEN, Murmur3TokenFactory.MIN_TOKEN); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGeneratorTest.java deleted file mode 100644 index 13efda4b352..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGeneratorTest.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class DefaultMetricIdGeneratorTest { - - @Mock private InternalDriverContext context; - - @Mock private DriverConfig config; - - @Mock private DriverExecutionProfile profile; - - @Mock private Node node; - - @Mock private EndPoint endpoint; - - @Before - public void setUp() throws Exception { - MockitoAnnotations.initMocks(this); - given(context.getConfig()).willReturn(config); - given(context.getSessionName()).willReturn("s0"); - given(config.getDefaultProfile()).willReturn(profile); - given(node.getEndPoint()).willReturn(endpoint); - given(endpoint.asMetricPrefix()).willReturn("10_1_2_3:9042"); - } - - @Test - @UseDataProvider("sessionMetrics") - public void should_generate_session_metric(String prefix, String expectedName) { - // given - given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) - .willReturn(prefix); - DefaultMetricIdGenerator generator = new 
DefaultMetricIdGenerator(context); - // when - MetricId id = generator.sessionMetricId(DefaultSessionMetric.CONNECTED_NODES); - // then - assertThat(id.getName()).isEqualTo(expectedName); - assertThat(id.getTags()).isEmpty(); - } - - @Test - @UseDataProvider("nodeMetrics") - public void should_generate_node_metric(String prefix, String expectedName) { - // given - given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) - .willReturn(prefix); - DefaultMetricIdGenerator generator = new DefaultMetricIdGenerator(context); - // when - MetricId id = generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES); - // then - assertThat(id.getName()).isEqualTo(expectedName); - assertThat(id.getTags()).isEmpty(); - } - - @DataProvider - public static Object[][] sessionMetrics() { - String suffix = DefaultSessionMetric.CONNECTED_NODES.getPath(); - return new Object[][] { - new Object[] {"", "s0." + suffix}, - new Object[] {"cassandra", "cassandra.s0." + suffix}, - new Object[] {"app.cassandra", "app.cassandra.s0." + suffix} - }; - } - - @DataProvider - public static Object[][] nodeMetrics() { - String suffix = DefaultNodeMetric.CQL_MESSAGES.getPath(); - return new Object[][] { - new Object[] {"", "s0.nodes.10_1_2_3:9042." + suffix}, - new Object[] {"cassandra", "cassandra.s0.nodes.10_1_2_3:9042." + suffix}, - new Object[] {"app.cassandra", "app.cassandra.s0.nodes.10_1_2_3:9042." + suffix} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdTest.java deleted file mode 100644 index 339f9235dc2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.google.common.collect.ImmutableMap; -import org.junit.Test; - -public class DefaultMetricIdTest { - - @Test - public void testGetName() { - DefaultMetricId id = new DefaultMetricId("metric1", ImmutableMap.of()); - assertThat(id.getName()).isEqualTo("metric1"); - } - - @Test - public void testGetTags() { - DefaultMetricId id = - new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - assertThat(id.getTags()) - .hasSize(2) - .containsEntry("tag1", "value1") - .containsEntry("tag2", "value2"); - } - - @Test - public void testEquals() { - DefaultMetricId id1 = - new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - DefaultMetricId id2 = - new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - DefaultMetricId id3 = - new DefaultMetricId("metric2", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - DefaultMetricId id4 = new DefaultMetricId("metric1", ImmutableMap.of("tag2", "value2")); - assertThat(id1).isEqualTo(id2).isNotEqualTo(id3).isNotEqualTo(id4); - } - - @Test - public void testHashCode() { - DefaultMetricId id1 = - new 
DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - DefaultMetricId id2 = - new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - assertThat(id1).hasSameHashCodeAs(id2); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactoryTest.java deleted file mode 100644 index e5983c4f4fd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactoryTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import org.junit.Test; - -public class DropwizardMetricsFactoryTest { - - @Test - public void should_throw_if_registry_of_wrong_type() { - // given - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - List enabledMetrics = - Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath()); - // when - when(config.getDefaultProfile()).thenReturn(profile); - when(context.getConfig()).thenReturn(config); - when(context.getSessionName()).thenReturn("MockSession"); - // registry object is not a registry type - when(context.getMetricRegistry()).thenReturn(Integer.MAX_VALUE); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(Duration.ofHours(1)); - when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)) - .thenReturn(enabledMetrics); - // then - try { - new DropwizardMetricsFactory(context); - fail( - "MetricsFactory should require correct registry object type: " - + MetricRegistry.class.getName()); - } catch (IllegalArgumentException iae) { - assertThat(iae.getMessage()) - .isEqualTo( - "Unexpected Metrics registry object. 
" - + "Expected registry object to be of type '%s', but was '%s'", - MetricRegistry.class.getName(), Integer.class.getName()); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdaterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdaterTest.java deleted file mode 100644 index ccc42a7027d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdaterTest.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.LoggerTest; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Duration; -import java.util.Collections; -import java.util.Set; -import java.util.function.Supplier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DropwizardNodeMetricUpdaterTest { - - @Test - public void should_log_warning_when_provided_eviction_time_setting_is_too_low() { - // given - LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); - Duration expireAfter = 
AbstractMetricUpdater.MIN_EXPIRE_AFTER.minusMinutes(1); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(expireAfter); - - DropwizardNodeMetricUpdater updater = - new DropwizardNodeMetricUpdater(node, context, enabledMetrics, new MetricRegistry()) { - @Override - protected void initializeGauge( - NodeMetric metric, DriverExecutionProfile profile, Supplier supplier) { - // do nothing - } - - @Override - protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { - // do nothing - } - - @Override - protected void initializeHdrTimer( - NodeMetric metric, - DriverExecutionProfile profile, - DriverOption highestLatency, - DriverOption significantDigits, - DriverOption interval) { - // do nothing - } - }; - - // then - assertThat(updater.getExpireAfter()).isEqualTo(AbstractMetricUpdater.MIN_EXPIRE_AFTER); - verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); - assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); - assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) - .contains( - String.format( - "[prefix] Value too low for %s: %s. 
Forcing to %s instead.", - DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), - expireAfter, - AbstractMetricUpdater.MIN_EXPIRE_AFTER)); - } - - @Test - @UseDataProvider(value = "acceptableEvictionTimes") - public void should_not_log_warning_when_provided_eviction_time_setting_is_acceptable( - Duration expireAfter) { - // given - LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(expireAfter); - - DropwizardNodeMetricUpdater updater = - new DropwizardNodeMetricUpdater(node, context, enabledMetrics, new MetricRegistry()) { - @Override - protected void initializeGauge( - NodeMetric metric, DriverExecutionProfile profile, Supplier supplier) { - // do nothing - } - - @Override - protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { - // do nothing - } - - @Override - protected void initializeHdrTimer( - NodeMetric metric, - DriverExecutionProfile profile, - DriverOption highestLatency, - DriverOption significantDigits, - DriverOption interval) { - // do nothing - } - }; - - // then - assertThat(updater.getExpireAfter()).isEqualTo(expireAfter); - verify(logger.appender, timeout(500).times(0)).doAppend(logger.loggingEventCaptor.capture()); - } - - @DataProvider - public static Object[][] acceptableEvictionTimes() { - return new Object[][] { - {AbstractMetricUpdater.MIN_EXPIRE_AFTER}, - 
{AbstractMetricUpdater.MIN_EXPIRE_AFTER.plusMinutes(1)} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactoryTest.java deleted file mode 100644 index 3a563be4453..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactoryTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.LoggerTest; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.util.Collections; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class NoopMetricsFactoryTest { - - @Test - public void should_log_warning_when_metrics_enabled() { - // given - InternalDriverContext context = mock(InternalDriverContext.class); - DriverConfig config = mock(DriverConfig.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - when(context.getSessionName()).thenReturn("MockSession"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)) - .thenReturn(Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath())); - LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(NoopMetricsFactory.class, Level.WARN); - - // when - new NoopMetricsFactory(context); - - // then - verify(logger.appender, times(1)).doAppend(logger.loggingEventCaptor.capture()); - assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); - assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) - .contains("[MockSession] Some 
session-level or node-level metrics were enabled"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGeneratorTest.java deleted file mode 100644 index 809a7419ba4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGeneratorTest.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.google.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class TaggingMetricIdGeneratorTest { - - @Mock private InternalDriverContext context; - - @Mock private DriverConfig config; - - @Mock private DriverExecutionProfile profile; - - @Mock private Node node; - - @Mock private EndPoint endpoint; - - @Before - public void setUp() throws Exception { - MockitoAnnotations.initMocks(this); - given(context.getConfig()).willReturn(config); - given(context.getSessionName()).willReturn("s0"); - given(config.getDefaultProfile()).willReturn(profile); - given(node.getEndPoint()).willReturn(endpoint); - given(endpoint.toString()).willReturn("/10.1.2.3:9042"); - } - - @Test - @UseDataProvider("sessionMetrics") - public void should_generate_session_metric( - String prefix, String expectedName, Map expectedTags) { - // given - given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) 
- .willReturn(prefix); - TaggingMetricIdGenerator generator = new TaggingMetricIdGenerator(context); - // when - MetricId id = generator.sessionMetricId(DefaultSessionMetric.CONNECTED_NODES); - // then - assertThat(id.getName()).isEqualTo(expectedName); - assertThat(id.getTags()).isEqualTo(expectedTags); - } - - @Test - @UseDataProvider("nodeMetrics") - public void should_generate_node_metric( - String prefix, String expectedName, Map expectedTags) { - // given - given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) - .willReturn(prefix); - TaggingMetricIdGenerator generator = new TaggingMetricIdGenerator(context); - // when - MetricId id = generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES); - // then - assertThat(id.getName()).isEqualTo(expectedName); - assertThat(id.getTags()).isEqualTo(expectedTags); - } - - @DataProvider - public static Object[][] sessionMetrics() { - String suffix = DefaultSessionMetric.CONNECTED_NODES.getPath(); - ImmutableMap tags = ImmutableMap.of("session", "s0"); - return new Object[][] { - new Object[] {"", "session." + suffix, tags}, - new Object[] {"cassandra", "cassandra.session." + suffix, tags}, - new Object[] {"app.cassandra", "app.cassandra.session." + suffix, tags} - }; - } - - @DataProvider - public static Object[][] nodeMetrics() { - String suffix = DefaultNodeMetric.CQL_MESSAGES.getPath(); - ImmutableMap tags = ImmutableMap.of("session", "s0", "node", "/10.1.2.3:9042"); - return new Object[][] { - new Object[] {"", "nodes." + suffix, tags}, - new Object[] {"cassandra", "cassandra.nodes." + suffix, tags}, - new Object[] {"app.cassandra", "app.cassandra.nodes." 
+ suffix, tags} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/os/JnrLibcTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/os/JnrLibcTest.java deleted file mode 100644 index 30dee7847c4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/os/JnrLibcTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.time.Instant; -import java.time.temporal.ChronoUnit; -import java.util.Optional; -import org.junit.Test; - -/** - * Explicitly test native impl based on jnr's POSIX impl. This test should pass on any platform - * which is supported by jnr. 
- */ -public class JnrLibcTest { - - @Test - public void should_be_available() { - - Libc impl = new JnrLibc(); - assertThat(impl.available()).isTrue(); - } - - @Test - public void should_support_getpid() { - Libc impl = new JnrLibc(); - Optional val = impl.getpid(); - assertThat(val).isNotEmpty(); - assertThat(val.get()).isGreaterThan(1); - } - - @Test - public void should_support_gettimeofday() { - Libc impl = new JnrLibc(); - Optional val = impl.gettimeofday(); - assertThat(val).isNotEmpty(); - assertThat(val.get()).isGreaterThan(0); - - Instant now = Instant.now(); - Instant rvInstant = Instant.EPOCH.plus(val.get(), ChronoUnit.MICROS); - assertThat(rvInstant.isAfter(now.minusSeconds(1))).isTrue(); - assertThat(rvInstant.isBefore(now.plusSeconds(1))).isTrue(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java deleted file mode 100644 index aeaf28d1fdf..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.junit.Test; - -public class NativeTest { - - /** Verifies that {@link Native#getCpu()} returns non-empty cpu architecture */ - @Test - public void should_return_cpu_info() { - assertThat(Native.getCpu()).isNotEmpty(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java deleted file mode 100644 index 5c7257d8c3f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.ClusterNameMismatchException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Test; -import org.mockito.InOrder; - -public class ChannelPoolInitTest extends ChannelPoolTestBase { - - @Test - public void should_initialize_when_all_channels_succeed() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node, channel1) - .success(node, channel2) - .success(node, channel3) - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - - assertThatStage(poolFuture) - 
.isSuccess(pool -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3)); - verify(eventBus, VERIFY_TIMEOUT.times(3)).fire(ChannelEvent.channelOpened(node)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_initialize_when_all_channels_fail() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node, "mock channel init failure") - .failure(node, "mock channel init failure") - .failure(node, "mock channel init failure") - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - - assertThatStage(poolFuture).isSuccess(pool -> assertThat(pool.channels).isEmpty()); - verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - verify(nodeMetricUpdater, VERIFY_TIMEOUT.times(3)) - .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_indicate_when_keyspace_failed_on_all_channels() { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node, new InvalidKeyspaceException("invalid keyspace")) - .failure(node, new InvalidKeyspaceException("invalid keyspace")) - .failure(node, new InvalidKeyspaceException("invalid keyspace")) - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - assertThatStage(poolFuture) - .isSuccess( - pool -> { - assertThat(pool.isInvalidKeyspace()).isTrue(); - verify(nodeMetricUpdater, VERIFY_TIMEOUT.times(3)) - .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); - }); - } - - @Test - public void 
should_fire_force_down_event_when_cluster_name_does_not_match() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - ClusterNameMismatchException error = - new ClusterNameMismatchException(node.getEndPoint(), "actual", "expected"); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node, error) - .failure(node, error) - .failure(node, error) - .build(); - - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - - verify(eventBus, VERIFY_TIMEOUT) - .fire(TopologyEvent.forceDown(node.getBroadcastRpcAddress().get())); - verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - - verify(nodeMetricUpdater, VERIFY_TIMEOUT.times(3)) - .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_when_init_incomplete() throws Exception { - // Short delay so we don't have to wait in the test - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // Init: 1 channel fails, the other succeeds - .failure(node, "mock channel init failure") - .success(node, channel1) - // 1st reconnection - .pending(node, channel2Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - 
assertThat(pool.channels).containsOnly(channel1); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - // A reconnection should have been scheduled - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - channel2Future.complete(channel2); - factoryHelper.waitForCalls(node, 1); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel2)); - - verify(nodeMetricUpdater, VERIFY_TIMEOUT) - .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java deleted file mode 100644 index 4273a51f891..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class ChannelPoolKeyspaceTest extends ChannelPoolTestBase { - - @Test - public void should_switch_keyspace_on_existing_channels() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node, channel1) - .success(node, channel2) - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - CqlIdentifier newKeyspace = CqlIdentifier.fromCql("new_keyspace"); - CompletionStage setKeyspaceFuture = pool.setKeyspace(newKeyspace); - - verify(channel1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(channel2, 
VERIFY_TIMEOUT).setKeyspace(newKeyspace); - - assertThatStage(setKeyspaceFuture).isSuccess(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_switch_keyspace_on_pending_channels() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - CompletableFuture channel1Future = new CompletableFuture<>(); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .failure(node, "mock channel init failure") - .failure(node, "mock channel init failure") - // reconnection - .pending(node, channel1Future) - .pending(node, channel2Future) - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - - // Check that reconnection has kicked in, but do not complete it yet - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - factoryHelper.waitForCalls(node, 2); - - // Switch keyspace, it succeeds immediately since there is no active channel - CqlIdentifier newKeyspace = CqlIdentifier.fromCql("new_keyspace"); - CompletionStage setKeyspaceFuture = pool.setKeyspace(newKeyspace); - assertThatStage(setKeyspaceFuture).isSuccess(); - - // Now let the two channels succeed to complete the reconnection - channel1Future.complete(channel1); - channel2Future.complete(channel2); - - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - verify(channel1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(channel2, 
VERIFY_TIMEOUT).setKeyspace(newKeyspace); - - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java deleted file mode 100644 index c4538f78bdb..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import io.netty.channel.ChannelPromise; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.InOrder; - -public class ChannelPoolReconnectTest extends ChannelPoolTestBase { - - @Test - public void should_reconnect_when_channel_closes() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - // reconnection - .pending(node, channel3Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, 
NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - // Simulate fatal error on channel2 - ((ChannelPromise) channel2.closeFuture()) - .setFailure(new Exception("mock channel init failure")); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node)); - - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - factoryHelper.waitForCall(node); - - channel3Future.complete(channel3); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel3)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_when_channel_starts_graceful_shutdown() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - // reconnection - .pending(node, channel3Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - 
assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - // Simulate graceful shutdown on channel2 - ((ChannelPromise) channel2.closeStartedFuture()).setSuccess(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node)); - - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - factoryHelper.waitForCall(node); - - channel3Future.complete(channel3); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel3)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_let_current_attempt_complete_when_reconnecting_now() - throws ExecutionException, InterruptedException { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(1); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - // reconnection - .pending(node, channel2Future) - .build(); - - InOrder inOrder = inOrder(eventBus); - - // Initial connection - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - factoryHelper.waitForCalls(node, 1); - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - inOrder.verify(eventBus, 
VERIFY_TIMEOUT.times(1)).fire(ChannelEvent.channelOpened(node)); - - // Kill channel1, reconnection begins and starts initializing channel2, but the initialization - // is still pending (channel2Future not completed) - ((ChannelPromise) channel1.closeStartedFuture()).setSuccess(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCalls(node, 1); - - // Force a reconnection, should not try to create a new channel since we have a pending one - pool.reconnectNow(); - TimeUnit.MILLISECONDS.sleep(200); - factoryHelper.verifyNoMoreCalls(); - inOrder.verify(eventBus, never()).fire(any()); - - // Complete the initialization of channel2, reconnection succeeds - channel2Future.complete(channel2); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel2)); - - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java deleted file mode 100644 index 6992bb7742a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.InOrder; - -public class ChannelPoolResizeTest extends ChannelPoolTestBase { - - @Test - public void should_shrink_outside_of_reconnection() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(4); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = 
newMockDriverChannel(4); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node, channel1) - .success(node, channel2) - .success(node, channel3) - .success(node, channel4) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.REMOTE, context, "test"); - - factoryHelper.waitForCalls(node, 4); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(4)).fire(ChannelEvent.channelOpened(node)); - - pool.resize(NodeDistance.LOCAL); - - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_shrink_during_reconnection() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(4); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - DriverChannel channel4 = newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - .failure(node, "mock channel init failure") - .failure(node, "mock channel init failure") - // reconnection - .pending(node, channel3Future) - .pending(node, channel4Future) - .build(); - 
InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.REMOTE, context, "test"); - - factoryHelper.waitForCalls(node, 4); - - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - // A reconnection should have been scheduled to add the missing channels, don't complete yet - verify(reconnectionSchedule).nextDelay(); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); - - pool.resize(NodeDistance.LOCAL); - - TimeUnit.MILLISECONDS.sleep(200); - - // Now allow the reconnected channels to complete initialization - channel3Future.complete(channel3); - channel4Future.complete(channel4); - - factoryHelper.waitForCalls(node, 2); - - // Pool should have shrunk back to 2. We keep the most recent channels so 1 and 2 get closed. 
- inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_grow_outside_of_reconnection() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(4); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = newMockDriverChannel(4); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - // growth attempt - .success(node, channel3) - .success(node, channel4) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - pool.resize(NodeDistance.REMOTE); - - // The resizing should have triggered a reconnection - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, 
VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await() - .untilAsserted( - () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_grow_during_reconnection() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(4); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - DriverChannel channel4 = newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .failure(node, "mock channel init failure") - // first reconnection attempt - .pending(node, channel2Future) - // extra reconnection attempt after we realize the pool must grow - .pending(node, channel3Future) - .pending(node, channel4Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1); - - // A reconnection should have been scheduled to add the missing channel, don't complete yet - verify(reconnectionSchedule, 
VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - pool.resize(NodeDistance.REMOTE); - - TimeUnit.MILLISECONDS.sleep(200); - - // Complete the channel for the first reconnection, bringing the count to 2 - channel2Future.complete(channel2); - factoryHelper.waitForCall(node); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel2)); - - // A second attempt should have been scheduled since we're now still under the target size - verify(reconnectionSchedule, VERIFY_TIMEOUT.times(2)).nextDelay(); - // Same reconnection is still running, no additional events - inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStopped(node)); - inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStarted(node)); - - // Two more channels get opened, bringing us to the target count - factoryHelper.waitForCalls(node, 2); - channel3Future.complete(channel3); - channel4Future.complete(channel4); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await() - .untilAsserted( - () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_resize_outside_of_reconnection_if_config_changes() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = newMockDriverChannel(4); - MockChannelFactoryHelper factoryHelper = - 
MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - // growth attempt - .success(node, channel3) - .success(node, channel4) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - // Simulate a configuration change - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(4); - eventBus.fire(ConfigChangeEvent.INSTANCE); - - // It should have triggered a reconnection - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await() - .untilAsserted( - () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_resize_during_reconnection_if_config_changes() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - DriverChannel channel4 = 
newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .failure(node, "mock channel init failure") - // first reconnection attempt - .pending(node, channel2Future) - // extra reconnection attempt after we realize the pool must grow - .pending(node, channel3Future) - .pending(node, channel4Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1); - - // A reconnection should have been scheduled to add the missing channel, don't complete yet - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - // Simulate a configuration change - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(4); - eventBus.fire(ConfigChangeEvent.INSTANCE); - TimeUnit.MILLISECONDS.sleep(200); - - // Complete the channel for the first reconnection, bringing the count to 2 - channel2Future.complete(channel2); - factoryHelper.waitForCall(node); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel2)); - - // A second attempt should have been scheduled since we're now still under the target size - verify(reconnectionSchedule, VERIFY_TIMEOUT.times(2)).nextDelay(); - // Same reconnection is still running, no additional events - inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStopped(node)); - 
inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStarted(node)); - - // Two more channels get opened, bringing us to the target count - factoryHelper.waitForCalls(node, 2); - channel3Future.complete(channel3); - channel4Future.complete(channel4); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await() - .untilAsserted( - () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_ignore_config_change_if_not_relevant() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node, channel1) - .success(node, channel2) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - // Config changes, but not for our distance - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(1); - eventBus.fire(ConfigChangeEvent.INSTANCE); - TimeUnit.MILLISECONDS.sleep(200); - - // It should not have triggered a reconnection - verify(reconnectionSchedule, never()).nextDelay(); - - factoryHelper.verifyNoMoreCalls(); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java deleted file mode 100644 index b40bcb4aa39..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import io.netty.channel.ChannelPromise; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Test; -import org.mockito.InOrder; - -public class ChannelPoolShutdownTest extends ChannelPoolTestBase { - - @Test - public void should_close_all_channels_when_closed() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - .success(node, channel3) - // reconnection - .pending(node, channel4Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(3)).fire(ChannelEvent.channelOpened(node)); - - 
assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - - // Simulate graceful shutdown on channel3 - ((ChannelPromise) channel3.closeStartedFuture()).setSuccess(); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(1)).fire(ChannelEvent.channelClosed(node)); - - // Reconnection should have kicked in and started to open channel4, do not complete it yet - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCalls(node, 1); - - CompletionStage closeFuture = pool.closeAsync(); - - // The two original channels were closed normally - verify(channel1, VERIFY_TIMEOUT).close(); - verify(channel2, VERIFY_TIMEOUT).close(); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); - // The closing channel was not closed again - verify(channel3, never()).close(); - - // Complete the reconnecting channel - channel4Future.complete(channel4); - - // It should be force-closed once we find out the pool was closed - verify(channel4, VERIFY_TIMEOUT).forceClose(); - // No events because the channel was never really associated to the pool - inOrder.verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, never()).fire(ChannelEvent.channelClosed(node)); - - // We don't wait for reconnected channels to close, so the pool only depends on channel 1 to 3 - ((ChannelPromise) channel1.closeFuture()).setSuccess(); - ((ChannelPromise) channel2.closeFuture()).setSuccess(); - ((ChannelPromise) channel3.closeFuture()).setSuccess(); - - assertThatStage(closeFuture).isSuccess(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_force_close_all_channels_when_force_closed() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = 
newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - .success(node, channel3) - // reconnection - .pending(node, channel4Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(3)).fire(ChannelEvent.channelOpened(node)); - - // Simulate graceful shutdown on channel3 - ((ChannelPromise) channel3.closeStartedFuture()).setSuccess(); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(1)).fire(ChannelEvent.channelClosed(node)); - - // Reconnection should have kicked in and started to open a channel, do not complete it yet - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCalls(node, 1); - - CompletionStage closeFuture = pool.forceCloseAsync(); - - // The three original channels were force-closed - verify(channel1, VERIFY_TIMEOUT).forceClose(); - verify(channel2, VERIFY_TIMEOUT).forceClose(); - verify(channel3, VERIFY_TIMEOUT).forceClose(); - // Only two events because the one for channel3 was sent earlier - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); - - // Complete the reconnecting channel - channel4Future.complete(channel4); - - // It should be force-closed once we find out the pool was closed - verify(channel4, VERIFY_TIMEOUT).forceClose(); - // No events because the channel was never really associated to the pool - inOrder.verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - 
inOrder.verify(eventBus, never()).fire(ChannelEvent.channelClosed(node)); - - // We don't wait for reconnected channels to close, so the pool only depends on channel 1-3 - ((ChannelPromise) channel1.closeFuture()).setSuccess(); - ((ChannelPromise) channel2.closeFuture()).setSuccess(); - ((ChannelPromise) channel3.closeFuture()).setSuccess(); - - assertThatStage(closeFuture).isSuccess(); - - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java deleted file mode 100644 index 2f8056e49e0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import io.netty.channel.Channel; -import io.netty.channel.DefaultChannelPromise; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.channel.EventLoop; -import java.time.Duration; -import java.util.concurrent.TimeUnit; -import org.junit.After; -import org.junit.Before; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.verification.VerificationWithTimeout; - -abstract class ChannelPoolTestBase { - - /** How long we wait when verifying mocks for async invocations */ - protected static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(2000); - - @Mock protected InternalDriverContext context; - @Mock private DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - @Mock private 
ReconnectionPolicy reconnectionPolicy; - @Mock protected ReconnectionPolicy.ReconnectionSchedule reconnectionSchedule; - @Mock private NettyOptions nettyOptions; - @Mock protected ChannelFactory channelFactory; - @Mock protected MetricsFactory metricsFactory; - @Mock protected NodeMetricUpdater nodeMetricUpdater; - protected DefaultNode node; - protected EventBus eventBus; - private DefaultEventLoopGroup adminEventLoopGroup; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - adminEventLoopGroup = new DefaultEventLoopGroup(1); - - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - this.eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - when(context.getChannelFactory()).thenReturn(channelFactory); - - when(context.getReconnectionPolicy()).thenReturn(reconnectionPolicy); - when(reconnectionPolicy.newNodeSchedule(any(Node.class))).thenReturn(reconnectionSchedule); - // By default, set a large reconnection delay. Tests that care about reconnection will override - // it. 
- when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(metricsFactory.newNodeUpdater(any(Node.class))).thenReturn(nodeMetricUpdater); - - node = TestNodeFactory.newNode(1, context); - } - - @After - public void teardown() { - adminEventLoopGroup.shutdownGracefully(100, 200, TimeUnit.MILLISECONDS); - } - - DriverChannel newMockDriverChannel(int id) { - DriverChannel driverChannel = mock(DriverChannel.class); - EventLoop adminExecutor = adminEventLoopGroup.next(); - Channel channel = mock(Channel.class); - DefaultChannelPromise closeFuture = new DefaultChannelPromise(channel, adminExecutor); - DefaultChannelPromise closeStartedFuture = new DefaultChannelPromise(channel, adminExecutor); - when(driverChannel.close()).thenReturn(closeFuture); - when(driverChannel.forceClose()).thenReturn(closeFuture); - when(driverChannel.closeFuture()).thenReturn(closeFuture); - when(driverChannel.closeStartedFuture()).thenReturn(closeStartedFuture); - when(driverChannel.setKeyspace(any(CqlIdentifier.class))) - .thenReturn(adminExecutor.newSucceededFuture(null)); - when(driverChannel.toString()).thenReturn("channel" + id); - return driverChannel; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java deleted file mode 100644 index 628110bc1df..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ChannelSetTest { - @Mock private DriverChannel channel1, channel2, channel3; - private ChannelSet set; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - set = new ChannelSet(); - } - - @Test - public void should_return_null_when_empty() { - assertThat(set.size()).isEqualTo(0); - assertThat(set.next()).isNull(); - } - - @Test - public void should_return_element_when_single() { - // Given - when(channel1.preAcquireId()).thenReturn(true); - - // When - set.add(channel1); - - // Then - assertThat(set.size()).isEqualTo(1); - assertThat(set.next()).isEqualTo(channel1); - verify(channel1, never()).getAvailableIds(); - verify(channel1).preAcquireId(); - } - - @Test - public void should_return_null_when_single_but_full() { - // Given - when(channel1.preAcquireId()).thenReturn(false); - - // When - set.add(channel1); - - // Then - assertThat(set.next()).isNull(); - verify(channel1).preAcquireId(); - } - - @Test - public void should_return_most_available_when_multiple() { - // Given - when(channel1.getAvailableIds()).thenReturn(2); - when(channel2.getAvailableIds()).thenReturn(12); - 
when(channel3.getAvailableIds()).thenReturn(8); - when(channel2.preAcquireId()).thenReturn(true); - - // When - set.add(channel1); - set.add(channel2); - set.add(channel3); - - // Then - assertThat(set.size()).isEqualTo(3); - assertThat(set.next()).isEqualTo(channel2); - verify(channel1).getAvailableIds(); - verify(channel2).getAvailableIds(); - verify(channel3).getAvailableIds(); - verify(channel2).preAcquireId(); - - // When - when(channel1.getAvailableIds()).thenReturn(15); - when(channel1.preAcquireId()).thenReturn(true); - - // Then - assertThat(set.next()).isEqualTo(channel1); - verify(channel1).preAcquireId(); - } - - @Test - public void should_return_null_when_multiple_but_all_full() { - // Given - when(channel1.getAvailableIds()).thenReturn(0); - when(channel2.getAvailableIds()).thenReturn(0); - when(channel3.getAvailableIds()).thenReturn(0); - - // When - set.add(channel1); - set.add(channel2); - set.add(channel3); - - // Then - assertThat(set.next()).isNull(); - } - - @Test - public void should_remove_channels() { - // Given - when(channel1.getAvailableIds()).thenReturn(2); - when(channel2.getAvailableIds()).thenReturn(12); - when(channel3.getAvailableIds()).thenReturn(8); - when(channel2.preAcquireId()).thenReturn(true); - - set.add(channel1); - set.add(channel2); - set.add(channel3); - assertThat(set.next()).isEqualTo(channel2); - - // When - set.remove(channel2); - when(channel3.preAcquireId()).thenReturn(true); - - // Then - assertThat(set.size()).isEqualTo(2); - assertThat(set.next()).isEqualTo(channel3); - - // When - set.remove(channel3); - when(channel1.preAcquireId()).thenReturn(true); - - // Then - assertThat(set.size()).isEqualTo(1); - assertThat(set.next()).isEqualTo(channel1); - - // When - set.remove(channel1); - - // Then - assertThat(set.size()).isEqualTo(0); - assertThat(set.next()).isNull(); - } - - /** - * Check that {@link ChannelSet#next()} doesn't spin forever if it keeps racing (see comments in - * the implementation). 
- */ - @Test - public void should_not_loop_indefinitely_if_acquisition_keeps_failing() { - // Given - when(channel1.getAvailableIds()).thenReturn(2); - when(channel2.getAvailableIds()).thenReturn(12); - when(channel3.getAvailableIds()).thenReturn(8); - // channel2 is the most available but we keep failing to acquire (simulating the race condition) - when(channel2.preAcquireId()).thenReturn(false); - - // When - set.add(channel1); - set.add(channel2); - set.add(channel3); - - // Then - assertThat(set.next()).isNull(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressorsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressorsTest.java deleted file mode 100644 index 1911c7c7227..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressorsTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.protocol.internal.NoopCompressor; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class BuiltInCompressorsTest { - - @Mock private DriverContext context; - - @Before - public void setUp() { - MockitoAnnotations.initMocks(this); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_create_instance_for_supported_algorithms(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(BuiltInCompressors.newInstance("lz4", context)).isInstanceOf(Lz4Compressor.class); - assertThat(BuiltInCompressors.newInstance("snappy", context)) - .isInstanceOf(SnappyCompressor.class); - assertThat(BuiltInCompressors.newInstance("none", context)) - .isInstanceOf(NoopCompressor.class); - assertThat(BuiltInCompressors.newInstance("LZ4", context)).isInstanceOf(Lz4Compressor.class); - assertThat(BuiltInCompressors.newInstance("SNAPPY", context)) - .isInstanceOf(SnappyCompressor.class); - assertThat(BuiltInCompressors.newInstance("NONE", context)) - .isInstanceOf(NoopCompressor.class); - } finally { - Locale.setDefault(def); - } - } - - @Test - public void should_throw_when_unsupported_algorithm() { - assertThatThrownBy(() -> BuiltInCompressors.newInstance("GZIP", context)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("Unsupported compression 
algorithm 'GZIP'"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java deleted file mode 100644 index 895a650b292..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java +++ /dev/null @@ -1,407 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.internal.core.util.ByteBufs; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import org.junit.Test; - -/** - * Note: we don't test trivial methods that simply delegate to ByteBuf, nor default implementations - * inherited from {@link com.datastax.oss.protocol.internal.PrimitiveCodec}. 
- */ -public class ByteBufPrimitiveCodecTest { - private ByteBufPrimitiveCodec codec = new ByteBufPrimitiveCodec(ByteBufAllocator.DEFAULT); - - @Test - public void should_concatenate() { - ByteBuf left = ByteBufs.wrap(0xca, 0xfe); - ByteBuf right = ByteBufs.wrap(0xba, 0xbe); - assertThat(codec.concat(left, right)).containsExactly("0xcafebabe"); - } - - @Test - public void should_concatenate_slices() { - ByteBuf left = ByteBufs.wrap(0x00, 0xca, 0xfe, 0x00).slice(1, 2); - ByteBuf right = ByteBufs.wrap(0x00, 0x00, 0xba, 0xbe, 0x00).slice(2, 2); - - assertThat(codec.concat(left, right)).containsExactly("0xcafebabe"); - } - - @Test - public void should_read_inet_v4() { - ByteBuf source = - ByteBufs.wrap( - // length (as a byte) - 0x04, - // address - 0x7f, - 0x00, - 0x00, - 0x01, - // port (as an int) - 0x00, - 0x00, - 0x23, - 0x52); - InetSocketAddress inet = codec.readInet(source); - assertThat(inet.getAddress().getHostAddress()).isEqualTo("127.0.0.1"); - assertThat(inet.getPort()).isEqualTo(9042); - } - - @Test - public void should_read_inet_v6() { - ByteBuf lengthAndAddress = allocate(17); - lengthAndAddress.writeByte(16); - lengthAndAddress.writeLong(0); - lengthAndAddress.writeLong(1); - ByteBuf source = - codec.concat( - lengthAndAddress, - // port (as an int) - ByteBufs.wrap(0x00, 0x00, 0x23, 0x52)); - InetSocketAddress inet = codec.readInet(source); - assertThat(inet.getAddress().getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1"); - assertThat(inet.getPort()).isEqualTo(9042); - } - - @Test - public void should_fail_to_read_inet_if_length_invalid() { - ByteBuf source = - ByteBufs.wrap( - // length (as a byte) - 0x03, - // address - 0x7f, - 0x00, - 0x01, - // port (as an int) - 0x00, - 0x00, - 0x23, - 0x52); - assertThatThrownBy(() -> codec.readInet(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid address length: 3 ([127, 0, 1])"); - } - - @Test - public void should_read_inetaddr_v4() { - ByteBuf source = - ByteBufs.wrap( - // length 
(as a byte) - 0x04, - // address - 0x7f, - 0x00, - 0x00, - 0x01); - InetAddress inetAddr = codec.readInetAddr(source); - assertThat(inetAddr.getHostAddress()).isEqualTo("127.0.0.1"); - } - - @Test - public void should_read_inetaddr_v6() { - ByteBuf source = allocate(17); - source.writeByte(16); - source.writeLong(0); - source.writeLong(1); - InetAddress inetAddr = codec.readInetAddr(source); - assertThat(inetAddr.getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1"); - } - - @Test - public void should_fail_to_read_inetaddr_if_length_invalid() { - ByteBuf source = - ByteBufs.wrap( - // length (as a byte) - 0x03, - // address - 0x7f, - 0x00, - 0x01); - assertThatThrownBy(() -> codec.readInetAddr(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid address length: 3 ([127, 0, 1])"); - } - - @Test - public void should_read_bytes() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe); - ByteBuffer bytes = codec.readBytes(source); - assertThat(Bytes.toHexString(bytes)).isEqualTo("0xcafebabe"); - } - - @Test - public void should_read_bytes_when_extra_data() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe, - 0xde, - 0xda, - 0xdd); - ByteBuffer bytes = codec.readBytes(source); - assertThat(Bytes.toHexString(bytes)).isEqualTo("0xcafebabe"); - } - - @Test - public void read_bytes_should_udpate_reader_index() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe, - 0xde, - 0xda, - 0xdd); - codec.readBytes(source); - - assertThat(source.readerIndex()).isEqualTo(8); - } - - @Test - public void read_bytes_should_throw_when_not_enough_content() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) : 4 bytes - 0x00, - 0x00, - 0x00, - 0x04, - // contents : only 2 bytes - 0xca, - 0xfe); - 
assertThatThrownBy(() -> codec.readBytes(source)).isInstanceOf(IndexOutOfBoundsException.class); - } - - @Test - public void should_read_null_bytes() { - ByteBuf source = ByteBufs.wrap(0xFF, 0xFF, 0xFF, 0xFF); // -1 (as an int) - assertThat(codec.readBytes(source)).isNull(); - } - - @Test - public void should_read_short_bytes() { - ByteBuf source = - ByteBufs.wrap( - // length (as an unsigned short) - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe); - assertThat(Bytes.toHexString(codec.readShortBytes(source))).isEqualTo("0xcafebabe"); - } - - @Test - public void should_read_string() { - ByteBuf source = - ByteBufs.wrap( - // length (as an unsigned short) - 0x00, - 0x05, - // UTF-8 contents - 0x68, - 0x65, - 0x6c, - 0x6c, - 0x6f); - assertThat(codec.readString(source)).isEqualTo("hello"); - } - - @Test - public void should_fail_to_read_string_if_not_enough_characters() { - ByteBuf source = codec.allocate(2); - source.writeShort(4); - - assertThatThrownBy(() -> codec.readString(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); - } - - @Test - public void should_read_long_string() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x05, - // UTF-8 contents - 0x68, - 0x65, - 0x6c, - 0x6c, - 0x6f); - assertThat(codec.readLongString(source)).isEqualTo("hello"); - } - - @Test - public void should_fail_to_read_long_string_if_not_enough_characters() { - ByteBuf source = codec.allocate(4); - source.writeInt(4); - - assertThatThrownBy(() -> codec.readLongString(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); - } - - @Test - public void should_write_inet_v4() throws Exception { - ByteBuf dest = allocate(1 + 4 + 4); - InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9042); - codec.writeInet(inet, dest); - assertThat(dest) - 
.containsExactly( - "0x04" // size as a byte - + "7f000001" // address - + "00002352" // port - ); - } - - @Test - public void should_write_inet_v6() throws Exception { - ByteBuf dest = allocate(1 + 16 + 4); - InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("::1"), 9042); - codec.writeInet(inet, dest); - assertThat(dest) - .containsExactly( - "0x10" // size as a byte - + "00000000000000000000000000000001" // address - + "00002352" // port - ); - } - - @Test - public void should_write_inetaddr_v4() throws Exception { - ByteBuf dest = allocate(1 + 4); - InetAddress inetAddr = InetAddress.getByName("127.0.0.1"); - codec.writeInetAddr(inetAddr, dest); - assertThat(dest) - .containsExactly( - "0x04" // size as a byte - + "7f000001" // address - ); - } - - @Test - public void should_write_inetaddr_v6() throws Exception { - ByteBuf dest = allocate(1 + 16); - InetAddress inetAddr = InetAddress.getByName("::1"); - codec.writeInetAddr(inetAddr, dest); - assertThat(dest) - .containsExactly( - "0x10" // size as a byte - + "00000000000000000000000000000001" // address - ); - } - - @Test - public void should_write_string() { - ByteBuf dest = allocate(7); - codec.writeString("hello", dest); - assertThat(dest) - .containsExactly( - "0x0005" // size as an unsigned short - + "68656c6c6f" // UTF-8 contents - ); - } - - @Test - public void should_write_long_string() { - ByteBuf dest = allocate(9); - codec.writeLongString("hello", dest); - assertThat(dest) - .containsExactly( - "0x00000005" - + // size as an int - "68656c6c6f" // UTF-8 contents - ); - } - - @Test - public void should_write_bytes() { - ByteBuf dest = allocate(8); - codec.writeBytes(Bytes.fromHexString("0xcafebabe"), dest); - assertThat(dest) - .containsExactly( - "0x00000004" - + // size as an int - "cafebabe"); - } - - @Test - public void should_write_short_bytes() { - ByteBuf dest = allocate(6); - codec.writeShortBytes(new byte[] {(byte) 0xca, (byte) 0xfe, (byte) 0xba, (byte) 0xbe}, dest); - 
assertThat(dest) - .containsExactly( - "0x0004" - + // size as an unsigned short - "cafebabe"); - } - - @Test - public void should_write_null_bytes() { - ByteBuf dest = allocate(4); - codec.writeBytes((ByteBuffer) null, dest); - assertThat(dest).containsExactly("0xFFFFFFFF"); - } - - private static ByteBuf allocate(int length) { - return ByteBufAllocator.DEFAULT.buffer(length); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoderTest.java deleted file mode 100644 index d151da309c1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoderTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.connection.CrcMismatchException; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.SegmentCodec; -import com.google.common.base.Strings; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufUtil; -import io.netty.buffer.Unpooled; -import io.netty.buffer.UnpooledByteBufAllocator; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.handler.codec.DecoderException; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class BytesToSegmentDecoderTest { - - // Hard-coded test data, the values were generated with our encoding methods. - // We're not really testing the decoding itself here, only that our subclass calls the - // LengthFieldBasedFrameDecoder parent constructor with the right parameters. 
- private static final ByteBuf REGULAR_HEADER = byteBuf("04000201f9f2"); - private static final ByteBuf REGULAR_PAYLOAD = byteBuf("00000001"); - private static final ByteBuf REGULAR_TRAILER = byteBuf("1fd6022d"); - private static final ByteBuf REGULAR_WRONG_HEADER = byteBuf("04000202f9f2"); - private static final ByteBuf REGULAR_WRONG_TRAILER = byteBuf("1fd6022e"); - - private static final ByteBuf MAX_HEADER = byteBuf("ffff03254047"); - private static final ByteBuf MAX_PAYLOAD = - byteBuf(Strings.repeat("01", Segment.MAX_PAYLOAD_LENGTH)); - private static final ByteBuf MAX_TRAILER = byteBuf("a05c2f13"); - - private static final ByteBuf LZ4_HEADER = byteBuf("120020000491c94f"); - private static final ByteBuf LZ4_PAYLOAD_UNCOMPRESSED = - byteBuf("00000001000000010000000100000001"); - private static final ByteBuf LZ4_PAYLOAD_COMPRESSED = - byteBuf("f00100000001000000010000000100000001"); - private static final ByteBuf LZ4_TRAILER = byteBuf("2bd67f90"); - - private static final Compressor LZ4_COMPRESSOR = new Lz4Compressor("test"); - - private EmbeddedChannel channel; - - @Before - public void setup() { - channel = new EmbeddedChannel(); - } - - @Test - public void should_decode_regular_segment() { - channel.pipeline().addLast(newDecoder(Compressor.none())); - channel.writeInbound(Unpooled.wrappedBuffer(REGULAR_HEADER, REGULAR_PAYLOAD, REGULAR_TRAILER)); - Segment segment = channel.readInbound(); - assertThat(segment.isSelfContained).isTrue(); - assertThat(segment.payload).isEqualTo(REGULAR_PAYLOAD); - } - - @Test - public void should_decode_max_length_segment() { - channel.pipeline().addLast(newDecoder(Compressor.none())); - channel.writeInbound(Unpooled.wrappedBuffer(MAX_HEADER, MAX_PAYLOAD, MAX_TRAILER)); - Segment segment = channel.readInbound(); - assertThat(segment.isSelfContained).isTrue(); - assertThat(segment.payload).isEqualTo(MAX_PAYLOAD); - } - - @Test - public void should_decode_segment_from_multiple_incoming_chunks() { - 
channel.pipeline().addLast(newDecoder(Compressor.none())); - // Send the header in two slices, to cover the case where the length can't be read the first - // time: - ByteBuf headerStart = REGULAR_HEADER.slice(0, 3); - ByteBuf headerEnd = REGULAR_HEADER.slice(3, 3); - channel.writeInbound(headerStart); - channel.writeInbound(headerEnd); - channel.writeInbound(REGULAR_PAYLOAD.duplicate()); - channel.writeInbound(REGULAR_TRAILER.duplicate()); - Segment segment = channel.readInbound(); - assertThat(segment.isSelfContained).isTrue(); - assertThat(segment.payload).isEqualTo(REGULAR_PAYLOAD); - } - - @Test - public void should_decode_compressed_segment() { - channel.pipeline().addLast(newDecoder(LZ4_COMPRESSOR)); - // We need a contiguous buffer for this one, because of how our decompressor operates - ByteBuf buffer = Unpooled.wrappedBuffer(LZ4_HEADER, LZ4_PAYLOAD_COMPRESSED, LZ4_TRAILER).copy(); - channel.writeInbound(buffer); - Segment segment = channel.readInbound(); - assertThat(segment.isSelfContained).isTrue(); - assertThat(segment.payload).isEqualTo(LZ4_PAYLOAD_UNCOMPRESSED); - } - - @Test - public void should_surface_header_crc_mismatch() { - try { - channel.pipeline().addLast(newDecoder(Compressor.none())); - channel.writeInbound( - Unpooled.wrappedBuffer(REGULAR_WRONG_HEADER, REGULAR_PAYLOAD, REGULAR_TRAILER)); - fail("Expected a " + DecoderException.class.getSimpleName()); - } catch (DecoderException exception) { - assertThat(exception).hasCauseInstanceOf(CrcMismatchException.class); - } - } - - @Test - public void should_surface_trailer_crc_mismatch() { - try { - channel.pipeline().addLast(newDecoder(Compressor.none())); - channel.writeInbound( - Unpooled.wrappedBuffer(REGULAR_HEADER, REGULAR_PAYLOAD, REGULAR_WRONG_TRAILER)); - fail("Expected a " + DecoderException.class.getSimpleName()); - } catch (DecoderException exception) { - assertThat(exception).hasCauseInstanceOf(CrcMismatchException.class); - } - } - - private BytesToSegmentDecoder 
newDecoder(Compressor compressor) { - return new BytesToSegmentDecoder( - new SegmentCodec<>( - new ByteBufPrimitiveCodec(UnpooledByteBufAllocator.DEFAULT), compressor)); - } - - private static ByteBuf byteBuf(String hex) { - return Unpooled.unreleasableBuffer( - Unpooled.wrappedBuffer(ByteBufUtil.decodeHexDump(hex)).asReadOnly()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java deleted file mode 100644 index 0ab61771da0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.internal.core.channel.ChannelHandlerTestBase; -import com.datastax.oss.driver.internal.core.util.ByteBufs; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.response.AuthSuccess; -import io.netty.buffer.ByteBuf; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import org.junit.Before; -import org.junit.Test; - -public class FrameDecoderTest extends ChannelHandlerTestBase { - // A valid binary payload for a response frame. - private static final ByteBuf VALID_PAYLOAD = - ByteBufs.fromHexString( - "0x84" // response frame, protocol version 4 - + "00" // flags (none) - + "002a" // stream id (42) - + "10" // opcode for AUTH_SUCCESS message - + "00000008" // body length - + "00000004cafebabe" // body - ); - - // A binary payload that is invalid because the protocol version is not supported by the codec - private static final ByteBuf INVALID_PAYLOAD = - ByteBufs.fromHexString( - "0xFF" // response frame, protocol version 127 - + "00002a100000000800000004cafebabe"); - - private FrameCodec frameCodec; - - @Before - @Override - public void setup() { - super.setup(); - frameCodec = - FrameCodec.defaultClient(new ByteBufPrimitiveCodec(channel.alloc()), Compressor.none()); - } - - @Test - public void should_decode_valid_payload() { - // Given - FrameDecoder decoder = new FrameDecoder(frameCodec, 1024); - channel.pipeline().addLast(decoder); - - // When - // The decoder releases the buffer, so make sure we retain it for the other tests - VALID_PAYLOAD.retain(); - channel.writeInbound(VALID_PAYLOAD.duplicate()); - Frame frame = 
readInboundFrame(); - - // Then - assertThat(frame.message).isInstanceOf(AuthSuccess.class); - } - - /** - * Checks that an exception carrying the stream id is thrown when decoding fails in the {@link - * LengthFieldBasedFrameDecoder} code. - */ - @Test - public void should_fail_to_decode_if_payload_is_valid_but_too_long() { - // Given - FrameDecoder decoder = new FrameDecoder(frameCodec, VALID_PAYLOAD.readableBytes() - 1); - channel.pipeline().addLast(decoder); - - // When - VALID_PAYLOAD.retain(); - try { - channel.writeInbound(VALID_PAYLOAD.duplicate()); - fail("expected an exception"); - } catch (FrameDecodingException e) { - // Then - assertThat(e.streamId).isEqualTo(42); - assertThat(e.getCause()).isInstanceOf(FrameTooLongException.class); - } - } - - /** Checks that an exception carrying the stream id is thrown when decoding fails in our code. */ - @Test - public void should_fail_to_decode_if_payload_cannot_be_decoded() { - // Given - FrameDecoder decoder = new FrameDecoder(frameCodec, 1024); - channel.pipeline().addLast(decoder); - - // When - INVALID_PAYLOAD.retain(); - try { - channel.writeInbound(INVALID_PAYLOAD.duplicate()); - fail("expected an exception"); - } catch (FrameDecodingException e) { - // Then - assertThat(e.streamId).isEqualTo(42); - assertThat(e.getCause()).isInstanceOf(IllegalArgumentException.class); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoderTest.java deleted file mode 100644 index 2886adeab4e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoderTest.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.ProtocolV5ClientCodecs; -import com.datastax.oss.protocol.internal.ProtocolV5ServerCodecs; -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.request.AuthResponse; -import com.datastax.oss.protocol.internal.response.result.Void; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.UnpooledByteBufAllocator; -import io.netty.channel.embedded.EmbeddedChannel; -import java.util.Collections; -import org.junit.Before; -import org.junit.Test; - -public class SegmentToFrameDecoderTest { - - private static final FrameCodec FRAME_CODEC = - new FrameCodec<>( - new ByteBufPrimitiveCodec(UnpooledByteBufAllocator.DEFAULT), - Compressor.none(), - new ProtocolV5ClientCodecs(), - new ProtocolV5ServerCodecs()); - - private EmbeddedChannel channel; - - @Before - public void setup() { - channel = new 
EmbeddedChannel(); - channel.pipeline().addLast(new SegmentToFrameDecoder(FRAME_CODEC, "test")); - } - - @Test - public void should_decode_self_contained() { - ByteBuf payload = UnpooledByteBufAllocator.DEFAULT.buffer(); - payload.writeBytes(encodeFrame(Void.INSTANCE)); - payload.writeBytes(encodeFrame(new AuthResponse(Bytes.fromHexString("0xabcdef")))); - - channel.writeInbound(new Segment<>(payload, true)); - - Frame frame1 = channel.readInbound(); - assertThat(frame1.message).isInstanceOf(Void.class); - Frame frame2 = channel.readInbound(); - assertThat(frame2.message).isInstanceOf(AuthResponse.class); - } - - @Test - public void should_decode_sequence_of_slices() { - ByteBuf encodedFrame = - encodeFrame(new AuthResponse(Bytes.fromHexString("0x" + Strings.repeat("aa", 1011)))); - int sliceLength = 100; - do { - ByteBuf payload = - encodedFrame.readRetainedSlice(Math.min(sliceLength, encodedFrame.readableBytes())); - channel.writeInbound(new Segment<>(payload, false)); - } while (encodedFrame.isReadable()); - - Frame frame = channel.readInbound(); - assertThat(frame.message).isInstanceOf(AuthResponse.class); - } - - private static ByteBuf encodeFrame(Message message) { - Frame frame = - Frame.forResponse( - ProtocolConstants.Version.V5, - 1, - null, - Collections.emptyMap(), - Collections.emptyList(), - message); - return FRAME_CODEC.encode(frame); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SliceWriteListenerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SliceWriteListenerTest.java deleted file mode 100644 index 736bcb66d56..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SliceWriteListenerTest.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import io.netty.channel.ChannelPromise; -import io.netty.channel.embedded.EmbeddedChannel; -import org.junit.Before; -import org.junit.Test; - -public class SliceWriteListenerTest { - - private final EmbeddedChannel channel = new EmbeddedChannel(); - - private ChannelPromise framePromise, slicePromise1, slicePromise2, slicePromise3; - - @Before - public void setup() { - framePromise = channel.newPromise(); - slicePromise1 = channel.newPromise(); - slicePromise2 = channel.newPromise(); - slicePromise3 = channel.newPromise(); - - ByteBufSegmentBuilder.SliceWriteListener listener = - new ByteBufSegmentBuilder.SliceWriteListener( - framePromise, ImmutableList.of(slicePromise1, slicePromise2, slicePromise3)); - slicePromise1.addListener(listener); - slicePromise2.addListener(listener); - slicePromise3.addListener(listener); - - assertThat(framePromise.isDone()).isFalse(); - } - - @Test - public void should_succeed_frame_if_all_slices_succeed() { - slicePromise1.setSuccess(); - assertThat(framePromise.isDone()).isFalse(); - slicePromise2.setSuccess(); - assertThat(framePromise.isDone()).isFalse(); - slicePromise3.setSuccess(); - - assertThat(framePromise.isSuccess()).isTrue(); - } - - @Test - 
public void should_fail_frame_and_cancel_remaining_slices_if_one_slice_fails() { - slicePromise1.setSuccess(); - assertThat(framePromise.isDone()).isFalse(); - Exception failure = new Exception("test"); - slicePromise2.setFailure(failure); - - assertThat(framePromise.isDone()).isTrue(); - assertThat(framePromise.isSuccess()).isFalse(); - assertThat(framePromise.cause()).isEqualTo(failure); - - assertThat(slicePromise3.isCancelled()).isTrue(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java deleted file mode 100644 index 58d1783038d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java +++ /dev/null @@ -1,932 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import 
com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.DistanceEvent; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.util.concurrent.DefaultPromise; -import io.netty.util.concurrent.GlobalEventExecutor; -import java.time.Duration; -import java.util.Collections; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.verification.VerificationWithTimeout; - -public class DefaultSessionPoolsTest { - - private static final CqlIdentifier KEYSPACE = CqlIdentifier.fromInternal("ks"); - /** How long we wait when verifying mocks for async invocations */ - protected static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(500); - - @Mock private InternalDriverContext context; - @Mock private NettyOptions nettyOptions; - @Mock private ChannelPoolFactory channelPoolFactory; - @Mock private MetadataManager metadataManager; - @Mock private 
TopologyMonitor topologyMonitor; - @Mock private LoadBalancingPolicyWrapper loadBalancingPolicyWrapper; - @Mock private DriverConfigLoader configLoader; - @Mock private Metadata metadata; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private ReconnectionPolicy reconnectionPolicy; - @Mock private RetryPolicy retryPolicy; - @Mock private SpeculativeExecutionPolicy speculativeExecutionPolicy; - @Mock private AddressTranslator addressTranslator; - @Mock private ControlConnection controlConnection; - @Mock private MetricsFactory metricsFactory; - @Mock private NodeStateListener nodeStateListener; - @Mock private SchemaChangeListener schemaChangeListener; - @Mock private RequestTracker requestTracker; - - private DefaultNode node1; - private DefaultNode node2; - private DefaultNode node3; - private EventBus eventBus; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - DefaultEventLoopGroup adminEventLoopGroup = new DefaultEventLoopGroup(1); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - when(context.getNettyOptions()).thenReturn(nettyOptions); - - // Config: - when(defaultProfile.getBoolean(DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE)) - .thenReturn(true); - when(defaultProfile.getBoolean(DefaultDriverOption.REPREPARE_ENABLED)).thenReturn(false); - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getDuration(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW)) - .thenReturn(Duration.ZERO); - when(defaultProfile.getInt(DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS)).thenReturn(1); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - // Init sequence: - when(metadataManager.refreshNodes()).thenReturn(CompletableFuture.completedFuture(null)); - when(metadataManager.refreshSchema(null, false, true)) - 
.thenReturn(CompletableFuture.completedFuture(null)); - when(context.getMetadataManager()).thenReturn(metadataManager); - - when(topologyMonitor.init()).thenReturn(CompletableFuture.completedFuture(null)); - when(context.getTopologyMonitor()).thenReturn(topologyMonitor); - - when(context.getLoadBalancingPolicyWrapper()).thenReturn(loadBalancingPolicyWrapper); - - when(context.getConfigLoader()).thenReturn(configLoader); - - when(context.getMetricsFactory()).thenReturn(metricsFactory); - - // Runtime behavior: - when(context.getSessionName()).thenReturn("test"); - - when(context.getChannelPoolFactory()).thenReturn(channelPoolFactory); - - eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - - node1 = mockLocalNode(1); - node2 = mockLocalNode(2); - node3 = mockLocalNode(3); - @SuppressWarnings("ConstantConditions") - ImmutableMap nodes = - ImmutableMap.of( - node1.getHostId(), node1, - node2.getHostId(), node2, - node3.getHostId(), node3); - when(metadata.getNodes()).thenReturn(nodes); - when(metadataManager.getMetadata()).thenReturn(metadata); - - PoolManager poolManager = new PoolManager(context); - when(context.getPoolManager()).thenReturn(poolManager); - - // Shutdown sequence: - when(context.getReconnectionPolicy()).thenReturn(reconnectionPolicy); - when(context.getRetryPolicy(DriverExecutionProfile.DEFAULT_NAME)).thenReturn(retryPolicy); - when(context.getSpeculativeExecutionPolicies()) - .thenReturn( - ImmutableMap.of(DriverExecutionProfile.DEFAULT_NAME, speculativeExecutionPolicy)); - when(context.getAddressTranslator()).thenReturn(addressTranslator); - when(context.getNodeStateListener()).thenReturn(nodeStateListener); - when(context.getSchemaChangeListener()).thenReturn(schemaChangeListener); - when(context.getRequestTracker()).thenReturn(requestTracker); - - when(metadataManager.closeAsync()).thenReturn(CompletableFuture.completedFuture(null)); - 
when(metadataManager.forceCloseAsync()).thenReturn(CompletableFuture.completedFuture(null)); - - when(topologyMonitor.closeAsync()).thenReturn(CompletableFuture.completedFuture(null)); - when(topologyMonitor.forceCloseAsync()).thenReturn(CompletableFuture.completedFuture(null)); - - when(context.getControlConnection()).thenReturn(controlConnection); - when(controlConnection.closeAsync()).thenReturn(CompletableFuture.completedFuture(null)); - when(controlConnection.forceCloseAsync()).thenReturn(CompletableFuture.completedFuture(null)); - - DefaultPromise nettyCloseFuture = new DefaultPromise<>(GlobalEventExecutor.INSTANCE); - nettyCloseFuture.setSuccess(null); - when(nettyOptions.onClose()).thenAnswer(invocation -> nettyCloseFuture); - } - - @Test - public void should_initialize_pools_with_distances() { - when(node3.getDistance()).thenReturn(NodeDistance.REMOTE); - - CompletableFuture pool1Future = new CompletableFuture<>(); - CompletableFuture pool2Future = new CompletableFuture<>(); - CompletableFuture pool3Future = new CompletableFuture<>(); - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .pending(node1, KEYSPACE, NodeDistance.LOCAL, pool1Future) - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .pending(node3, KEYSPACE, NodeDistance.REMOTE, pool3Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.REMOTE); - - assertThatStage(initFuture).isNotDone(); - - pool1Future.complete(pool1); - pool2Future.complete(pool2); - pool3Future.complete(pool3); - - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()) - 
.containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_not_connect_to_ignored_nodes() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_not_connect_to_forced_down_nodes() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_adjust_distance_if_changed_while_init() { - CompletableFuture pool1Future = new CompletableFuture<>(); - CompletableFuture pool2Future = new CompletableFuture<>(); - CompletableFuture pool3Future = new CompletableFuture<>(); - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - 
MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .pending(node1, KEYSPACE, NodeDistance.LOCAL, pool1Future) - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .pending(node3, KEYSPACE, NodeDistance.LOCAL, pool3Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - - assertThatStage(initFuture).isNotDone(); - - // Distance changes while init still pending - eventBus.fire(new DistanceEvent(NodeDistance.REMOTE, node2)); - - pool1Future.complete(pool1); - pool2Future.complete(pool2); - pool3Future.complete(pool3); - - verify(pool2, VERIFY_TIMEOUT).resize(NodeDistance.REMOTE); - - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()) - .containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_remove_pool_if_ignored_while_init() { - CompletableFuture pool1Future = new CompletableFuture<>(); - CompletableFuture pool2Future = new CompletableFuture<>(); - CompletableFuture pool3Future = new CompletableFuture<>(); - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .pending(node1, KEYSPACE, NodeDistance.LOCAL, pool1Future) - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .pending(node3, KEYSPACE, NodeDistance.LOCAL, pool3Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - - assertThatStage(initFuture).isNotDone(); - - // Distance 
changes while init still pending - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - - pool1Future.complete(pool1); - pool2Future.complete(pool2); - pool3Future.complete(pool3); - - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_remove_pool_if_forced_down_while_init() { - CompletableFuture pool1Future = new CompletableFuture<>(); - CompletableFuture pool2Future = new CompletableFuture<>(); - CompletableFuture pool3Future = new CompletableFuture<>(); - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .pending(node1, KEYSPACE, NodeDistance.LOCAL, pool1Future) - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .pending(node3, KEYSPACE, NodeDistance.LOCAL, pool3Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - - assertThatStage(initFuture).isNotDone(); - - // Forced down while init still pending - eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); - - pool1Future.complete(pool1); - pool2Future.complete(pool2); - pool3Future.complete(pool3); - - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_resize_pool_if_distance_changes() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper 
= - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - - eventBus.fire(new DistanceEvent(NodeDistance.REMOTE, node2)); - verify(pool2, timeout(500)).resize(NodeDistance.REMOTE); - } - - @Test - public void should_remove_pool_if_node_becomes_ignored() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - verify(pool2, timeout(500)).closeAsync(); - - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_do_nothing_if_node_becomes_ignored_but_was_already_ignored() - throws InterruptedException { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - 
MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - verify(pool2, timeout(100)).closeAsync(); - - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - // Fire the same event again, nothing should happen - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - TimeUnit.MILLISECONDS.sleep(200); - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_recreate_pool_if_node_becomes_not_ignored() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // When node2 becomes not ignored - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) 
session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(new DistanceEvent(NodeDistance.LOCAL, node2)); - - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - await() - .untilAsserted( - () -> - assertThat(((DefaultSession) session).getPools()) - .containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_remove_pool_if_node_is_forced_down() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - - eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); - verify(pool2, timeout(500)).closeAsync(); - - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_recreate_pool_if_node_is_forced_back_up() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // init - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // when node2 comes back up - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .build(); - - 
CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - await() - .untilAsserted( - () -> - assertThat(((DefaultSession) session).getPools()) - .containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_not_recreate_pool_if_node_is_forced_back_up_but_ignored() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // init - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_adjust_distance_if_changed_while_recreating() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - 
ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // When node2 becomes not ignored - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(new DistanceEvent(NodeDistance.LOCAL, node2)); - - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // Distance changes again while pool init is in progress - eventBus.fire(new DistanceEvent(NodeDistance.REMOTE, node2)); - - // Now pool init succeeds - pool2Future.complete(pool2); - - // Pool should have been adjusted - verify(pool2, VERIFY_TIMEOUT).resize(NodeDistance.REMOTE); - await() - .untilAsserted( - () -> - assertThat(((DefaultSession) session).getPools()) - .containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_remove_pool_if_ignored_while_recreating() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, 
NodeDistance.LOCAL, pool3) - // When node2 becomes not ignored - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(new DistanceEvent(NodeDistance.LOCAL, node2)); - - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // Distance changes to ignored while pool init is in progress - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - - // Now pool init succeeds - pool2Future.complete(pool2); - - // Pool should have been closed - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_remove_pool_if_forced_down_while_recreating() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // When node2 becomes not ignored - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = 
CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(new DistanceEvent(NodeDistance.LOCAL, node2)); - - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // Forced down while pool init is in progress - eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); - - // Now pool init succeeds - pool2Future.complete(pool2); - - // Pool should have been closed - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_close_all_pools_when_closing() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - - CompletionStage closeFuture = session.closeAsync(); - assertThatStage(closeFuture).isSuccess(); - - verify(pool1, VERIFY_TIMEOUT).closeAsync(); - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - verify(pool3, VERIFY_TIMEOUT).closeAsync(); - } - - @Test - public void should_force_close_all_pools_when_force_closing() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper 
factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - - CompletionStage closeFuture = session.forceCloseAsync(); - assertThatStage(closeFuture).isSuccess(); - - verify(pool1, VERIFY_TIMEOUT).forceCloseAsync(); - verify(pool2, VERIFY_TIMEOUT).forceCloseAsync(); - verify(pool3, VERIFY_TIMEOUT).forceCloseAsync(); - } - - @Test - public void should_close_pool_if_recreated_while_closing() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // init - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // when node2 comes back up - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - // node2 comes back up, start initializing a pool for it - 
eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // but the session gets closed before pool init completes - CompletionStage closeFuture = session.closeAsync(); - assertThatStage(closeFuture).isSuccess(); - - // now pool init completes - pool2Future.complete(pool2); - - // Pool should have been closed - verify(pool2, VERIFY_TIMEOUT).forceCloseAsync(); - } - - @Test - public void should_set_keyspace_on_all_pools() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - - CqlIdentifier newKeyspace = CqlIdentifier.fromInternal("newKeyspace"); - ((DefaultSession) session).setKeyspace(newKeyspace); - - verify(pool1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(pool2, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(pool3, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - } - - @Test - public void should_set_keyspace_on_pool_if_recreated_while_switching_keyspace() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - 
MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // init - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // when node2 comes back up - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - DefaultSession session = - (DefaultSession) CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(session.getPools()).containsValues(pool1, pool3); - - // node2 comes back up, start initializing a pool for it - eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // Keyspace gets changed on the session in the meantime, node2's pool will miss it - CqlIdentifier newKeyspace = CqlIdentifier.fromInternal("newKeyspace"); - session.setKeyspace(newKeyspace); - verify(pool1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(pool3, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - - // now pool init completes - pool2Future.complete(pool2); - - // Pool should have been closed - verify(pool2, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - } - - private ChannelPool mockPool(Node node) { - ChannelPool pool = mock(ChannelPool.class); - when(pool.getNode()).thenReturn(node); - when(pool.getInitialKeyspaceName()).thenReturn(KEYSPACE); - when(pool.setKeyspace(any(CqlIdentifier.class))) - .thenReturn(CompletableFuture.completedFuture(null)); - CompletableFuture closeFuture = new CompletableFuture<>(); - when(pool.closeFuture()).thenReturn(closeFuture); - when(pool.closeAsync()) - .then( - i -> { - closeFuture.complete(null); - return closeFuture; - }); - when(pool.forceCloseAsync()) - .then( - i -> { - closeFuture.complete(null); - return closeFuture; - }); - return 
pool; - } - - private CompletionStage newSession() { - return DefaultSession.init(context, Collections.emptySet(), KEYSPACE); - } - - private static DefaultNode mockLocalNode(int i) { - DefaultNode node = mock(DefaultNode.class); - when(node.getHostId()).thenReturn(UUID.randomUUID()); - DefaultEndPoint endPoint = TestNodeFactory.newEndPoint(i); - when(node.getEndPoint()).thenReturn(endPoint); - when(node.getBroadcastRpcAddress()).thenReturn(Optional.of(endPoint.resolve())); - when(node.getDistance()).thenReturn(NodeDistance.LOCAL); - when(node.toString()).thenReturn("node" + i); - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java deleted file mode 100644 index 6c3dc7f3689..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.MultimapBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.mockito.ArgumentCaptor; -import org.mockito.InOrder; -import org.mockito.internal.util.MockUtil; -import org.mockito.stubbing.OngoingStubbing; - -public class MockChannelPoolFactoryHelper { - - public static MockChannelPoolFactoryHelper.Builder builder( - ChannelPoolFactory channelPoolFactory) { - return new MockChannelPoolFactoryHelper.Builder(channelPoolFactory); - } - - private final ChannelPoolFactory channelPoolFactory; - private final InOrder inOrder; - // If waitForCalls sees more invocations than expected, the difference is stored here - private final Map previous = new 
HashMap<>(); - - private MockChannelPoolFactoryHelper(ChannelPoolFactory channelPoolFactory) { - this.channelPoolFactory = channelPoolFactory; - this.inOrder = inOrder(channelPoolFactory); - } - - public void waitForCall(Node node, CqlIdentifier keyspace, NodeDistance distance) { - waitForCalls(node, keyspace, distance, 1); - } - - /** - * Waits for a given number of calls to {@code ChannelPoolFactory.init()}. - * - *

Because we test asynchronous, non-blocking code, there might already be more calls than - * expected when this method is called. If so, the extra calls are stored and stored and will be - * taken into account next time. - */ - public void waitForCalls(Node node, CqlIdentifier keyspace, NodeDistance distance, int expected) { - Params params = new Params(node, keyspace, distance); - int fromLastTime = previous.getOrDefault(params, 0); - if (fromLastTime >= expected) { - previous.put(params, fromLastTime - expected); - return; - } - expected -= fromLastTime; - - // Because we test asynchronous, non-blocking code, there might have been already more - // invocations than expected. Use `atLeast` and a captor to find out. - ArgumentCaptor contextCaptor = - ArgumentCaptor.forClass(InternalDriverContext.class); - inOrder - .verify(channelPoolFactory, timeout(500).atLeast(expected)) - .init(eq(node), eq(keyspace), eq(distance), contextCaptor.capture(), eq("test")); - int actual = contextCaptor.getAllValues().size(); - - int extras = actual - expected; - if (extras > 0) { - previous.compute(params, (k, v) -> (v == null) ? 
extras : v + extras); - } - } - - public void verifyNoMoreCalls() { - inOrder - .verify(channelPoolFactory, timeout(500).times(0)) - .init( - any(Node.class), - any(CqlIdentifier.class), - any(NodeDistance.class), - any(InternalDriverContext.class), - any(String.class)); - - Set counts = Sets.newHashSet(previous.values()); - if (!counts.isEmpty()) { - assertThat(counts).containsExactly(0); - } - } - - public static class Builder { - private final ChannelPoolFactory channelPoolFactory; - private final ListMultimap invocations = - MultimapBuilder.hashKeys().arrayListValues().build(); - - private Builder(ChannelPoolFactory channelPoolFactory) { - assertThat(MockUtil.isMock(channelPoolFactory)).as("expected a mock").isTrue(); - verifyZeroInteractions(channelPoolFactory); - this.channelPoolFactory = channelPoolFactory; - } - - public Builder success( - Node node, CqlIdentifier keyspaceName, NodeDistance distance, ChannelPool pool) { - invocations.put(new Params(node, keyspaceName, distance), pool); - return this; - } - - public Builder failure( - Node node, CqlIdentifier keyspaceName, NodeDistance distance, String error) { - invocations.put(new Params(node, keyspaceName, distance), new Exception(error)); - return this; - } - - public Builder failure( - Node node, CqlIdentifier keyspaceName, NodeDistance distance, Throwable error) { - invocations.put(new Params(node, keyspaceName, distance), error); - return this; - } - - public Builder pending( - Node node, - CqlIdentifier keyspaceName, - NodeDistance distance, - CompletionStage future) { - invocations.put(new Params(node, keyspaceName, distance), future); - return this; - } - - public MockChannelPoolFactoryHelper build() { - stub(); - return new MockChannelPoolFactoryHelper(channelPoolFactory); - } - - private void stub() { - for (Params params : invocations.keySet()) { - Deque> results = new ArrayDeque<>(); - for (Object object : invocations.get(params)) { - if (object instanceof ChannelPool) { - 
results.add(CompletableFuture.completedFuture(((ChannelPool) object))); - } else if (object instanceof Throwable) { - results.add(CompletableFutures.failedFuture(((Throwable) object))); - } else if (object instanceof CompletableFuture) { - @SuppressWarnings("unchecked") - CompletionStage future = (CompletionStage) object; - results.add(future); - } else { - fail("unexpected type: " + object.getClass()); - } - } - if (results.size() > 0) { - CompletionStage first = results.poll(); - OngoingStubbing> ongoingStubbing = - when(channelPoolFactory.init( - eq(params.node), - eq(params.keyspace), - eq(params.distance), - any(InternalDriverContext.class), - eq("test"))) - .thenReturn(first); - for (CompletionStage result : results) { - ongoingStubbing.thenReturn(result); - } - } - } - } - } - - private static class Params { - private final Node node; - private final CqlIdentifier keyspace; - private final NodeDistance distance; - - private Params(Node node, CqlIdentifier keyspace, NodeDistance distance) { - this.node = node; - this.keyspace = keyspace; - this.distance = distance; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Params) { - Params that = (Params) other; - return Objects.equals(this.node, that.node) - && Objects.equals(this.keyspace, that.keyspace) - && Objects.equals(this.distance, that.distance); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(node, keyspace, distance); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/PoolManagerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/PoolManagerTest.java deleted file mode 100644 index 60483da4c72..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/PoolManagerTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import io.netty.channel.DefaultEventLoopGroup; -import java.util.concurrent.ConcurrentHashMap; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class PoolManagerTest { - @Mock private InternalDriverContext context; - @Mock private NettyOptions nettyOptions; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - DefaultEventLoopGroup adminEventLoopGroup = new DefaultEventLoopGroup(1); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - 
when(context.getNettyOptions()).thenReturn(nettyOptions); - when(context.getEventBus()).thenReturn(new EventBus("test")); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - } - - @Test - public void should_use_weak_values_if_config_is_true_or_undefined() { - when(defaultProfile.getBoolean(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, true)) - .thenReturn(true); - // As weak values map class is MapMakerInternalMap - assertThat(new PoolManager(context).getRepreparePayloads()) - .isNotInstanceOf(ConcurrentHashMap.class); - } - - @Test - public void should_not_use_weak_values_if_config_is_false() { - when(defaultProfile.getBoolean(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, true)) - .thenReturn(false); - assertThat(new PoolManager(context).getRepreparePayloads()) - .isInstanceOf(ConcurrentHashMap.class); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java deleted file mode 100644 index 555ed2e8806..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import com.datastax.oss.protocol.internal.util.Bytes; -import 
io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ImmediateEventExecutor; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ReprepareOnUpTest { - @Mock private ChannelPool pool; - @Mock private DriverChannel channel; - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private TopologyMonitor topologyMonitor; - @Mock private MetricsFactory metricsFactory; - @Mock private SessionMetricUpdater metricUpdater; - private Runnable whenPrepared; - private CompletionStage done; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(pool.next()).thenReturn(channel); - - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getBoolean(DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE)) - .thenReturn(true); - when(defaultProfile.getDuration(DefaultDriverOption.REPREPARE_TIMEOUT)) - .thenReturn(Duration.ofMillis(500)); - when(defaultProfile.getInt(DefaultDriverOption.REPREPARE_MAX_STATEMENTS)).thenReturn(0); - when(defaultProfile.getInt(DefaultDriverOption.REPREPARE_MAX_PARALLELISM)).thenReturn(100); - when(context.getConfig()).thenReturn(config); - - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(metricsFactory.getSessionUpdater()).thenReturn(metricUpdater); - - done = new CompletableFuture<>(); - whenPrepared = () -> ((CompletableFuture) done).complete(null); - } - - @Test - public void should_complete_immediately_if_no_prepared_statements() { - // Given - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", 
- pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads(/*none*/ ), - context, - whenPrepared); - - // When - reprepareOnUp.start(); - - // Then - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_reprepare_all_if_system_table_query_fails() { - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - adminQuery.resultFuture.completeExceptionally(new RuntimeException("mock error")); - - for (char c = 'a'; c <= 'f'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_reprepare_all_if_system_table_empty() { - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - // server knows no ids: - adminQuery.resultFuture.complete( - new AdminResult(preparedIdRows(/*none*/ ), null, 
DefaultProtocolVersion.DEFAULT)); - - for (char c = 'a'; c <= 'f'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_reprepare_all_if_system_query_disabled() { - when(defaultProfile.getBoolean(DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE)) - .thenReturn(false); - - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery; - for (char c = 'a'; c <= 'f'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_not_reprepare_already_known_statements() { - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - // server knows d, e and f already: - adminQuery.resultFuture.complete( - new AdminResult(preparedIdRows('d', 'e', 'f'), null, 
DefaultProtocolVersion.DEFAULT)); - - for (char c = 'a'; c <= 'c'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_proceed_if_schema_agreement_not_reached() { - when(topologyMonitor.checkSchemaAgreement()) - .thenReturn(CompletableFuture.completedFuture(false)); - should_not_reprepare_already_known_statements(); - } - - @Test - public void should_proceed_if_schema_agreement_fails() { - when(topologyMonitor.checkSchemaAgreement()) - .thenReturn(CompletableFutures.failedFuture(new RuntimeException("test"))); - should_not_reprepare_already_known_statements(); - } - - @Test - public void should_limit_number_of_statements_to_reprepare() { - when(defaultProfile.getInt(DefaultDriverOption.REPREPARE_MAX_STATEMENTS)).thenReturn(3); - - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - // server knows no ids: - adminQuery.resultFuture.complete( - new AdminResult(preparedIdRows(/*none*/ ), null, DefaultProtocolVersion.DEFAULT)); - - for (char c = 'a'; c <= 'c'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + 
c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_limit_number_of_statements_reprepared_in_parallel() { - when(defaultProfile.getInt(DefaultDriverOption.REPREPARE_MAX_PARALLELISM)).thenReturn(3); - - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - // server knows no ids => will reprepare all 6: - adminQuery.resultFuture.complete( - new AdminResult(preparedIdRows(/*none*/ ), null, DefaultProtocolVersion.DEFAULT)); - - // 3 statements have enqueued, we've not completed the queries yet so no more should be sent: - assertThat(reprepareOnUp.queries.size()).isEqualTo(3); - - // As we complete each statement, another one should enqueue: - for (char c = 'a'; c <= 'c'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - assertThat(reprepareOnUp.queries.size()).isEqualTo(3); - } - - // Complete the last 3: - for (char c = 'd'; c <= 'f'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> 
assertThat(reprepareOnUp.queries).isEmpty()); - } - - private Map getMockPayloads(char... values) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (char value : values) { - ByteBuffer id = Bytes.fromHexString("0x0" + value); - builder.put( - id, new RepreparePayload(id, "mock query " + value, null, Collections.emptyMap())); - } - return builder.build(); - } - - /** Bypasses the channel to make testing easier. */ - private static class MockReprepareOnUp extends ReprepareOnUp { - - private Queue queries = new ArrayDeque<>(); - - MockReprepareOnUp( - String logPrefix, - ChannelPool pool, - EventExecutor adminExecutor, - Map repreparePayloads, - InternalDriverContext context, - Runnable whenPrepared) { - super(logPrefix, pool, adminExecutor, repreparePayloads, context, whenPrepared); - } - - @Override - protected CompletionStage queryAsync( - Message message, Map customPayload, String debugString) { - CompletableFuture resultFuture = new CompletableFuture<>(); - queries.add(new MockAdminQuery(message, resultFuture)); - return resultFuture; - } - - @Override - protected CompletionStage prepareAsync( - Message message, Map customPayload) { - CompletableFuture resultFuture = new CompletableFuture<>(); - queries.add(new MockAdminQuery(message, resultFuture)); - return resultFuture; - } - } - - private static class MockAdminQuery { - private final Message request; - private final CompletableFuture resultFuture; - - @SuppressWarnings("unchecked") - public MockAdminQuery(Message request, CompletableFuture resultFuture) { - this.request = request; - this.resultFuture = (CompletableFuture) resultFuture; - } - } - - private Rows preparedIdRows(char... 
values) { - ColumnSpec preparedIdSpec = - new ColumnSpec( - "system", - "prepared_statements", - "prepared_id", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.BLOB)); - RowsMetadata rowsMetadata = - new RowsMetadata(ImmutableList.of(preparedIdSpec), null, null, null); - Queue> data = new ArrayDeque<>(); - for (char value : values) { - data.add(ImmutableList.of(Bytes.fromHexString("0x0" + value))); - } - return new DefaultRows(rowsMetadata, data); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java deleted file mode 100644 index 7eb682070cd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.function.Consumer; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class ConcurrencyLimitingRequestThrottlerTest { - - @Mock private DriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - - private ConcurrencyLimitingRequestThrottler throttler; - - @Before - public void setup() { - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS)) - .thenReturn(5); - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE)) - .thenReturn(10); - - throttler = new ConcurrencyLimitingRequestThrottler(context); - } - - @Test - public void should_start_immediately_when_under_capacity() { - // Given - MockThrottled request = new MockThrottled(); - - // When - throttler.register(request); - - // Then - assertThatStage(request.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - 
assertThat(throttler.getConcurrentRequests()).isEqualTo(1); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_allow_new_request_when_active_one_succeeds() { - should_allow_new_request_when_active_one_completes(throttler::signalSuccess); - } - - @Test - public void should_allow_new_request_when_active_one_fails() { - should_allow_new_request_when_active_one_completes( - request -> throttler.signalError(request, new RuntimeException("mock error"))); - } - - @Test - public void should_allow_new_request_when_active_one_times_out() { - should_allow_new_request_when_active_one_completes(throttler::signalTimeout); - } - - @Test - public void should_allow_new_request_when_active_one_canceled() { - should_allow_new_request_when_active_one_completes(throttler::signalCancel); - } - - private void should_allow_new_request_when_active_one_completes( - Consumer completeCallback) { - // Given - MockThrottled first = new MockThrottled(); - throttler.register(first); - assertThatStage(first.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - for (int i = 0; i < 4; i++) { // fill to capacity - throttler.register(new MockThrottled()); - } - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).isEmpty(); - - // When - completeCallback.accept(first); - assertThat(throttler.getConcurrentRequests()).isEqualTo(4); - assertThat(throttler.getQueue()).isEmpty(); - MockThrottled incoming = new MockThrottled(); - throttler.register(incoming); - - // Then - assertThatStage(incoming.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_enqueue_when_over_capacity() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - 
assertThat(throttler.getQueue()).isEmpty(); - - // When - MockThrottled incoming = new MockThrottled(); - throttler.register(incoming); - - // Then - assertThatStage(incoming.ended).isNotDone(); - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).containsExactly(incoming); - } - - @Test - public void should_dequeue_when_active_succeeds() { - should_dequeue_when_active_completes(throttler::signalSuccess); - } - - @Test - public void should_dequeue_when_active_fails() { - should_dequeue_when_active_completes( - request -> throttler.signalError(request, new RuntimeException("mock error"))); - } - - @Test - public void should_dequeue_when_active_times_out() { - should_dequeue_when_active_completes(throttler::signalTimeout); - } - - private void should_dequeue_when_active_completes(Consumer completeCallback) { - // Given - MockThrottled first = new MockThrottled(); - throttler.register(first); - assertThatStage(first.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - for (int i = 0; i < 4; i++) { - throttler.register(new MockThrottled()); - } - - MockThrottled incoming = new MockThrottled(); - throttler.register(incoming); - assertThatStage(incoming.ended).isNotDone(); - - // When - completeCallback.accept(first); - - // Then - assertThatStage(incoming.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_reject_when_queue_is_full() { - // Given - for (int i = 0; i < 15; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).hasSize(10); - - // When - MockThrottled incoming = new MockThrottled(); - throttler.register(incoming); - - // Then - assertThatStage(incoming.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } 
- - @Test - public void should_remove_timed_out_request_from_queue() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - MockThrottled queued1 = new MockThrottled(); - throttler.register(queued1); - MockThrottled queued2 = new MockThrottled(); - throttler.register(queued2); - - // When - throttler.signalTimeout(queued1); - - // Then - assertThatStage(queued2.ended).isNotDone(); - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).hasSize(1); - } - - @Test - public void should_reject_enqueued_when_closing() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - List enqueued = Lists.newArrayList(); - for (int i = 0; i < 10; i++) { - MockThrottled request = new MockThrottled(); - throttler.register(request); - assertThatStage(request.ended).isNotDone(); - enqueued.add(request); - } - - // When - throttler.close(); - - // Then - for (MockThrottled request : enqueued) { - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } - - // When - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } - - @Test - public void should_run_throttle_callbacks_concurrently() throws InterruptedException { - // Given - - // a task is enqueued, which when in onThrottleReady, will stall latch countDown()ed - // register() should automatically start onThrottleReady on same thread - - // start a parallel thread - CountDownLatch firstRelease = new CountDownLatch(1); - MockThrottled first = new MockThrottled(firstRelease); - Runnable r = - () -> { - throttler.register(first); - first.ended.toCompletableFuture().thenRun(() -> throttler.signalSuccess(first)); - }; - Thread t = new Thread(r); - t.start(); - - // wait for the registration threads to 
reach await state - assertThatStage(first.started).isSuccess(); - assertThatStage(first.ended).isNotDone(); - - // When - // we concurrently submit a second shorter task - MockThrottled second = new MockThrottled(); - // (on a second thread, so that we can join and force a timeout in case - // registration is delayed) - Thread t2 = new Thread(() -> throttler.register(second)); - t2.start(); - t2.join(1_000); - - // Then - // registration will trigger callback, should complete ~immediately - assertThatStage(second.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - // first should still be unfinished - assertThatStage(first.started).isDone(); - assertThatStage(first.ended).isNotDone(); - // now finish, and verify - firstRelease.countDown(); - assertThatStage(first.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - - t.join(1_000); - } - - @Test - public void should_enqueue_tasks_quickly_when_callbacks_blocked() throws InterruptedException { - // Given - - // Multiple tasks are registered, up to the limit, and proceed into their - // callback - - // start five parallel threads - final int THREADS = 5; - Thread[] threads = new Thread[THREADS]; - CountDownLatch[] latches = new CountDownLatch[THREADS]; - MockThrottled[] throttled = new MockThrottled[THREADS]; - for (int i = 0; i < threads.length; i++) { - latches[i] = new CountDownLatch(1); - final MockThrottled itThrottled = new MockThrottled(latches[i]); - throttled[i] = itThrottled; - threads[i] = - new Thread( - () -> { - throttler.register(itThrottled); - itThrottled - .ended - .toCompletableFuture() - .thenRun(() -> throttler.signalSuccess(itThrottled)); - }); - threads[i].start(); - } - - // wait for the registration threads to be launched - // they are all waiting now - for (int i = 0; i < throttled.length; i++) { - assertThatStage(throttled[i].started).isSuccess(); - assertThatStage(throttled[i].ended).isNotDone(); - } - - // When - // we concurrently submit another task - 
MockThrottled last = new MockThrottled(); - throttler.register(last); - - // Then - // registration will enqueue the callback, and it should not - // take any time to proceed (ie: we should not be blocked) - // and there should be an element in the queue - assertThatStage(last.started).isNotDone(); - assertThatStage(last.ended).isNotDone(); - assertThat(throttler.getQueue()).containsExactly(last); - - // we still have not released, so old throttled threads should be waiting - for (int i = 0; i < throttled.length; i++) { - assertThatStage(throttled[i].started).isDone(); - assertThatStage(throttled[i].ended).isNotDone(); - } - - // now let us release .. - for (int i = 0; i < latches.length; i++) { - latches[i].countDown(); - } - - // .. and check everything finished up OK - for (int i = 0; i < latches.length; i++) { - assertThatStage(throttled[i].started).isSuccess(); - assertThatStage(throttled[i].ended).isSuccess(); - } - - // for good measure, we will also wait for the enqueued to complete - assertThatStage(last.started).isSuccess(); - assertThatStage(last.ended).isSuccess(); - - for (int i = 0; i < threads.length; i++) { - threads[i].join(1_000); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java deleted file mode 100644 index 9e54e3d511f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CountDownLatch; - -class MockThrottled implements Throttled { - final CompletionStage started = new CompletableFuture<>(); - final CompletionStage ended = new CompletableFuture<>(); - final CountDownLatch canRelease; - - public MockThrottled() { - this(new CountDownLatch(0)); - } - - /* - * The releaseLatch can be provided to add some delay before the - * task readiness/fail callbacks complete. This can be used, eg, to - * imitate a slow callback. 
- */ - public MockThrottled(CountDownLatch releaseLatch) { - this.canRelease = releaseLatch; - } - - @Override - public void onThrottleReady(boolean wasDelayed) { - started.toCompletableFuture().complete(null); - awaitRelease(); - ended.toCompletableFuture().complete(wasDelayed); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - started.toCompletableFuture().complete(null); - awaitRelease(); - ended.toCompletableFuture().completeExceptionally(error); - } - - private void awaitRelease() { - Uninterruptibles.awaitUninterruptibly(canRelease); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java deleted file mode 100644 index 1e15610bf7b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.util.concurrent.ScheduledTaskCapturingEventLoop; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import io.netty.channel.EventLoopGroup; -import java.time.Duration; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Silent.class) -public class RateLimitingRequestThrottlerTest { - - private static final long ONE_HUNDRED_MILLISECONDS = - TimeUnit.NANOSECONDS.convert(100, TimeUnit.MILLISECONDS); - private static final long TWO_HUNDRED_MILLISECONDS = - TimeUnit.NANOSECONDS.convert(200, TimeUnit.MILLISECONDS); - private static final long TWO_SECONDS = TimeUnit.NANOSECONDS.convert(2, TimeUnit.SECONDS); - - // Note: we trigger scheduled task manually, so this is for verification purposes only, it doesn't - // need to be consistent with the actual throttling rate. 
- private static final Duration DRAIN_INTERVAL = Duration.ofMillis(10); - - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private NettyOptions nettyOptions; - @Mock private EventLoopGroup adminGroup; - - private ScheduledTaskCapturingEventLoop adminExecutor; - private SettableNanoClock clock = new SettableNanoClock(); - - private RateLimitingRequestThrottler throttler; - - @Before - public void setup() { - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND)) - .thenReturn(5); - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE)) - .thenReturn(10); - - // Set to match the time to reissue one permit. Although it does not matter in practice, since - // the executor is mocked and we trigger tasks manually. - when(defaultProfile.getDuration(DefaultDriverOption.REQUEST_THROTTLER_DRAIN_INTERVAL)) - .thenReturn(DRAIN_INTERVAL); - - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminGroup); - adminExecutor = new ScheduledTaskCapturingEventLoop(adminGroup); - when(adminGroup.next()).thenReturn(adminExecutor); - - throttler = new RateLimitingRequestThrottler(context, clock); - } - - /** Note: the throttler starts with 1 second worth of permits, so at t=0 we have 5 available. 
*/ - @Test - public void should_start_immediately_when_under_capacity() { - // Given - MockThrottled request = new MockThrottled(); - - // When - throttler.register(request); - - // Then - assertThatStage(request.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - assertThat(throttler.getStoredPermits()).isEqualTo(4); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_allow_new_request_when_under_rate() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - - // When - clock.add(TWO_HUNDRED_MILLISECONDS); - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_enqueue_when_over_rate() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - - // When - // (do not advance time) - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended).isNotDone(); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).containsExactly(request); - - ScheduledTaskCapturingEventLoop.CapturedTask task = adminExecutor.nextTask(); - assertThat(task).isNotNull(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(DRAIN_INTERVAL.toNanos()); - } - - @Test - public void should_reject_when_queue_is_full() { - // Given - for (int i = 0; i < 15; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).hasSize(10); - - // When - clock.add(TWO_HUNDRED_MILLISECONDS); // even if time has passed, queued 
items have priority - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } - - @Test - public void should_remove_timed_out_request_from_queue() { - testRemoveInvalidEventFromQueue(throttler::signalTimeout); - } - - @Test - public void should_remove_cancel_request_from_queue() { - testRemoveInvalidEventFromQueue(throttler::signalCancel); - } - - private void testRemoveInvalidEventFromQueue(Consumer completeCallback) { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - MockThrottled queued1 = new MockThrottled(); - throttler.register(queued1); - MockThrottled queued2 = new MockThrottled(); - throttler.register(queued2); - - // When - completeCallback.accept(queued1); - - // Then - assertThatStage(queued2.ended).isNotDone(); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).containsExactly(queued2); - } - - @Test - public void should_dequeue_when_draining_task_runs() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - - MockThrottled queued1 = new MockThrottled(); - throttler.register(queued1); - assertThatStage(queued1.ended).isNotDone(); - MockThrottled queued2 = new MockThrottled(); - throttler.register(queued2); - assertThatStage(queued2.ended).isNotDone(); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).hasSize(2); - - ScheduledTaskCapturingEventLoop.CapturedTask task = adminExecutor.nextTask(); - assertThat(task).isNotNull(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(DRAIN_INTERVAL.toNanos()); - - // When - // (do not advance clock => no new permits) - task.run(); - - // Then - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).containsExactly(queued1, queued2); - // task 
reschedules itself since it did not empty the queue - task = adminExecutor.nextTask(); - assertThat(task).isNotNull(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(DRAIN_INTERVAL.toNanos()); - - // When - clock.add(TWO_HUNDRED_MILLISECONDS); // 1 extra permit issued - task.run(); - - // Then - assertThatStage(queued1.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); - assertThatStage(queued2.ended).isNotDone(); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).containsExactly(queued2); - // task reschedules itself since it did not empty the queue - task = adminExecutor.nextTask(); - assertThat(task).isNotNull(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(DRAIN_INTERVAL.toNanos()); - - // When - clock.add(TWO_HUNDRED_MILLISECONDS); - task.run(); - - // Then - assertThatStage(queued2.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).isEmpty(); - assertThat(adminExecutor.nextTask()).isNull(); - } - - @Test - public void should_store_new_permits_up_to_threshold() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - - // When - clock.add(TWO_SECONDS); // should store at most 1 second worth of permits - - // Then - // acquire to trigger the throttler to update its permits - throttler.register(new MockThrottled()); - assertThat(throttler.getStoredPermits()).isEqualTo(4); - } - - /** - * Ensure that permits are still created if we try to acquire faster than the minimal interval to - * create one permit. In an early version of the code there was a bug where we would reset the - * elapsed time on each acquisition attempt, and never regenerate permits. 
- */ - @Test - public void should_keep_accumulating_time_if_no_permits_created() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - - // When - clock.add(ONE_HUNDRED_MILLISECONDS); - - // Then - MockThrottled queued = new MockThrottled(); - throttler.register(queued); - assertThatStage(queued.ended).isNotDone(); - - // When - clock.add(ONE_HUNDRED_MILLISECONDS); - adminExecutor.nextTask().run(); - - // Then - assertThatStage(queued.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); - } - - @Test - public void should_reject_enqueued_when_closing() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - List enqueued = Lists.newArrayList(); - for (int i = 0; i < 10; i++) { - MockThrottled request = new MockThrottled(); - throttler.register(request); - assertThatStage(request.ended).isNotDone(); - enqueued.add(request); - } - - // When - throttler.close(); - - // Then - for (MockThrottled request : enqueued) { - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } - - // When - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java deleted file mode 100644 index 1489d1da345..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -class SettableNanoClock implements NanoClock { - - private volatile long nanoTime; - - @Override - public long nanoTime() { - return nanoTime; - } - - // This is racy, but in our tests it's never read concurrently - @SuppressWarnings({"NonAtomicVolatileUpdate", "NonAtomicOperationOnVolatileField"}) - void add(long increment) { - nanoTime += increment; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactoryTest.java deleted file mode 100644 index d07b45c21df..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactoryTest.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.ssl; - -import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; -import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; - -import java.io.IOException; -import java.io.InputStream; -import java.math.BigInteger; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.net.SocketException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyManagementException; -import java.security.KeyStore; -import java.security.NoSuchAlgorithmException; -import java.security.SecureRandom; -import java.security.cert.X509Certificate; -import java.util.Optional; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; -import java.util.function.Supplier; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLPeerUnverifiedException; -import javax.net.ssl.SSLServerSocket; -import javax.net.ssl.SSLSocket; -import javax.net.ssl.TrustManagerFactory; -import org.junit.Assert; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ReloadingKeyManagerFactoryTest { - private static final Logger logger = - LoggerFactory.getLogger(ReloadingKeyManagerFactoryTest.class); - - static final Path CERT_BASE = - Paths.get( - ReloadingKeyManagerFactoryTest.class - .getResource( - String.format("/%s/certs/", 
ReloadingKeyManagerFactoryTest.class.getSimpleName())) - .getPath()); - static final Path SERVER_KEYSTORE_PATH = CERT_BASE.resolve("server.keystore"); - static final Path SERVER_TRUSTSTORE_PATH = CERT_BASE.resolve("server.truststore"); - - static final Path ORIGINAL_CLIENT_KEYSTORE_PATH = CERT_BASE.resolve("client-original.keystore"); - static final Path ALTERNATE_CLIENT_KEYSTORE_PATH = CERT_BASE.resolve("client-alternate.keystore"); - static final BigInteger ORIGINAL_CLIENT_KEYSTORE_CERT_SERIAL = - convertSerial("7372a966"); // 1936894310 - static final BigInteger ALTERNATE_CLIENT_KEYSTORE_CERT_SERIAL = - convertSerial("e50bf31"); // 240172849 - - // File at this path will change content - static final Path TMP_CLIENT_KEYSTORE_PATH; - - static { - try { - TMP_CLIENT_KEYSTORE_PATH = - Files.createTempFile(ReloadingKeyManagerFactoryTest.class.getSimpleName(), null); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - static final Path CLIENT_TRUSTSTORE_PATH = CERT_BASE.resolve("client.truststore"); - static final String CERTSTORE_PASSWORD = "changeit"; - - private static TrustManagerFactory buildTrustManagerFactory() { - TrustManagerFactory tmf; - try (InputStream tsf = Files.newInputStream(CLIENT_TRUSTSTORE_PATH)) { - KeyStore ts = KeyStore.getInstance("JKS"); - char[] password = CERTSTORE_PASSWORD.toCharArray(); - ts.load(tsf, password); - tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(ts); - } catch (Exception e) { - throw new RuntimeException(e); - } - return tmf; - } - - private static SSLContext buildServerSslContext() { - try { - SSLContext context = SSLContext.getInstance("SSL"); - - TrustManagerFactory tmf; - try (InputStream tsf = Files.newInputStream(SERVER_TRUSTSTORE_PATH)) { - KeyStore ts = KeyStore.getInstance("JKS"); - char[] password = CERTSTORE_PASSWORD.toCharArray(); - ts.load(tsf, password); - tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - 
tmf.init(ts); - } - - KeyManagerFactory kmf; - try (InputStream ksf = Files.newInputStream(SERVER_KEYSTORE_PATH)) { - KeyStore ks = KeyStore.getInstance("JKS"); - char[] password = CERTSTORE_PASSWORD.toCharArray(); - ks.load(ksf, password); - kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, password); - } - - context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); - return context; - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - @Test - public void client_certificates_should_reload() throws Exception { - Files.copy( - ORIGINAL_CLIENT_KEYSTORE_PATH, TMP_CLIENT_KEYSTORE_PATH, REPLACE_EXISTING, COPY_ATTRIBUTES); - - final BlockingQueue> peerCertificates = - new LinkedBlockingQueue<>(1); - - // Create a listening socket. Make sure there's no backlog so each accept is in order. - SSLContext serverSslContext = buildServerSslContext(); - final SSLServerSocket server = - (SSLServerSocket) serverSslContext.getServerSocketFactory().createServerSocket(); - server.bind(new InetSocketAddress(0), 1); - server.setUseClientMode(false); - server.setNeedClientAuth(true); - Thread serverThread = - new Thread( - () -> { - while (true) { - try { - logger.info("Server accepting client"); - final SSLSocket conn = (SSLSocket) server.accept(); - logger.info("Server accepted client {}", conn); - conn.addHandshakeCompletedListener( - event -> { - boolean offer; - try { - // Transfer certificates to client thread once handshake is complete, so - // it can safely close - // the socket - offer = - peerCertificates.offer( - Optional.of((X509Certificate[]) event.getPeerCertificates())); - } catch (SSLPeerUnverifiedException e) { - offer = peerCertificates.offer(Optional.empty()); - } - Assert.assertTrue(offer); - }); - logger.info("Server starting handshake"); - // Without this, client handshake blocks - conn.startHandshake(); - } catch (IOException e) { - // Not sure why I sometimes see ~thousands of these 
locally - if (e instanceof SocketException && e.getMessage().contains("Socket closed")) - return; - logger.info("Server accept error", e); - } - } - }); - serverThread.setName(String.format("%s-serverThread", this.getClass().getSimpleName())); - serverThread.setDaemon(true); - serverThread.start(); - - final ReloadingKeyManagerFactory kmf = - ReloadingKeyManagerFactory.create( - TMP_CLIENT_KEYSTORE_PATH, CERTSTORE_PASSWORD, Optional.empty()); - // Need a tmf that tells the server to send its certs - final TrustManagerFactory tmf = buildTrustManagerFactory(); - - // Check original client certificate - testClientCertificates( - kmf, - tmf, - server.getLocalSocketAddress(), - () -> { - try { - return peerCertificates.poll(10, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - }, - certs -> { - Assert.assertEquals(1, certs.length); - X509Certificate cert = certs[0]; - Assert.assertEquals(ORIGINAL_CLIENT_KEYSTORE_CERT_SERIAL, cert.getSerialNumber()); - }); - - // Update keystore content - logger.info("Updating keystore file with new content"); - Files.copy( - ALTERNATE_CLIENT_KEYSTORE_PATH, - TMP_CLIENT_KEYSTORE_PATH, - REPLACE_EXISTING, - COPY_ATTRIBUTES); - kmf.reload(); - - // Check that alternate client certificate was applied - testClientCertificates( - kmf, - tmf, - server.getLocalSocketAddress(), - () -> { - try { - return peerCertificates.poll(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - }, - certs -> { - Assert.assertEquals(1, certs.length); - X509Certificate cert = certs[0]; - Assert.assertEquals(ALTERNATE_CLIENT_KEYSTORE_CERT_SERIAL, cert.getSerialNumber()); - }); - - kmf.close(); - server.close(); - } - - private static void testClientCertificates( - KeyManagerFactory kmf, - TrustManagerFactory tmf, - SocketAddress serverAddress, - Supplier> certsSupplier, - Consumer certsConsumer) - throws NoSuchAlgorithmException, KeyManagementException, IOException { - 
SSLContext clientSslContext = SSLContext.getInstance("TLS"); - clientSslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); - final SSLSocket client = (SSLSocket) clientSslContext.getSocketFactory().createSocket(); - logger.info("Client connecting"); - client.connect(serverAddress); - logger.info("Client doing handshake"); - client.startHandshake(); - - final Optional lastCertificate = certsSupplier.get(); - logger.info("Client got its certificate back from the server; closing socket"); - client.close(); - Assert.assertNotNull(lastCertificate); - Assert.assertTrue(lastCertificate.isPresent()); - logger.info("Client got its certificate back from server: {}", lastCertificate); - - certsConsumer.accept(lastCertificate.get()); - } - - private static BigInteger convertSerial(String hex) { - final BigInteger serial = new BigInteger(Integer.valueOf(hex, 16).toString()); - logger.info("Serial hex {} is {}", hex, serial); - return serial; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java deleted file mode 100644 index f1827eb8a86..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.fail; -import static org.mockito.Mockito.when; - -import java.util.SortedSet; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.stubbing.OngoingStubbing; - -public class AtomicTimestampGeneratorTest extends MonotonicTimestampGeneratorTestBase { - @Override - protected MonotonicTimestampGenerator newInstance(Clock clock) { - return new AtomicTimestampGenerator(clock, context); - } - - @Test - public void should_share_timestamps_across_all_threads() throws Exception { - // Prepare to generate 1000 timestamps with the clock frozen at 1 - OngoingStubbing stub = when(clock.currentTimeMicros()); - for (int i = 0; i < 1000; i++) { - stub = stub.thenReturn(1L); - } - - MonotonicTimestampGenerator generator = newInstance(clock); - - final int testThreadsCount = 2; - assertThat(1000 % testThreadsCount).isZero(); - - final SortedSet allTimestamps = new ConcurrentSkipListSet(); - ExecutorService executor = Executors.newFixedThreadPool(testThreadsCount); - for (int i = 0; i < testThreadsCount; i++) { - executor.submit( - () -> { - for (int j = 0; j < 1000 / testThreadsCount; j++) { - allTimestamps.add(generator.next()); - } - }); - } - executor.shutdown(); - if (!executor.awaitTermination(1, TimeUnit.SECONDS)) { - fail("Expected 
executor to shut down cleanly"); - } - - assertThat(allTimestamps).hasSize(1000); - assertThat(allTimestamps.first()).isEqualTo(1); - assertThat(allTimestamps.last()).isEqualTo(1000); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java deleted file mode 100644 index 7074dd4ccc2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.time.Duration; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.stubbing.OngoingStubbing; -import org.slf4j.LoggerFactory; - -abstract class MonotonicTimestampGeneratorTestBase { - - @Mock protected Clock clock; - @Mock protected InternalDriverContext context; - @Mock private DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - private Logger logger; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - // Disable warnings by default - when(defaultProfile.getDuration( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, Duration.ZERO)) - .thenReturn(Duration.ZERO); - // Actual value doesn't really matter since we only test the first warning - when(defaultProfile.getDuration(DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL)) - .thenReturn(Duration.ofSeconds(10)); - - logger = (Logger) LoggerFactory.getLogger(MonotonicTimestampGenerator.class); - 
logger.addAppender(appender); - } - - @After - public void teardown() { - logger.detachAppender(appender); - } - - protected abstract MonotonicTimestampGenerator newInstance(Clock clock); - - @Test - public void should_use_clock_if_it_keeps_increasing() { - OngoingStubbing stub = when(clock.currentTimeMicros()); - for (long l = 1; l < 5; l++) { - stub = stub.thenReturn(l); - } - - MonotonicTimestampGenerator generator = newInstance(clock); - - for (long l = 1; l < 5; l++) { - assertThat(generator.next()).isEqualTo(l); - } - } - - @Test - public void should_increment_if_clock_does_not_increase() { - when(clock.currentTimeMicros()).thenReturn(1L, 1L, 1L, 5L); - - MonotonicTimestampGenerator generator = newInstance(clock); - - assertThat(generator.next()).isEqualTo(1); - assertThat(generator.next()).isEqualTo(2); - assertThat(generator.next()).isEqualTo(3); - assertThat(generator.next()).isEqualTo(5); - } - - @Test - public void should_warn_if_timestamps_drift() { - when(defaultProfile.getDuration( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, Duration.ZERO)) - .thenReturn(Duration.ofNanos(2 * 1000)); - when(clock.currentTimeMicros()).thenReturn(1L, 1L, 1L, 1L, 1L); - - MonotonicTimestampGenerator generator = newInstance(clock); - - assertThat(generator.next()).isEqualTo(1); - assertThat(generator.next()).isEqualTo(2); - assertThat(generator.next()).isEqualTo(3); - assertThat(generator.next()).isEqualTo(4); - // Clock still at 1, last returned timestamp is 4 (> 1 + 2), should warn - assertThat(generator.next()).isEqualTo(5); - - verify(appender).doAppend(loggingEventCaptor.capture()); - ILoggingEvent log = loggingEventCaptor.getValue(); - assertThat(log.getLevel()).isEqualTo(Level.WARN); - assertThat(log.getMessage()).contains("Clock skew detected"); - } - - @Test - public void should_go_back_to_clock_if_new_tick_high_enough() { - when(clock.currentTimeMicros()).thenReturn(1L, 1L, 1L, 1L, 1L, 10L); - - MonotonicTimestampGenerator generator = 
newInstance(clock); - - for (long l = 1; l <= 5; l++) { - // Clock at 1, keep incrementing - assertThat(generator.next()).isEqualTo(l); - } - - // Last returned is 5, but clock has ticked to 10, should use that. - assertThat(generator.next()).isEqualTo(10); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java deleted file mode 100644 index 5d9ed8b2ceb..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static com.datastax.oss.driver.Assertions.fail; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.stubbing.OngoingStubbing; - -public class ThreadLocalTimestampGeneratorTest extends MonotonicTimestampGeneratorTestBase { - @Override - protected MonotonicTimestampGenerator newInstance(Clock clock) { - return new ThreadLocalTimestampGenerator(clock, context); - } - - @Test - public void should_confine_timestamps_to_thread() throws Exception { - final int testThreadsCount = 2; - - // Prepare to generate 1000 timestamps for each thread, with the clock frozen at 1 - OngoingStubbing stub = when(clock.currentTimeMicros()); - for (int i = 0; i < testThreadsCount * 1000; i++) { - stub = stub.thenReturn(1L); - } - - MonotonicTimestampGenerator generator = newInstance(clock); - - List> futures = new CopyOnWriteArrayList<>(); - ExecutorService executor = Executors.newFixedThreadPool(testThreadsCount); - for (int i = 0; i < testThreadsCount; i++) { - executor.submit( - () -> { - try { - for (long l = 1; l <= 1000; l++) { - assertThat(generator.next()).isEqualTo(l); - } - futures.add(CompletableFuture.completedFuture(null)); - } catch (Throwable t) { - futures.add(CompletableFutures.failedFuture(t)); - } - }); - } - executor.shutdown(); - if (!executor.awaitTermination(1, TimeUnit.SECONDS)) { - fail("Expected executor to shut down cleanly"); - } - - 
assertThat(futures).hasSize(testThreadsCount); - for (CompletionStage future : futures) { - assertThatStage(future).isSuccess(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTrackerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTrackerTest.java deleted file mode 100644 index 8dcad99b459..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTrackerTest.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.DriverExecutionException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class MultiplexingRequestTrackerTest { - - @Mock private RequestTracker child1; - @Mock private RequestTracker child2; - @Mock private Request request; - @Mock private DriverExecutionProfile profile; - @Mock private Node node; - @Mock private Session session; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - private Logger logger; - private Level initialLogLevel; - - private final Exception error = new DriverExecutionException(new NullPointerException()); - - @Before - public void addAppenders() { - logger = (Logger) LoggerFactory.getLogger(MultiplexingRequestTracker.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - } - - @After - public void removeAppenders() { - logger.detachAppender(appender); - logger.setLevel(initialLogLevel); - } - - @Test - 
public void should_register() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(); - // when - tracker.register(child1); - tracker.register(child2); - // then - assertThat(tracker).extracting("trackers").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_tracker_via_constructor() { - // given - MultiplexingRequestTracker tracker = - new MultiplexingRequestTracker(new MultiplexingRequestTracker(child1, child2)); - // when - // then - assertThat(tracker).extracting("trackers").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_tracker_via_register() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(); - // when - tracker.register(new MultiplexingRequestTracker(child1, child2)); - // then - assertThat(tracker).extracting("trackers").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_notify_onSuccess() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onSuccess(request, 123456L, profile, node, "test"); - // when - tracker.onSuccess(request, 123456L, profile, node, "test"); - // then - verify(child1).onSuccess(request, 123456L, profile, node, "test"); - verify(child2).onSuccess(request, 123456L, profile, node, "test"); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onSuccess event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onError() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onError(request, error, 123456L, profile, node, "test"); - // when - tracker.onError(request, error, 123456L, profile, node, "test"); - // then - verify(child1).onError(request, error, 123456L, profile, node, "test"); - verify(child2).onError(request, error, 123456L, profile, node, "test"); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onError event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onNodeSuccess() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onNodeSuccess(request, 123456L, profile, node, "test"); - // when - tracker.onNodeSuccess(request, 123456L, profile, node, "test"); - // then - verify(child1).onNodeSuccess(request, 123456L, profile, node, "test"); - verify(child2).onNodeSuccess(request, 123456L, profile, node, "test"); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onNodeSuccess event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onNodeError() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onNodeError(request, error, 123456L, profile, node, "test"); - // when - tracker.onNodeError(request, error, 123456L, profile, node, "test"); - // then - verify(child1).onNodeError(request, error, 123456L, profile, node, "test"); - verify(child2).onNodeError(request, error, 123456L, profile, node, "test"); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onNodeError event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onSessionReady() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()).given(child1).onSessionReady(session); - given(session.getName()).willReturn("test"); - // when - tracker.onSessionReady(session); - // then - verify(child1).onSessionReady(session); - verify(child2).onSessionReady(session); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onSessionReady event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_close() throws Exception { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - Exception child1Error = new NullPointerException(); - willThrow(child1Error).given(child1).close(); - // when - tracker.close(); - // then - verify(child1).close(); - verify(child2).close(); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while closing request tracker child1. (NullPointerException: null)"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestIdGeneratorTest.java deleted file mode 100644 index fb1883e125f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestIdGeneratorTest.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class RequestIdGeneratorTest { - @Mock private InternalDriverContext context; - @Mock private Statement statement; - - @Test - public void uuid_generator_should_generate() { - // given - UuidRequestIdGenerator generator = new UuidRequestIdGenerator(context); - // when - String parentId = generator.getSessionRequestId(); - String requestId = generator.getNodeRequestId(statement, parentId); - // then - // e.g. "550e8400-e29b-41d4-a716-446655440000", which is 36 characters long - assertThat(parentId.length()).isEqualTo(36); - // e.g. "550e8400-e29b-41d4-a716-446655440000-550e8400-e29b-41d4-a716-446655440000", which is 73 - // characters long - assertThat(requestId.length()).isEqualTo(73); - } - - @Test - public void w3c_generator_should_generate() { - // given - W3CContextRequestIdGenerator generator = new W3CContextRequestIdGenerator(context); - // when - String parentId = generator.getSessionRequestId(); - String requestId = generator.getNodeRequestId(statement, parentId); - // then - // e.g. "4bf92f3577b34da6a3ce929d0e0e4736", which is 32 characters long - assertThat(parentId.length()).isEqualTo(32); - // According to W3C "traceparent" spec, - // https://www.w3.org/TR/trace-context/#traceparent-header-field-values - // e.g. 
"00-4bf92f3577b34da6a3ce929d0e0e4736-a3ce929d0e0e4736-01", which 55 characters long - assertThat(requestId.length()).isEqualTo(55); - } - - @Test - public void w3c_generator_default_payloadkey() { - W3CContextRequestIdGenerator w3cGenerator = new W3CContextRequestIdGenerator(context); - assertThat(w3cGenerator.getCustomPayloadKey()) - .isEqualTo(RequestIdGenerator.DEFAULT_PAYLOAD_KEY); - } - - @Test - public void w3c_generator_provided_payloadkey() { - String someString = RandomStringUtils.random(12); - W3CContextRequestIdGenerator w3cGenerator = new W3CContextRequestIdGenerator(someString); - assertThat(w3cGenerator.getCustomPayloadKey()).isEqualTo(someString); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java deleted file mode 100644 index e9fb518b51f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.cql.DefaultColumnDefinition; -import com.datastax.oss.driver.internal.core.cql.DefaultColumnDefinitions; -import com.datastax.oss.driver.internal.core.cql.DefaultPreparedStatement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.Collections; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class RequestLogFormatterTest { - - @Mock private DriverContext context; - private final ProtocolVersion protocolVersion = DefaultProtocolVersion.V4; - - private RequestLogFormatter formatter; - - 
@Before - public void setup() { - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(context.getProtocolVersion()).thenReturn(protocolVersion); - - formatter = new RequestLogFormatter(context); - } - - @Test - public void should_format_simple_statement_without_values() { - SimpleStatement statement = - SimpleStatement.newInstance("SELECT release_version FROM system.local"); - - assertThat( - formatRequest( - statement, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[0 values] SELECT release_version FROM system.local"); - - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[0 values] SELECT release_version FROM system.local"); - - assertThat(formatRequest(statement, 20, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[0 values] SELECT release_versi..."); - } - - @Test - public void should_format_simple_statement_with_positional_values() { - SimpleStatement statement = - SimpleStatement.builder("UPDATE foo SET v=? WHERE k=?") - .addPositionalValue(Bytes.fromHexString("0xdeadbeef")) - .addPositionalValue(0) - .build(); - - assertThat( - formatRequest( - statement, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=?"); - - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? [v0=0xdeadbeef, v1=0]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, 1, Integer.MAX_VALUE)) - .isEqualTo( - "[2 values] UPDATE foo SET v=? WHERE k=? [v0=0xdeadbeef, ...]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, 4)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? 
[v0=0xde..., v1=0]"); - } - - @Test - public void should_format_simple_statement_with_named_values() { - SimpleStatement statement = - SimpleStatement.builder("UPDATE foo SET v=:v WHERE k=:k") - .addNamedValue("v", Bytes.fromHexString("0xdeadbeef")) - .addNamedValue("k", 0) - .build(); - - assertThat( - formatRequest( - statement, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=:v WHERE k=:k"); - - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=:v WHERE k=:k [v=0xdeadbeef, k=0]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, 1, Integer.MAX_VALUE)) - .isEqualTo( - "[2 values] UPDATE foo SET v=:v WHERE k=:k [v=0xdeadbeef, ...]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, 4)) - .isEqualTo("[2 values] UPDATE foo SET v=:v WHERE k=:k [v=0xde..., k=0]"); - } - - @Test - public void should_format_bound_statement() { - PreparedStatement preparedStatement = - mockPreparedStatement( - "UPDATE foo SET v=? WHERE k=?", - ImmutableMap.of("v", DataTypes.BLOB, "k", DataTypes.INT)); - BoundStatement statement = preparedStatement.bind(Bytes.fromHexString("0xdeadbeef"), 0); - - assertThat( - formatRequest( - statement, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=?"); - - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? [v=0xdeadbeef, k=0]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, 1, Integer.MAX_VALUE)) - .isEqualTo( - "[2 values] UPDATE foo SET v=? WHERE k=? [v=0xdeadbeef, ...]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, 4)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? 
[v=0xde..., k=0]"); - } - - @Test - public void should_format_bound_statement_with_unset_values() { - PreparedStatement preparedStatement = - mockPreparedStatement( - "UPDATE foo SET v=? WHERE k=?", - ImmutableMap.of("v", DataTypes.BLOB, "k", DataTypes.INT)); - BoundStatement statement = preparedStatement.bind().setInt("k", 0); - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? [v=, k=0]"); - } - - @Test - public void should_format_batch_statement() { - SimpleStatement statement1 = - SimpleStatement.builder("UPDATE foo SET v=? WHERE k=?") - .addNamedValue("v", Bytes.fromHexString("0xdeadbeef")) - .addNamedValue("k", 0) - .build(); - - PreparedStatement preparedStatement = - mockPreparedStatement( - "UPDATE foo SET v=? WHERE k=?", - ImmutableMap.of("v", DataTypes.BLOB, "k", DataTypes.INT)); - BoundStatement statement2 = preparedStatement.bind(Bytes.fromHexString("0xabcdef"), 1); - - BatchStatement batch = - BatchStatement.builder(DefaultBatchType.UNLOGGED) - .addStatements(statement1, statement2) - .build(); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? WHERE k=?; " - + "APPLY BATCH"); - - assertThat(formatRequest(batch, 20, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 statements, 4 values] BEGIN UNLOGGED BATCH..."); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? 
WHERE k=?; " - + "APPLY BATCH " - + "[v=0xdeadbeef, k=0]" - + "[v=0xabcdef, k=1]"); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, true, 3, Integer.MAX_VALUE)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? WHERE k=?; " - + "APPLY BATCH " - + "[v=0xdeadbeef, k=0]" - + "[v=0xabcdef, ...]"); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, true, 2, Integer.MAX_VALUE)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? WHERE k=?; " - + "APPLY BATCH " - + "[v=0xdeadbeef, k=0]" - + "[...]"); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, true, Integer.MAX_VALUE, 4)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? WHERE k=?; " - + "APPLY BATCH " - + "[v=0xde..., k=0]" - + "[v=0xab..., k=1]"); - } - - private String formatRequest( - Request request, int maxQueryLength, boolean showValues, int maxValues, int maxValueLength) { - StringBuilder builder = new StringBuilder(); - formatter.appendRequest( - request, maxQueryLength, showValues, maxValues, maxValueLength, builder); - return builder.toString(); - } - - private PreparedStatement mockPreparedStatement(String query, Map variables) { - ImmutableList.Builder definitions = ImmutableList.builder(); - int i = 0; - for (Map.Entry entry : variables.entrySet()) { - definitions.add( - new DefaultColumnDefinition( - new ColumnSpec( - "test", - "foo", - entry.getKey(), - i, - RawType.PRIMITIVES.get(entry.getValue().getProtocolCode())), - context)); - } - return new DefaultPreparedStatement( - Bytes.fromHexString("0x"), - query, - DefaultColumnDefinitions.valueOf(definitions.build()), - Collections.emptyList(), - null, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - null, - null, - 
Integer.MIN_VALUE, - null, - null, - false, - context.getCodecRegistry(), - context.getProtocolVersion()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java deleted file mode 100644 index d798df8d191..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DataTypeDetachableTest { - - @Mock private AttachmentPoint attachmentPoint; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - public void simple_types_should_never_be_detached() { - // Because simple types don't need the codec registry, we consider them as always attached by - // default - for (DataType simpleType : ImmutableList.of(DataTypes.INT, DataTypes.custom("some.class"))) { - assertThat(simpleType.isDetached()).isFalse(); - assertThat(SerializationHelper.serializeAndDeserialize(simpleType).isDetached()).isFalse(); - } - } - - @Test - public void manually_created_tuple_should_be_detached() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT); - assertThat(tuple.isDetached()).isTrue(); - } - - @Test - public void attaching_tuple_should_attach_all_of_its_subtypes() { - TupleType tuple1 = DataTypes.tupleOf(DataTypes.INT); - TupleType tuple2 = DataTypes.tupleOf(DataTypes.TEXT, tuple1); - - assertThat(tuple1.isDetached()).isTrue(); - assertThat(tuple2.isDetached()).isTrue(); - - tuple2.attach(attachmentPoint); - - 
assertThat(tuple1.isDetached()).isFalse(); - } - - @Test - public void manually_created_udt_should_be_detached() { - UserDefinedType udt = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - assertThat(udt.isDetached()).isTrue(); - } - - @Test - public void attaching_udt_should_attach_all_of_its_subtypes() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - UserDefinedType udt = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), tuple) - .build(); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(udt.isDetached()).isTrue(); - - udt.attach(attachmentPoint); - - assertThat(tuple.isDetached()).isFalse(); - } - - @Test - public void list_should_be_attached_if_its_element_is() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - ListType list = DataTypes.listOf(tuple); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(list.isDetached()).isTrue(); - - tuple.attach(attachmentPoint); - - assertThat(list.isDetached()).isFalse(); - } - - @Test - public void attaching_list_should_attach_its_element() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - ListType list = DataTypes.listOf(tuple); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(list.isDetached()).isTrue(); - - list.attach(attachmentPoint); - - assertThat(tuple.isDetached()).isFalse(); - } - - @Test - public void set_should_be_attached_if_its_element_is() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - SetType set = DataTypes.setOf(tuple); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(set.isDetached()).isTrue(); - - tuple.attach(attachmentPoint); - - assertThat(set.isDetached()).isFalse(); - } 
- - @Test - public void attaching_set_should_attach_its_element() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - SetType set = DataTypes.setOf(tuple); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(set.isDetached()).isTrue(); - - set.attach(attachmentPoint); - - assertThat(tuple.isDetached()).isFalse(); - } - - @Test - public void map_should_be_attached_if_its_elements_are() { - TupleType tuple1 = DataTypes.tupleOf(DataTypes.INT); - TupleType tuple2 = DataTypes.tupleOf(DataTypes.TEXT); - MapType map = DataTypes.mapOf(tuple1, tuple2); - - assertThat(tuple1.isDetached()).isTrue(); - assertThat(tuple2.isDetached()).isTrue(); - assertThat(map.isDetached()).isTrue(); - - tuple1.attach(attachmentPoint); - assertThat(map.isDetached()).isTrue(); - - tuple2.attach(attachmentPoint); - assertThat(map.isDetached()).isFalse(); - } - - @Test - public void attaching_map_should_attach_all_of_its_subtypes() { - TupleType tuple1 = DataTypes.tupleOf(DataTypes.INT); - TupleType tuple2 = DataTypes.tupleOf(DataTypes.TEXT); - MapType map = DataTypes.mapOf(tuple1, tuple2); - - assertThat(tuple1.isDetached()).isTrue(); - assertThat(tuple2.isDetached()).isTrue(); - - map.attach(attachmentPoint); - - assertThat(tuple1.isDetached()).isFalse(); - assertThat(tuple2.isDetached()).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java deleted file mode 100644 index ccf53dd3a65..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.SerializationHelper; -import org.junit.Test; - -public class DataTypeSerializationTest { - - @Test - public void should_serialize_and_deserialize() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT); - UserDefinedType udt = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - - // Because primitive and custom types never use the codec registry, we consider them always - // attached - should_serialize_and_deserialize(DataTypes.INT, false); - should_serialize_and_deserialize(DataTypes.custom("some.class.name"), false); - - should_serialize_and_deserialize(tuple, true); - should_serialize_and_deserialize(udt, true); - should_serialize_and_deserialize(DataTypes.listOf(DataTypes.INT), false); - 
should_serialize_and_deserialize(DataTypes.listOf(tuple), true); - should_serialize_and_deserialize(DataTypes.setOf(udt), true); - should_serialize_and_deserialize(DataTypes.mapOf(tuple, udt), true); - } - - private void should_serialize_and_deserialize(DataType in, boolean expectDetached) { - // When - DataType out = SerializationHelper.serializeAndDeserialize(in); - - // Then - assertThat(out).isEqualTo(in); - assertThat(out.isDetached()).isEqualTo(expectDetached); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/PrimitiveTypeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/PrimitiveTypeTest.java deleted file mode 100644 index f9ae1d24f77..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/PrimitiveTypeTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PrimitiveTypeTest { - - @Test - public void should_report_protocol_code() { - assertThat(DataTypes.ASCII.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.ASCII); - assertThat(DataTypes.BIGINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.BIGINT); - assertThat(DataTypes.BLOB.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.BLOB); - assertThat(DataTypes.BOOLEAN.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.BOOLEAN); - assertThat(DataTypes.COUNTER.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.COUNTER); - assertThat(DataTypes.DECIMAL.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DECIMAL); - assertThat(DataTypes.DOUBLE.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DOUBLE); - assertThat(DataTypes.FLOAT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.FLOAT); - assertThat(DataTypes.INT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.INT); - assertThat(DataTypes.TIMESTAMP.getProtocolCode()) - .isEqualTo(ProtocolConstants.DataType.TIMESTAMP); - assertThat(DataTypes.UUID.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.UUID); - assertThat(DataTypes.VARINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.VARINT); - assertThat(DataTypes.TIMEUUID.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.TIMEUUID); - assertThat(DataTypes.INET.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.INET); - 
assertThat(DataTypes.DATE.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DATE); - assertThat(DataTypes.TEXT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.VARCHAR); - assertThat(DataTypes.TIME.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.TIME); - assertThat(DataTypes.SMALLINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.SMALLINT); - assertThat(DataTypes.TINYINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.TINYINT); - assertThat(DataTypes.DURATION.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DURATION); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_format_as_cql(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(DataTypes.ASCII.asCql(true, true)).isEqualTo("ascii"); - assertThat(DataTypes.BIGINT.asCql(true, true)).isEqualTo("bigint"); - assertThat(DataTypes.BLOB.asCql(true, true)).isEqualTo("blob"); - assertThat(DataTypes.BOOLEAN.asCql(true, true)).isEqualTo("boolean"); - assertThat(DataTypes.COUNTER.asCql(true, true)).isEqualTo("counter"); - assertThat(DataTypes.DECIMAL.asCql(true, true)).isEqualTo("decimal"); - assertThat(DataTypes.DOUBLE.asCql(true, true)).isEqualTo("double"); - assertThat(DataTypes.FLOAT.asCql(true, true)).isEqualTo("float"); - assertThat(DataTypes.INT.asCql(true, true)).isEqualTo("int"); - assertThat(DataTypes.TIMESTAMP.asCql(true, true)).isEqualTo("timestamp"); - assertThat(DataTypes.UUID.asCql(true, true)).isEqualTo("uuid"); - assertThat(DataTypes.VARINT.asCql(true, true)).isEqualTo("varint"); - assertThat(DataTypes.TIMEUUID.asCql(true, true)).isEqualTo("timeuuid"); - assertThat(DataTypes.INET.asCql(true, true)).isEqualTo("inet"); - assertThat(DataTypes.DATE.asCql(true, true)).isEqualTo("date"); - assertThat(DataTypes.TEXT.asCql(true, true)).isEqualTo("text"); - assertThat(DataTypes.TIME.asCql(true, true)).isEqualTo("time"); - assertThat(DataTypes.SMALLINT.asCql(true, 
true)).isEqualTo("smallint"); - assertThat(DataTypes.TINYINT.asCql(true, true)).isEqualTo("tinyint"); - assertThat(DataTypes.DURATION.asCql(true, true)).isEqualTo("duration"); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_format_as_string(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(DataTypes.ASCII.toString()).isEqualTo("ASCII"); - assertThat(DataTypes.BIGINT.toString()).isEqualTo("BIGINT"); - assertThat(DataTypes.BLOB.toString()).isEqualTo("BLOB"); - assertThat(DataTypes.BOOLEAN.toString()).isEqualTo("BOOLEAN"); - assertThat(DataTypes.COUNTER.toString()).isEqualTo("COUNTER"); - assertThat(DataTypes.DECIMAL.toString()).isEqualTo("DECIMAL"); - assertThat(DataTypes.DOUBLE.toString()).isEqualTo("DOUBLE"); - assertThat(DataTypes.FLOAT.toString()).isEqualTo("FLOAT"); - assertThat(DataTypes.INT.toString()).isEqualTo("INT"); - assertThat(DataTypes.TIMESTAMP.toString()).isEqualTo("TIMESTAMP"); - assertThat(DataTypes.UUID.toString()).isEqualTo("UUID"); - assertThat(DataTypes.VARINT.toString()).isEqualTo("VARINT"); - assertThat(DataTypes.TIMEUUID.toString()).isEqualTo("TIMEUUID"); - assertThat(DataTypes.INET.toString()).isEqualTo("INET"); - assertThat(DataTypes.DATE.toString()).isEqualTo("DATE"); - assertThat(DataTypes.TEXT.toString()).isEqualTo("TEXT"); - assertThat(DataTypes.TIME.toString()).isEqualTo("TIME"); - assertThat(DataTypes.SMALLINT.toString()).isEqualTo("SMALLINT"); - assertThat(DataTypes.TINYINT.toString()).isEqualTo("TINYINT"); - assertThat(DataTypes.DURATION.toString()).isEqualTo("DURATION"); - } finally { - Locale.setDefault(def); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/AsciiCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/AsciiCodecTest.java deleted file mode 100644 index 43c01ea35dc..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/AsciiCodecTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import org.junit.Test; - -public class AsciiCodecTest extends CodecTestBase { - public AsciiCodecTest() { - this.codec = TypeCodecs.ASCII; - } - - @Test - public void should_encode() { - assertThat(encode("hello")).isEqualTo("0x68656c6c6f"); - assertThat(encode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_encode_non_ascii() { - encode("hëllo"); - } - - @Test - public void should_decode() { - assertThat(decode("0x68656c6c6f")).isEqualTo("hello"); - assertThat(decode("0x")).isEmpty(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_non_ascii() { - decode("0x68c3ab6c6c6f"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java deleted file mode 100644 index c5360c90a7b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class BigIntCodecTest extends CodecTestBase { - - public BigIntCodecTest() { - this.codec = TypeCodecs.BIGINT; - } - - @Test - public void should_encode() { - assertThat(encode(1L)).isEqualTo("0x0000000000000001"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000001")).isEqualTo(1L); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - assertThat(format(1L)).isEqualTo("1"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isEqualTo(1L); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a number"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_if_out_of_range() { - parse(Long.MAX_VALUE + "0"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Long.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(long.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Long.class)).isTrue(); - assertThat(codec.accepts(long.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - 
@Test - public void should_accept_object() { - assertThat(codec.accepts(123L)).isTrue(); - assertThat(codec.accepts(Long.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java deleted file mode 100644 index ec1ab294911..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; -import org.junit.Test; - -public class BlobCodecTest extends CodecTestBase { - private static final ByteBuffer BUFFER = Bytes.fromHexString("0xcafebabe"); - - public BlobCodecTest() { - this.codec = TypeCodecs.BLOB; - } - - @Test - public void should_encode() { - assertThat(encode(BUFFER)).isEqualTo("0xcafebabe"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_input_and_encoded() { - int inputPosition = BUFFER.position(); - ByteBuffer encoded = codec.encode(BUFFER, ProtocolVersion.DEFAULT); - // Read from the encoded buffer to change its position - encoded.get(); - // The input buffer should not be affected - assertThat(BUFFER.position()).isEqualTo(inputPosition); - } - - @Test - public void should_decode() { - assertThat(decode("0xcafebabe")).isEqualTo(BUFFER); - assertThat(decode("0x").capacity()).isEqualTo(0); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_decoded_and_input() { - int inputPosition = BUFFER.position(); - ByteBuffer decoded = codec.decode(BUFFER, ProtocolVersion.DEFAULT); - // Read from the decoded buffer to change its position - decoded.get(); - // The input buffer should not be affected - assertThat(BUFFER.position()).isEqualTo(inputPosition); - } - - @Test - public void should_format() { - assertThat(format(BUFFER)).isEqualTo("0xcafebabe"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0xcafebabe")).isEqualTo(BUFFER); - 
assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a blob"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(ByteBuffer.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(MappedByteBuffer.class))) - .isFalse(); // covariance not allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(ByteBuffer.class)).isTrue(); - assertThat(codec.accepts(MappedByteBuffer.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(BUFFER)).isTrue(); - assertThat(codec.accepts(MappedByteBuffer.allocate(0))).isTrue(); // covariance allowed - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java deleted file mode 100644 index 57fcef1235d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class BooleanCodecTest extends CodecTestBase { - - public BooleanCodecTest() { - this.codec = TypeCodecs.BOOLEAN; - } - - @Test - public void should_encode() { - assertThat(encode(false)).isEqualTo("0x00"); - assertThat(encode(true)).isEqualTo("0x01"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00")).isFalse(); - assertThat(decode("0x01")).isTrue(); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(true)).isEqualTo("true"); - assertThat(format(false)).isEqualTo("false"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("true")).isEqualTo(true); - assertThat(parse("false")).isEqualTo(false); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("maybe"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Boolean.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(boolean.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Boolean.class)).isTrue(); - 
assertThat(codec.accepts(boolean.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(true)).isTrue(); - assertThat(codec.accepts(Boolean.TRUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java deleted file mode 100644 index 8a00cceda09..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; - -public class CodecTestBase { - protected TypeCodec codec; - - protected String encode(T t, ProtocolVersion protocolVersion) { - assertThat(codec).as("Must set codec before calling this method").isNotNull(); - ByteBuffer bytes = codec.encode(t, protocolVersion); - return (bytes == null) ? null : Bytes.toHexString(bytes); - } - - protected String encode(T t) { - return encode(t, ProtocolVersion.DEFAULT); - } - - protected T decode(String hexString, ProtocolVersion protocolVersion) { - assertThat(codec).as("Must set codec before calling this method").isNotNull(); - ByteBuffer bytes = (hexString == null) ? null : Bytes.fromHexString(hexString); - // Decode twice, to assert that decode leaves the input buffer in its original state - codec.decode(bytes, protocolVersion); - return codec.decode(bytes, protocolVersion); - } - - protected T decode(String hexString) { - return decode(hexString, ProtocolVersion.DEFAULT); - } - - protected String format(T t) { - assertThat(codec).as("Must set codec before calling this method").isNotNull(); - return codec.format(t); - } - - protected T parse(String s) { - assertThat(codec).as("Must set codec before calling this method").isNotNull(); - return codec.parse(s); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java deleted file mode 100644 index c18c6e76d7c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one 
- * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class CounterCodecTest extends CodecTestBase { - - public CounterCodecTest() { - this.codec = TypeCodecs.COUNTER; - } - - @Test - public void should_encode() { - assertThat(encode(1L)).isEqualTo("0x0000000000000001"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000001")).isEqualTo(1L); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - assertThat(format(1L)).isEqualTo("1"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isEqualTo(1L); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - 
@Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a number"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_if_out_of_range() { - parse(Long.MAX_VALUE + "0"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Long.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(long.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Long.class)).isTrue(); - assertThat(codec.accepts(long.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(123L)).isTrue(); - assertThat(codec.accepts(Long.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java deleted file mode 100644 index 43526f72e57..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class CqlDurationCodecTest extends CodecTestBase { - - private static final CqlDuration DURATION = CqlDuration.newInstance(1, 2, 3); - - public CqlDurationCodecTest() { - this.codec = TypeCodecs.DURATION; - } - - @Test - public void should_encode() { - assertThat(encode(DURATION)) - .isEqualTo( - "0x" - + "02" // 1 (encoded as 2 because of zig-zag encoding) - + "04" // 2 (same) - + "06" // 3 (same) - ); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x020406")).isEqualTo(DURATION); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalStateException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(DURATION)).isEqualTo("1mo2d3ns"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1mo2d3ns")).isEqualTo(DURATION); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void 
should_fail_to_parse_invalid_input() { - parse("not a duration"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(CqlDuration.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(CqlDuration.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(DURATION)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java deleted file mode 100644 index 4f04f3defec..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A sample user codec implementation that we use in our tests. - * - *

It maps a CQL string to a Java string containing its textual representation. - */ -public class CqlIntToStringCodec extends MappingCodec { - - public CqlIntToStringCodec() { - super(TypeCodecs.INT, GenericType.STRING); - } - - @Nullable - @Override - protected String innerToOuter(@Nullable Integer value) { - return value == null ? null : value.toString(); - } - - @Nullable - @Override - protected Integer outerToInner(@Nullable String value) { - return value == null ? null : Integer.parseInt(value); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java deleted file mode 100644 index a832b51cfec..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; -import org.junit.Test; - -public class CustomCodecTest extends CodecTestBase { - private static final ByteBuffer BUFFER = Bytes.fromHexString("0xcafebabe"); - - public CustomCodecTest() { - this.codec = TypeCodecs.custom(DataTypes.custom("com.test.MyClass")); - } - - @Test - public void should_encode() { - assertThat(encode(BUFFER)).isEqualTo("0xcafebabe"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_input_and_encoded() { - int inputPosition = BUFFER.position(); - ByteBuffer encoded = codec.encode(BUFFER, ProtocolVersion.DEFAULT); - // Read from the encoded buffer to change its position - encoded.get(); - // The input buffer should not be affected - assertThat(BUFFER.position()).isEqualTo(inputPosition); - } - - @Test - public void should_decode() { - assertThat(decode("0xcafebabe")).isEqualTo(BUFFER); - assertThat(decode("0x").capacity()).isEqualTo(0); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_decoded_and_input() { - int inputPosition = BUFFER.position(); - ByteBuffer decoded = codec.decode(BUFFER, ProtocolVersion.DEFAULT); - // Read from the decoded buffer to change its position - decoded.get(); - // The input buffer should not be affected - assertThat(BUFFER.position()).isEqualTo(inputPosition); - } - - @Test - public void should_format() { - assertThat(format(BUFFER)).isEqualTo("0xcafebabe"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void 
should_parse() { - assertThat(parse("0xcafebabe")).isEqualTo(BUFFER); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a blob"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(ByteBuffer.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(MappedByteBuffer.class))) - .isFalse(); // covariance not allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(ByteBuffer.class)).isTrue(); - assertThat(codec.accepts(MappedByteBuffer.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(BUFFER)).isTrue(); - assertThat(codec.accepts(MappedByteBuffer.allocate(0))).isTrue(); // covariance allowed - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java deleted file mode 100644 index 48388fbc692..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.time.LocalDate; -import org.junit.Test; - -public class DateCodecTest extends CodecTestBase { - - private static final LocalDate EPOCH = LocalDate.ofEpochDay(0); - private static final LocalDate MIN = LocalDate.parse("-5877641-06-23"); - private static final LocalDate MAX = LocalDate.parse("+5881580-07-11"); - - public DateCodecTest() { - this.codec = TypeCodecs.DATE; - } - - @Test - public void should_encode() { - // Dates are encoded as a number of days since the epoch, stored on 8 bytes with 0 in the - // middle. 
- assertThat(encode(MIN)).isEqualTo("0x00000000"); - // The "middle" is the one that has only the most significant bit set (because it has the same - // number of values before and after it, determined by all possible combinations of the - // remaining bits) - assertThat(encode(EPOCH)).isEqualTo("0x80000000"); - assertThat(encode(MAX)).isEqualTo("0xffffffff"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo(MIN); - assertThat(decode("0x80000000")).isEqualTo(EPOCH); - assertThat(decode("0xffffffff")).isEqualTo(MAX); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x00000000" + "0000"); - } - - @Test - public void should_format() { - // No need to test various values because the codec delegates directly to the JDK's formatter, - // which we assume does its job correctly. - assertThat(format(EPOCH)).isEqualTo("'1970-01-01'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - // Raw number - assertThat(parse("0")).isEqualTo(MIN); - assertThat(parse("2147483648")).isEqualTo(EPOCH); - - // Date format - assertThat(parse("'-5877641-06-23'")).isEqualTo(MIN); - assertThat(parse("'1970-01-01'")).isEqualTo(EPOCH); - assertThat(parse("'2014-01-01'")).isEqualTo(LocalDate.parse("2014-01-01")); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a date"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(LocalDate.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - 
assertThat(codec.accepts(LocalDate.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(EPOCH)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java deleted file mode 100644 index eac360fdcc5..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.math.BigDecimal; -import org.junit.Test; - -public class DecimalCodecTest extends CodecTestBase { - - public DecimalCodecTest() { - this.codec = TypeCodecs.DECIMAL; - } - - @Test - public void should_encode() { - assertThat(encode(BigDecimal.ONE)) - .isEqualTo( - "0x" - + "00000000" // scale - + "01" // unscaled value - ); - assertThat(encode(BigDecimal.valueOf(128, 4))) - .isEqualTo( - "0x" - + "00000004" // scale - + "0080" // unscaled value - ); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000001")).isEqualTo(BigDecimal.ONE); - assertThat(decode("0x000000040080")).isEqualTo(BigDecimal.valueOf(128, 4)); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(BigDecimal.ONE)).isEqualTo("1"); - assertThat(format(BigDecimal.valueOf(128, 4))).isEqualTo("0.0128"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isEqualTo(BigDecimal.ONE); - assertThat(parse("0.0128")).isEqualTo(BigDecimal.valueOf(128, 4)); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a decimal"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(BigDecimal.class))).isTrue(); - 
assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(BigDecimal.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(BigDecimal.ONE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java deleted file mode 100644 index f27081aa784..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class DoubleCodecTest extends CodecTestBase { - - public DoubleCodecTest() { - this.codec = TypeCodecs.DOUBLE; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. - assertThat(encode(0.0)).isEqualTo("0x0000000000000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000000")).isEqualTo(0.0); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(0.0)).isEqualTo("0.0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0.0")).isEqualTo(0.0); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a double"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Double.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(double.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Double.class)).isTrue(); - assertThat(codec.accepts(double.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void 
should_accept_object() { - assertThat(codec.accepts(123.45d)).isTrue(); - assertThat(codec.accepts(Double.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java deleted file mode 100644 index 62d5b549153..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class FloatCodecTest extends CodecTestBase { - - public FloatCodecTest() { - this.codec = TypeCodecs.FLOAT; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(0.0f)).isEqualTo("0x00000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo(0.0f); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(0.0f)).isEqualTo("0.0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0.0")).isEqualTo(0.0f); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a float"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Float.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(float.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Float.class)).isTrue(); - assertThat(codec.accepts(float.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(123.45f)).isTrue(); - assertThat(codec.accepts(Float.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java deleted file mode 100644 index e10fa695ba0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache 
Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import java.net.Inet4Address; -import java.net.InetAddress; -import java.net.UnknownHostException; -import org.junit.Test; - -public class InetCodecTest extends CodecTestBase { - - private static final InetAddress V4_ADDRESS; - private static final InetAddress V6_ADDRESS; - - static { - try { - V4_ADDRESS = InetAddress.getByName("127.0.0.1"); - V6_ADDRESS = InetAddress.getByName("::1"); - } catch (UnknownHostException e) { - fail("unexpected error", e); - throw new AssertionError(); // never reached - } - } - - public InetCodecTest() { - this.codec = TypeCodecs.INET; - } - - @Test - public void should_encode() { - assertThat(encode(V4_ADDRESS)).isEqualTo("0x7f000001"); - assertThat(encode(V6_ADDRESS)).isEqualTo("0x00000000000000000000000000000001"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() 
{ - assertThat(decode("0x7f000001")).isEqualTo(V4_ADDRESS); - assertThat(decode("0x00000000000000000000000000000001")).isEqualTo(V6_ADDRESS); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_incorrect_byte_count() { - decode("0x" + Strings.repeat("00", 7)); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x" + Strings.repeat("00", 17)); - } - - @Test - public void should_format() { - assertThat(format(V4_ADDRESS)).isEqualTo("'127.0.0.1'"); - assertThat(format(V6_ADDRESS)).isEqualTo("'0:0:0:0:0:0:0:1'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("'127.0.0.1'")).isEqualTo(V4_ADDRESS); - assertThat(parse("'0:0:0:0:0:0:0:1'")).isEqualTo(V6_ADDRESS); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not an address"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(InetAddress.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Inet4Address.class))) - .isFalse(); // covariance not allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(InetAddress.class)).isTrue(); - assertThat(codec.accepts(Inet4Address.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(V4_ADDRESS)).isTrue(); // covariance allowed - assertThat(codec.accepts(V6_ADDRESS)).isTrue(); // covariance allowed - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java deleted file mode 100644 index b5268a7e844..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class IntCodecTest extends CodecTestBase { - - public IntCodecTest() { - this.codec = TypeCodecs.INT; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(0)).isEqualTo("0x00000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo(0); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000"); - } - - @Test - public void should_format() { - assertThat(format(0)).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo(0); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not an int"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Integer.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(int.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Long.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Integer.class)).isTrue(); - assertThat(codec.accepts(int.class)).isTrue(); - assertThat(codec.accepts(Long.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(123)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Long.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java deleted file mode 100644 index 975aa3a1428..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.ArrayList; -import java.util.List; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ListCodecTest extends CodecTestBase> { - - @Mock private TypeCodec elementCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(elementCodec.getCqlType()).thenReturn(DataTypes.INT); - 
when(elementCodec.getJavaType()).thenReturn(GenericType.INTEGER); - codec = TypeCodecs.listOf(elementCodec); - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_list() { - assertThat(encode(new ArrayList<>())).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_list() { - when(elementCodec.encode(1, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x01")); - when(elementCodec.encode(2, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x0002")); - when(elementCodec.encode(3, ProtocolVersion.DEFAULT)) - .thenReturn(Bytes.fromHexString("0x000003")); - - assertThat(encode(ImmutableList.of(1, 2, 3))) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "0000000101" // size + contents of element 1 - + "000000020002" // size + contents of element 2 - + "00000003000003" // size + contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_list() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_list() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_list() { - when(elementCodec.decode(Bytes.fromHexString("0x01"), ProtocolVersion.DEFAULT)).thenReturn(1); - when(elementCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - when(elementCodec.decode(Bytes.fromHexString("0x000003"), ProtocolVersion.DEFAULT)) - .thenReturn(3); - - assertThat(decode("0x" + "00000003" + "0000000101" + "000000020002" + "00000003000003")) - .containsExactly(1, 2, 3); - } - - @Test - public void should_decode_list_with_null_elements() { - when(elementCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - assertThat( - decode( - "0x" - + "00000002" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - + "00000002" // size of element 2 - + "0002" // contents of element 2 - )) 
- .containsExactly(null, 2); - } - - @Test - public void should_format_null_list() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_list() { - assertThat(format(new ArrayList<>())).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_list() { - when(elementCodec.format(1)).thenReturn("a"); - when(elementCodec.format(2)).thenReturn("b"); - when(elementCodec.format(3)).thenReturn("c"); - - assertThat(format(ImmutableList.of(1, 2, 3))).isEqualTo("[a,b,c]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_list() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_list() { - when(elementCodec.parse("a")).thenReturn(1); - when(elementCodec.parse("b")).thenReturn(2); - when(elementCodec.parse("c")).thenReturn(3); - - assertThat(parse("[a,b,c]")).containsExactly(1, 2, 3); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_list() { - parse("not a list"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java deleted file mode 100644 index 94cb33a5a99..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.LinkedHashMap; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class MapCodecTest extends CodecTestBase> { - - @Mock private TypeCodec keyCodec; - @Mock private TypeCodec valueCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(keyCodec.getCqlType()).thenReturn(DataTypes.TEXT); - when(keyCodec.getJavaType()).thenReturn(GenericType.STRING); - - when(valueCodec.getCqlType()).thenReturn(DataTypes.INT); - when(valueCodec.getJavaType()).thenReturn(GenericType.INTEGER); - codec = TypeCodecs.mapOf(keyCodec, valueCodec); - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_map() { - assertThat(encode(new LinkedHashMap<>())).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_map() { - 
when(keyCodec.encode("a", ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x10")); - when(keyCodec.encode("b", ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x2000")); - when(keyCodec.encode("c", ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x300000")); - - when(valueCodec.encode(1, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x01")); - when(valueCodec.encode(2, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x0002")); - when(valueCodec.encode(3, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x000003")); - - assertThat(encode(ImmutableMap.of("a", 1, "b", 2, "c", 3))) - .isEqualTo( - "0x" - + "00000003" // number of key-value pairs - + "0000000110" // size + contents of key 1 - + "0000000101" // size + contents of value 1 - + "000000022000" // size + contents of key 2 - + "000000020002" // size + contents of value 2 - + "00000003300000" // size + contents of key 3 - + "00000003000003" // size + contents of value 3 - ); - } - - @Test - public void should_decode_null_as_empty_map() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_map() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_map() { - when(keyCodec.decode(Bytes.fromHexString("0x10"), ProtocolVersion.DEFAULT)).thenReturn("a"); - when(keyCodec.decode(Bytes.fromHexString("0x2000"), ProtocolVersion.DEFAULT)).thenReturn("b"); - when(keyCodec.decode(Bytes.fromHexString("0x300000"), ProtocolVersion.DEFAULT)).thenReturn("c"); - - when(valueCodec.decode(Bytes.fromHexString("0x01"), ProtocolVersion.DEFAULT)).thenReturn(1); - when(valueCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - when(valueCodec.decode(Bytes.fromHexString("0x000003"), ProtocolVersion.DEFAULT)).thenReturn(3); - - assertThat( - decode( - "0x" - + "00000003" - + "0000000110" - + "0000000101" - + "000000022000" - + "000000020002" - + "00000003300000" - + 
"00000003000003")) - .containsOnlyKeys("a", "b", "c") - .containsEntry("a", 1) - .containsEntry("b", 2) - .containsEntry("c", 3); - } - - @Test - public void should_decode_map_with_null_elements() { - when(keyCodec.decode(Bytes.fromHexString("0x10"), ProtocolVersion.DEFAULT)).thenReturn("a"); - when(valueCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - assertThat(decode("0x" + "00000002" + "0000000110" + "FFFFFFFF" + "FFFFFFFF" + "000000020002")) - .containsOnlyKeys("a", null) - .containsEntry("a", null) - .containsEntry(null, 2); - } - - @Test - public void should_format_null_map() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_map() { - assertThat(format(new LinkedHashMap<>())).isEqualTo("{}"); - } - - @Test - public void should_format_non_empty_map() { - when(keyCodec.format("a")).thenReturn("foo"); - when(keyCodec.format("b")).thenReturn("bar"); - when(keyCodec.format("c")).thenReturn("baz"); - - when(valueCodec.format(1)).thenReturn("qux"); - when(valueCodec.format(2)).thenReturn("quux"); - when(valueCodec.format(3)).thenReturn("quuz"); - - assertThat(format(ImmutableMap.of("a", 1, "b", 2, "c", 3))) - .isEqualTo("{foo:qux,bar:quux,baz:quuz}"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_map() { - assertThat(parse("{}")).isEmpty(); - } - - @Test - public void should_parse_non_empty_map() { - when(keyCodec.parse("foo")).thenReturn("a"); - when(keyCodec.parse("bar")).thenReturn("b"); - when(keyCodec.parse("baz")).thenReturn("c"); - - when(valueCodec.parse("qux")).thenReturn(1); - when(valueCodec.parse("quux")).thenReturn(2); - when(valueCodec.parse("quuz")).thenReturn(3); - - assertThat(parse("{foo:qux,bar:quux,baz:quuz}")) - .containsOnlyKeys("a", "b", "c") - .containsEntry("a", 1) - .containsEntry("b", 2) - .containsEntry("c", 3); - } - - 
@Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_map() { - parse("not a map"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MappingCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MappingCodecTest.java deleted file mode 100644 index f78dc774f62..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MappingCodecTest.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class MappingCodecTest extends CodecTestBase { - - public MappingCodecTest() { - this.codec = new CqlIntToStringCodec(); - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode("0")).isEqualTo("0x00000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo("0"); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000"); - } - - @Test - public void should_format() { - assertThat(format("0")).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo("0"); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not an int"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(String.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(int.class))).isFalse(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(String.class)).isTrue(); - assertThat(codec.accepts(int.class)).isFalse(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts("123")).isTrue(); - // codec accepts any String, even if it can't be encoded - assertThat(codec.accepts("not an int")).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } - - @Test - public void should_expose_inner_and_outer_java_types() { - assertThat(((MappingCodec) codec).getInnerJavaType()).isEqualTo(GenericType.INTEGER); - 
assertThat(codec.getJavaType()).isEqualTo(GenericType.STRING); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java deleted file mode 100644 index a302357c9f3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.LinkedHashSet; -import java.util.Set; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class SetCodecTest extends CodecTestBase> { - - @Mock private TypeCodec elementCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(elementCodec.getCqlType()).thenReturn(DataTypes.INT); - when(elementCodec.getJavaType()).thenReturn(GenericType.INTEGER); - codec = TypeCodecs.setOf(elementCodec); - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_set() { - assertThat(encode(new LinkedHashSet<>())).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_set() { - when(elementCodec.encode(1, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x01")); - when(elementCodec.encode(2, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x0002")); - when(elementCodec.encode(3, ProtocolVersion.DEFAULT)) - .thenReturn(Bytes.fromHexString("0x000003")); - - assertThat(encode(ImmutableSet.of(1, 2, 3))) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "0000000101" // size + contents of element 1 - + "000000020002" // size + contents of element 2 - + "00000003000003" // size + contents of element 3 - ); - } - - @Test - public void 
should_decode_null_as_empty_set() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_set() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_set() { - when(elementCodec.decode(Bytes.fromHexString("0x01"), ProtocolVersion.DEFAULT)).thenReturn(1); - when(elementCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - when(elementCodec.decode(Bytes.fromHexString("0x000003"), ProtocolVersion.DEFAULT)) - .thenReturn(3); - - assertThat(decode("0x" + "00000003" + "0000000101" + "000000020002" + "00000003000003")) - .containsExactly(1, 2, 3); - } - - @Test - public void should_decode_set_with_null_elements() { - when(elementCodec.decode(Bytes.fromHexString("0x01"), ProtocolVersion.DEFAULT)).thenReturn(1); - assertThat(decode("0x" + "00000002" + "0000000101" + "FFFFFFFF")).containsExactly(1, null); - } - - @Test - public void should_format_null_set() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_set() { - assertThat(format(new LinkedHashSet<>())).isEqualTo("{}"); - } - - @Test - public void should_format_non_empty_set() { - when(elementCodec.format(1)).thenReturn("a"); - when(elementCodec.format(2)).thenReturn("b"); - when(elementCodec.format(3)).thenReturn("c"); - - assertThat(format(ImmutableSet.of(1, 2, 3))).isEqualTo("{a,b,c}"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_set() { - assertThat(parse("{}")).isEmpty(); - } - - @Test - public void should_parse_non_empty_set() { - when(elementCodec.parse("a")).thenReturn(1); - when(elementCodec.parse("b")).thenReturn(2); - when(elementCodec.parse("c")).thenReturn(3); - - assertThat(parse("{a,b,c}")).containsExactly(1, 2, 3); - } - - @Test(expected = IllegalArgumentException.class) - public void 
should_fail_to_parse_malformed_set() { - parse("not a set"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodecTest.java deleted file mode 100644 index 3f40efb16ac..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodecTest.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import org.junit.Test; - -public class SimpleBlobCodecTest extends CodecTestBase { - - private static final ByteBuffer BUFFER = Bytes.fromHexString("0xcafebabe"); - private static final byte[] ARRAY = Bytes.getArray(Bytes.fromHexString("0xcafebabe")); - - public SimpleBlobCodecTest() { - this.codec = ExtraTypeCodecs.BLOB_TO_ARRAY; - } - - @Test - public void should_encode() { - assertThat(encode(ARRAY)).isEqualTo("0xcafebabe"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_input_and_encoded() { - ByteBuffer encoded = codec.encode(ARRAY, ProtocolVersion.DEFAULT); - assertThat(encoded).isNotNull(); - assertThat(ARRAY).isEqualTo(Bytes.getArray(encoded)); - } - - @Test - public void should_decode() { - assertThat(decode("0xcafebabe")).isEqualTo(ARRAY); - assertThat(decode("0x")).hasSize(0); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_decoded_and_input() { - byte[] decoded = codec.decode(BUFFER, ProtocolVersion.DEFAULT); - assertThat(decoded).isEqualTo(ARRAY); - } - - @Test - public void should_format() { - assertThat(format(ARRAY)).isEqualTo("0xcafebabe"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0xcafebabe")).isEqualTo(ARRAY); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a 
blob"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(byte[].class))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(byte[].class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(ARRAY)).isTrue(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java deleted file mode 100644 index 483dd0b65bd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class SmallIntCodecTest extends CodecTestBase { - - public SmallIntCodecTest() { - this.codec = TypeCodecs.SMALLINT; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. - assertThat(encode((short) 0)).isEqualTo("0x0000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000")).isEqualTo((short) 0); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x00"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x000000"); - } - - @Test - public void should_format() { - assertThat(format((short) 0)).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo((short) 0); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a smallint"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Short.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(short.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - 
assertThat(codec.accepts(Short.class)).isTrue(); - assertThat(codec.accepts(short.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(((short) 123))).isTrue(); - assertThat(codec.accepts(Short.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TextCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TextCodecTest.java deleted file mode 100644 index a42178544d4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TextCodecTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class TextCodecTest extends CodecTestBase { - - public TextCodecTest() { - // We will test edge cases of ASCII in AsciiCodecTest - this.codec = TypeCodecs.TEXT; - } - - @Test - public void should_encode() { - assertThat(encode("hello")).isEqualTo("0x68656c6c6f"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x68656c6c6f")).isEqualTo("hello"); - assertThat(decode("0x")).isEmpty(); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_format() { - assertThat(format("hello")).isEqualTo("'hello'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("'hello'")).isEqualTo("hello"); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a string"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(String.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(String.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts("hello")).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java deleted file mode 100644 index 6d77efd396a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.time.LocalTime; -import java.time.temporal.ChronoUnit; -import org.junit.Test; - -public class TimeCodecTest extends CodecTestBase { - - public TimeCodecTest() { - this.codec = TypeCodecs.TIME; - } - - @Test - public void should_encode() { - assertThat(encode(LocalTime.MIDNIGHT)).isEqualTo("0x0000000000000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000000")).isEqualTo(LocalTime.MIDNIGHT); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - // No need to test various values because the codec delegates directly to the JDK's formatter, - // which we assume does its job correctly. 
- assertThat(format(LocalTime.MIDNIGHT)).isEqualTo("'00:00:00.000000000'"); - assertThat(format(LocalTime.NOON.plus(13799999994L, ChronoUnit.NANOS))) - .isEqualTo("'12:00:13.799999994'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - // Raw number - assertThat(parse("'0'")).isEqualTo(LocalTime.MIDNIGHT); - - // String format - assertThat(parse("'00:00'")).isEqualTo(LocalTime.MIDNIGHT); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a time"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(LocalTime.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(LocalTime.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(LocalTime.MIDNIGHT)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java deleted file mode 100644 index 416bee8e4df..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.util.UUID; -import org.junit.Test; - -public class TimeUuidCodecTest extends CodecTestBase { - - private static final UUID TIME_BASED = new UUID(6342305776366260711L, -5736720392086604862L); - private static final UUID NOT_TIME_BASED = new UUID(2, 1); - - public TimeUuidCodecTest() { - this.codec = TypeCodecs.TIMEUUID; - - assertThat(TIME_BASED.version()).isEqualTo(1); - assertThat(NOT_TIME_BASED.version()).isNotEqualTo(1); - } - - @Test - public void should_encode_time_uuid() { - assertThat(encode(TIME_BASED)).isEqualTo("0x58046580293811e7b0631332a5f033c2"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_not_encode_non_time_uuid() { - assertThat(codec.accepts(NOT_TIME_BASED)).isFalse(); - encode(NOT_TIME_BASED); - } - - @Test - public void should_format_time_uuid() { - assertThat(format(TIME_BASED)).isEqualTo("58046580-2938-11e7-b063-1332a5f033c2"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_not_format_non_time_uuid() { - format(NOT_TIME_BASED); - } - - @Test - public void should_accept_generic_type() { - 
assertThat(codec.accepts(GenericType.of(UUID.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(UUID.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(TIME_BASED)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java deleted file mode 100644 index 5cfd17da622..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static java.time.ZoneOffset.ofHours; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class TimestampCodecTest extends CodecTestBase { - - public TimestampCodecTest() { - // force a given timezone for reproducible results in should_format - codec = new TimestampCodec(ZoneOffset.UTC); - } - - @Test - public void should_encode() { - assertThat(encode(Instant.EPOCH)).isEqualTo("0x0000000000000000"); - assertThat(encode(Instant.ofEpochMilli(128))).isEqualTo("0x0000000000000080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000000").toEpochMilli()).isEqualTo(0); - assertThat(decode("0x0000000000000080").toEpochMilli()).isEqualTo(128); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - // No need to test various values because the codec delegates directly to SimpleDateFormat, - // which we assume does its job correctly. 
- assertThat(format(Instant.EPOCH)).isEqualTo("'1970-01-01T00:00:00.000Z'"); - assertThat(format(Instant.parse("2018-08-16T15:59:34.123Z"))) - .isEqualTo("'2018-08-16T15:59:34.123Z'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @DataProvider - public static Iterable timeZones() { - return Lists.newArrayList( - ZoneId.systemDefault(), - ZoneOffset.UTC, - ZoneOffset.ofHoursMinutes(3, 30), - ZoneId.of("Europe/Paris"), - ZoneId.of("GMT+7")); - } - - @Test - @UseDataProvider("timeZones") - public void should_parse(ZoneId defaultTimeZone) { - TimestampCodec codec = new TimestampCodec(defaultTimeZone); - - // Raw numbers - assertThat(codec.parse("'0'")).isEqualTo(Instant.EPOCH); - assertThat(codec.parse("'-1'")).isEqualTo(Instant.EPOCH.minusMillis(1)); - assertThat(codec.parse("1534463100000")).isEqualTo(Instant.ofEpochMilli(1534463100000L)); - - // Date formats - Instant expected; - - // date without time, without time zone - expected = LocalDate.parse("2017-01-01").atStartOfDay().atZone(defaultTimeZone).toInstant(); - assertThat(codec.parse("'2017-01-01'")).isEqualTo(expected); - - // date without time, with time zone - expected = LocalDate.parse("2018-08-16").atStartOfDay().atZone(ofHours(2)).toInstant(); - assertThat(codec.parse("'2018-08-16+02'")).isEqualTo(expected); - assertThat(codec.parse("'2018-08-16+0200'")).isEqualTo(expected); - assertThat(codec.parse("'2018-08-16+02:00'")).isEqualTo(expected); - assertThat(codec.parse("'2018-08-16 CEST'")).isEqualTo(expected); - - // date with time, without time zone - expected = LocalDateTime.parse("2018-08-16T23:45").atZone(defaultTimeZone).toInstant(); - assertThat(codec.parse("'2018-08-16T23:45'")).isEqualTo(expected); - assertThat(codec.parse("'2018-08-16 23:45'")).isEqualTo(expected); - - // date with time + seconds, without time zone - expected = LocalDateTime.parse("2019-12-31T16:08:38").atZone(defaultTimeZone).toInstant(); - assertThat(codec.parse("'2019-12-31T16:08:38'")).isEqualTo(expected); - 
assertThat(codec.parse("'2019-12-31 16:08:38'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, without time zone - expected = LocalDateTime.parse("1950-02-28T12:00:59.230").atZone(defaultTimeZone).toInstant(); - assertThat(codec.parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); - assertThat(codec.parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); - - // date with time, with time zone - expected = ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00").toInstant(); - assertThat(codec.parse("'1973-06-23T23:59+01'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23 23:59+01'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); - - // date with time + seconds, with time zone - expected = ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00").toInstant(); - assertThat(codec.parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, with time zone - expected = ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00").toInstant(); - 
assertThat(codec.parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); - - assertThat(codec.parse("NULL")).isNull(); - assertThat(codec.parse("null")).isNull(); - assertThat(codec.parse("")).isNull(); - assertThat(codec.parse(null)).isNull(); - } - - @Test - public void should_fail_to_parse_invalid_input() { - assertThatThrownBy(() -> parse("not a timestamp")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); - assertThatThrownBy(() -> parse("'not a timestamp'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Instant.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Instant.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(Instant.EPOCH)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java deleted file mode 100644 
index 358c36e9386..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class TinyIntCodecTest extends CodecTestBase { - - public TinyIntCodecTest() { - this.codec = TypeCodecs.TINYINT; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode((byte) 0)).isEqualTo("0x00"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00")).isEqualTo((byte) 0); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format((byte) 0)).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo((byte) 0); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a tinyint"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Byte.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(byte.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Byte.class)).isTrue(); - assertThat(codec.accepts(byte.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(((byte) 123))).isTrue(); - assertThat(codec.accepts(Byte.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java deleted file mode 100644 index c51eea20c2e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; 
-import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class TupleCodecTest extends CodecTestBase { - - @Mock private AttachmentPoint attachmentPoint; - @Mock private CodecRegistry codecRegistry; - private PrimitiveIntCodec intCodec; - private TypeCodec doubleCodec; - private TypeCodec textCodec; - - private TupleType tupleType; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(attachmentPoint.getCodecRegistry()).thenReturn(codecRegistry); - when(attachmentPoint.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); - - intCodec = spy(TypeCodecs.INT); - doubleCodec = spy(TypeCodecs.DOUBLE); - textCodec = spy(TypeCodecs.TEXT); - - // Called by the getters/setters - when(codecRegistry.codecFor(DataTypes.INT, Integer.class)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE, Double.class)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT, String.class)).thenAnswer(i -> textCodec); - - // Called by format/parse - when(codecRegistry.codecFor(DataTypes.INT)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT)).thenAnswer(i -> textCodec); - - tupleType = - new DefaultTupleType( - ImmutableList.of(DataTypes.INT, DataTypes.DOUBLE, DataTypes.TEXT), attachmentPoint); - - codec = TypeCodecs.tupleOf(tupleType); - } - - @Test - public void should_encode_null_tuple() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_tuple() { - TupleValue tuple = tupleType.newValue(); - tuple = tuple.setInt(0, 1); - tuple = tuple.setToNull(1); - tuple = tuple.setString(2, "a"); - - assertThat(encode(tuple)) - .isEqualTo( - "0x" - + ("00000004" + "00000001") // size and contents of 
field 0 - + "ffffffff" // null field 1 - + ("00000001" + "61") // size and contents of field 2 - ); - - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - // null values are handled directly in the tuple codec, without calling the child codec: - verifyZeroInteractions(doubleCodec); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - } - - @Test - public void should_decode_null_tuple() { - assertThat(decode(null)).isNull(); - } - - @Test - public void should_decode_tuple() { - TupleValue tuple = decode("0x" + ("00000004" + "00000001") + "ffffffff" + ("00000001" + "61")); - - assertThat(tuple.getInt(0)).isEqualTo(1); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.getString(2)).isEqualTo("a"); - - verify(intCodec).decodePrimitive(Bytes.fromHexString("0x00000001"), ProtocolVersion.DEFAULT); - verifyZeroInteractions(doubleCodec); - verify(textCodec).decode(Bytes.fromHexString("0x61"), ProtocolVersion.DEFAULT); - } - - /** Test for JAVA-2557. Ensures that the codec can decode null fields with any negative length. 
*/ - @Test - public void should_decode_negative_element_length_as_null_field() { - TupleValue tuple = - decode( - "0x" - + "ffffffff" // field1 has length -1 - + "fffffffe" // field2 has length -2 - + "80000000" // field3 has length Integer.MIN_VALUE (-2147483648) - ); - - assertThat(tuple.isNull(0)).isTrue(); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.isNull(2)).isTrue(); - - verifyZeroInteractions(intCodec); - verifyZeroInteractions(doubleCodec); - verifyZeroInteractions(textCodec); - } - - @Test - public void should_format_null_tuple() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_tuple() { - TupleValue tuple = tupleType.newValue(); - tuple = tuple.setInt(0, 1); - tuple = tuple.setToNull(1); - tuple = tuple.setString(2, "a"); - - assertThat(format(tuple)).isEqualTo("(1,NULL,'a')"); - - verify(intCodec).format(1); - verify(doubleCodec).format(null); - verify(textCodec).format("a"); - } - - @Test - public void should_parse_null_tuple() { - assertThat(parse(null)).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("NULL")).isNull(); - } - - @Test - public void should_parse_empty_tuple() { - TupleValue tuple = parse("()"); - - assertThat(tuple.isNull(0)).isTrue(); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.isNull(2)).isTrue(); - - verifyNoMoreInteractions(intCodec); - verifyNoMoreInteractions(doubleCodec); - verifyNoMoreInteractions(textCodec); - } - - @Test - public void should_parse_partial_tuple() { - TupleValue tuple = parse("(1,NULL)"); - - assertThat(tuple.getInt(0)).isEqualTo(1); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.isNull(2)).isTrue(); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verifyNoMoreInteractions(textCodec); - } - - @Test - public void should_parse_full_tuple() { - TupleValue tuple = parse("(1,NULL,'a')"); - - assertThat(tuple.getInt(0)).isEqualTo(1); - assertThat(tuple.isNull(1)).isTrue(); - 
assertThat(tuple.getString(2)).isEqualTo("a"); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verify(textCodec).parse("'a'"); - } - - @Test - public void should_parse_tuple_with_extra_whitespace() { - TupleValue tuple = parse(" ( 1 , NULL , 'a' ) "); - - assertThat(tuple.getInt(0)).isEqualTo(1); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.getString(2)).isEqualTo("a"); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verify(textCodec).parse("'a'"); - } - - @Test - public void should_fail_to_parse_invalid_input() { - // general tuple structure invalid - assertThatThrownBy(() -> parse("not a tuple")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"not a tuple\", at character 0 expecting '(' but got 'n'"); - assertThatThrownBy(() -> parse(" ( ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \" ( \", at field 0 (character 3) expecting CQL value or ')', got EOF"); - assertThatThrownBy(() -> parse("( [")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"( [\", invalid CQL value at field 0 (character 2)"); - assertThatThrownBy(() -> parse("( 12 , ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"( 12 , \", at field 1 (character 7) expecting CQL value or ')', got EOF"); - assertThatThrownBy(() -> parse("( 12 12.34 ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"( 12 12.34 \", at field 0 (character 5) expecting ',' but got '1'"); - assertThatThrownBy(() -> parse("(1234,12.34,'text'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,12.34,'text'\", at field 2 (character 18) expecting ',' or ')', but got EOF"); - assertThatThrownBy(() -> parse("(1234,12.34,'text'))")) - 
.isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,12.34,'text'))\", at character 19 expecting EOF or blank, but got \")\""); - assertThatThrownBy(() -> parse("())")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"())\", at character 2 expecting EOF or blank, but got \")\""); - assertThatThrownBy(() -> parse("(1234,12.34,'text') extra")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,12.34,'text') extra\", at character 20 expecting EOF or blank, but got \"extra\""); - // element syntax invalid - assertThatThrownBy(() -> parse("(not a valid int,12.34,'text')")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(not a valid int,12.34,'text')\", " - + "invalid CQL value at field 0 (character 1): " - + "Cannot parse 32-bits int value from \"not\"") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> parse("(1234,not a valid double,'text')")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,not a valid double,'text')\", " - + "invalid CQL value at field 1 (character 6): " - + "Cannot parse 64-bits double value from \"not\"") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> parse("(1234,12.34,not a valid text)")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,12.34,not a valid text)\", " - + "invalid CQL value at field 2 (character 12): " - + "text or varchar values must be enclosed by single quotes") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(TupleValue.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(DefaultTupleValue.class))) - .isFalse(); // covariance not 
allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(DefaultTupleValue.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(tupleType.newValue())).isTrue(); - assertThat(codec.accepts(new DefaultTupleValue(tupleType))).isTrue(); // covariance allowed - assertThat(codec.accepts("not a tuple")).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java deleted file mode 100644 index af94247f937..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java +++ /dev/null @@ -1,362 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; -import com.datastax.oss.driver.internal.core.type.DefaultUserDefinedType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class UdtCodecTest extends CodecTestBase { - - @Mock private AttachmentPoint attachmentPoint; - @Mock private CodecRegistry codecRegistry; - private PrimitiveIntCodec intCodec; - private TypeCodec doubleCodec; - private TypeCodec textCodec; - - private UserDefinedType userType; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(attachmentPoint.getCodecRegistry()).thenReturn(codecRegistry); - 
when(attachmentPoint.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); - - intCodec = spy(TypeCodecs.INT); - doubleCodec = spy(TypeCodecs.DOUBLE); - textCodec = spy(TypeCodecs.TEXT); - - // Called by the getters/setters - when(codecRegistry.codecFor(DataTypes.INT, Integer.class)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE, Double.class)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT, String.class)).thenAnswer(i -> textCodec); - - // Called by format/parse - when(codecRegistry.codecFor(DataTypes.INT)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT)).thenAnswer(i -> textCodec); - - userType = - new DefaultUserDefinedType( - CqlIdentifier.fromInternal("ks"), - CqlIdentifier.fromInternal("type"), - false, - ImmutableList.of( - CqlIdentifier.fromInternal("field1"), - CqlIdentifier.fromInternal("field2"), - CqlIdentifier.fromInternal("field3")), - ImmutableList.of(DataTypes.INT, DataTypes.DOUBLE, DataTypes.TEXT), - attachmentPoint); - - codec = TypeCodecs.udtOf(userType); - } - - @Test - public void should_encode_null_udt() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_udt() { - UdtValue udt = userType.newValue(); - udt = udt.setInt("field1", 1); - udt = udt.setToNull("field2"); - udt = udt.setString("field3", "a"); - - assertThat(encode(udt)) - .isEqualTo( - "0x" - + ("00000004" + "00000001") // size and contents of field 0 - + "ffffffff" // null field 1 - + ("00000001" + "61") // size and contents of field 2 - ); - - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - // null values are handled directly in the udt codec, without calling the child codec: - verifyZeroInteractions(doubleCodec); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - } - - @Test - public void should_decode_null_udt() { - assertThat(decode(null)).isNull(); - } - - @Test 
- public void should_decode_udt() { - UdtValue udt = decode("0x" + ("00000004" + "00000001") + "ffffffff" + ("00000001" + "61")); - - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.getString(2)).isEqualTo("a"); - - verify(intCodec).decodePrimitive(Bytes.fromHexString("0x00000001"), ProtocolVersion.DEFAULT); - verifyZeroInteractions(doubleCodec); - verify(textCodec).decode(Bytes.fromHexString("0x61"), ProtocolVersion.DEFAULT); - } - - @Test - public void should_decode_udt_when_too_many_fields() { - UdtValue udt = - decode( - "0x" - + ("00000004" + "00000001") - + "ffffffff" - + ("00000001" + "61") - // extra contents - + "ffffffff"); - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.getString(2)).isEqualTo("a"); - } - - /** Test for JAVA-2557. Ensures that the codec can decode null fields with any negative length. */ - @Test - public void should_decode_negative_element_length_as_null_field() { - UdtValue udt = - decode( - "0x" - + "ffffffff" // field1 has length -1 - + "fffffffe" // field2 has length -2 - + "80000000" // field3 has length Integer.MIN_VALUE (-2147483648) - ); - - assertThat(udt.isNull(0)).isTrue(); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.isNull(2)).isTrue(); - - verifyZeroInteractions(intCodec); - verifyZeroInteractions(doubleCodec); - verifyZeroInteractions(textCodec); - } - - @Test - public void should_decode_absent_element_as_null_field() { - UdtValue udt = decode("0x"); - - assertThat(udt.isNull(0)).isTrue(); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.isNull(2)).isTrue(); - - verifyZeroInteractions(intCodec); - verifyZeroInteractions(doubleCodec); - verifyZeroInteractions(textCodec); - } - - @Test - public void should_format_null_udt() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_udt() { - UdtValue udt = userType.newValue(); - udt = udt.setInt(0, 1); - udt = udt.setToNull(1); - udt 
= udt.setString(2, "a"); - - assertThat(format(udt)).isEqualTo("{field1:1,field2:NULL,field3:'a'}"); - - verify(intCodec).format(1); - verify(doubleCodec).format(null); - verify(textCodec).format("a"); - } - - @Test - public void should_parse_null_udt() { - assertThat(parse(null)).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("NULL")).isNull(); - } - - @Test - public void should_parse_empty_udt() { - UdtValue udt = parse("{}"); - - assertThat(udt.isNull(0)).isTrue(); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.isNull(2)).isTrue(); - - verifyNoMoreInteractions(intCodec); - verifyNoMoreInteractions(doubleCodec); - verifyNoMoreInteractions(textCodec); - } - - @Test - public void should_parse_partial_udt() { - UdtValue udt = parse("{field1:1,field2:NULL}"); - - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.isNull(2)).isTrue(); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verifyNoMoreInteractions(textCodec); - } - - @Test - public void should_parse_full_udt() { - UdtValue udt = parse("{field1:1,field2:NULL,field3:'a'}"); - - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.getString(2)).isEqualTo("a"); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verify(textCodec).parse("'a'"); - } - - @Test - public void should_parse_udt_with_extra_whitespace() { - UdtValue udt = parse(" { field1 : 1 , field2 : NULL , field3 : 'a' } "); - - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.getString(2)).isEqualTo("a"); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verify(textCodec).parse("'a'"); - } - - @Test - public void should_fail_to_parse_invalid_input() { - // general UDT structure invalid - assertThatThrownBy(() -> parse("not a udt")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from 
\"not a udt\" at character 0: expecting '{' but got 'n'"); - assertThatThrownBy(() -> parse(" { ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \" { \" at character 3: expecting CQL identifier or '}', got EOF"); - assertThatThrownBy(() -> parse("{ [ ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ [ \", cannot parse a CQL identifier at character 2"); - assertThatThrownBy(() -> parse("{ field1 ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 \", at field field1 (character 9) expecting ':', but got EOF"); - assertThatThrownBy(() -> parse("{ field1 ,")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 ,\", at field field1 (character 9) expecting ':', but got ','"); - assertThatThrownBy(() -> parse("{nonExistentField:NULL}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{nonExistentField:NULL}\", unknown CQL identifier at character 17: \"nonExistentField\""); - assertThatThrownBy(() -> parse("{ field1 : ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 : \", invalid CQL value at field field1 (character 11)"); - assertThatThrownBy(() -> parse("{ field1 : [")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 : [\", invalid CQL value at field field1 (character 11)"); - assertThatThrownBy(() -> parse("{ field1 : 1 , ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 : 1 , \" at field field1 (character 15): expecting CQL identifier or '}', got EOF"); - assertThatThrownBy(() -> parse("{ field1 : 1 field2 ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 : 1 field2 \", at field 
field1 (character 13) expecting ',' but got 'f'"); - assertThatThrownBy(() -> parse("{field1:1,field2:12.34,field3:'a'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:12.34,field3:'a'\", at field field3 (character 33) expecting ',' or '}', but got EOF"); - assertThatThrownBy(() -> parse("{field1:1,field2:12.34,field3:'a'}}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:12.34,field3:'a'}}\", at character 34 expecting EOF or blank, but got \"}\""); - assertThatThrownBy(() -> parse("{field1:1,field2:12.34,field3:'a'} extra")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:12.34,field3:'a'} extra\", at character 35 expecting EOF or blank, but got \"extra\""); - // element syntax invalid - assertThatThrownBy(() -> parse("{field1:not a valid int,field2:NULL,field3:'a'}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:not a valid int,field2:NULL,field3:'a'}\", " - + "invalid CQL value at field field1 (character 8): " - + "Cannot parse 32-bits int value from \"not\"") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> parse("{field1:1,field2:not a valid double,field3:'a'}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:not a valid double,field3:'a'}\", " - + "invalid CQL value at field field2 (character 17): " - + "Cannot parse 64-bits double value from \"not\"") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> parse("{field1:1,field2:NULL,field3:not a valid text}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:NULL,field3:not a valid text}\", " - + "invalid CQL value at field field3 (character 29): " - + "text 
or varchar values must be enclosed by single quotes") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(UdtValue.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(DefaultUdtValue.class))) - .isFalse(); // covariance not allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(DefaultUdtValue.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(userType.newValue())).isTrue(); - assertThat(codec.accepts(new DefaultUdtValue(userType))).isTrue(); // covariance allowed - assertThat(codec.accepts("not a udt")).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java deleted file mode 100644 index e62fb4af15b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.util.UUID; -import org.junit.Test; - -public class UuidCodecTest extends CodecTestBase { - private static final UUID MOCK_UUID = new UUID(2L, 1L); - - public UuidCodecTest() { - this.codec = TypeCodecs.UUID; - } - - @Test - public void should_encode() { - assertThat(encode(MOCK_UUID)).isEqualTo("0x00000000000000020000000000000001"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - UUID decoded = decode("0x00000000000000020000000000000001"); - assertThat(decoded.getMostSignificantBits()).isEqualTo(2L); - assertThat(decoded.getLeastSignificantBits()).isEqualTo(1L); - - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x00000000000000020000000000000001" + "0000"); - } - - @Test - public void should_format() { - assertThat(format(MOCK_UUID)).isEqualTo("00000000-0000-0002-0000-000000000001"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("00000000-0000-0002-0000-000000000001")).isEqualTo(MOCK_UUID); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a uuid"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(UUID.class))).isTrue(); - 
assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(UUID.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(MOCK_UUID)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java deleted file mode 100644 index a3472d4b8ce..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.math.BigInteger; -import org.junit.Test; - -public class VarintCodecTest extends CodecTestBase { - - public VarintCodecTest() { - this.codec = TypeCodecs.VARINT; - } - - @Test - public void should_encode() { - assertThat(encode(BigInteger.ONE)).isEqualTo("0x01"); - assertThat(encode(BigInteger.valueOf(128))).isEqualTo("0x0080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x01")).isEqualTo(BigInteger.ONE); - assertThat(decode("0x0080")).isEqualTo(BigInteger.valueOf(128)); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_format() { - assertThat(format(BigInteger.ONE)).isEqualTo("1"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isEqualTo(BigInteger.ONE); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a varint"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(BigInteger.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(BigInteger.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(BigInteger.ONE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodecTest.java deleted file mode 100644 index 17c78514127..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodecTest.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import java.time.LocalTime; -import java.util.HashMap; -import org.apache.commons.lang3.ArrayUtils; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class VectorCodecTest { - - @DataProvider - public static Object[] dataProvider() { - HashMap map1 = new HashMap<>(); - map1.put(1, "a"); - HashMap map2 = new HashMap<>(); - map2.put(2, "b"); - return new TestDataContainer[] { - new TestDataContainer( - DataTypes.FLOAT, - new Float[] {1.0f, 2.5f}, - "[1.0, 2.5]", - Bytes.fromHexString("0x3f80000040200000")), - new TestDataContainer( - DataTypes.ASCII, - new String[] {"ab", "cde"}, - "['ab', 'cde']", - Bytes.fromHexString("0x02616203636465")), - new TestDataContainer( - DataTypes.BIGINT, - new Long[] {1L, 2L}, - "[1, 2]", - Bytes.fromHexString("0x00000000000000010000000000000002")), - new TestDataContainer( - DataTypes.BLOB, - new ByteBuffer[] {Bytes.fromHexString("0xCAFE"), Bytes.fromHexString("0xABCD")}, - "[0xcafe, 0xabcd]", - Bytes.fromHexString("0x02cafe02abcd")), - 
new TestDataContainer( - DataTypes.BOOLEAN, - new Boolean[] {true, false}, - "[true, false]", - Bytes.fromHexString("0x0100")), - new TestDataContainer( - DataTypes.TIME, - new LocalTime[] {LocalTime.ofNanoOfDay(1), LocalTime.ofNanoOfDay(2)}, - "['00:00:00.000000001', '00:00:00.000000002']", - Bytes.fromHexString("0x080000000000000001080000000000000002")), - new TestDataContainer( - DataTypes.mapOf(DataTypes.INT, DataTypes.ASCII), - new HashMap[] {map1, map2}, - "[{1:'a'}, {2:'b'}]", - Bytes.fromHexString( - "0x110000000100000004000000010000000161110000000100000004000000020000000162")), - new TestDataContainer( - DataTypes.vectorOf(DataTypes.INT, 1), - new CqlVector[] {CqlVector.newInstance(1), CqlVector.newInstance(2)}, - "[[1], [2]]", - Bytes.fromHexString("0x0000000100000002")), - new TestDataContainer( - DataTypes.vectorOf(DataTypes.TEXT, 1), - new CqlVector[] {CqlVector.newInstance("ab"), CqlVector.newInstance("cdef")}, - "[['ab'], ['cdef']]", - Bytes.fromHexString("0x03026162050463646566")), - new TestDataContainer( - DataTypes.vectorOf(DataTypes.vectorOf(DataTypes.FLOAT, 2), 1), - new CqlVector[] { - CqlVector.newInstance(CqlVector.newInstance(1.0f, 2.5f)), - CqlVector.newInstance(CqlVector.newInstance(3.0f, 4.5f)) - }, - "[[[1.0, 2.5]], [[3.0, 4.5]]]", - Bytes.fromHexString("0x3f800000402000004040000040900000")) - }; - } - - @UseDataProvider("dataProvider") - @Test - public void should_encode(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - CqlVector vector = CqlVector.newInstance(testData.getValues()); - assertThat(codec.encode(vector, ProtocolVersion.DEFAULT)).isEqualTo(testData.getBytes()); - } - - @Test - @UseDataProvider("dataProvider") - public void should_throw_on_encode_with_too_few_elements(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThatThrownBy( - () -> - codec.encode( - CqlVector.newInstance(testData.getValues()[0]), ProtocolVersion.DEFAULT)) - 
.isInstanceOf(IllegalArgumentException.class); - } - - @Test - @UseDataProvider("dataProvider") - public void should_throw_on_encode_with_too_many_elements(TestDataContainer testData) { - Object[] doubled = ArrayUtils.addAll(testData.getValues(), testData.getValues()); - TypeCodec> codec = getCodec(testData.getDataType()); - assertThatThrownBy(() -> codec.encode(CqlVector.newInstance(doubled), ProtocolVersion.DEFAULT)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - @UseDataProvider("dataProvider") - public void should_decode(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.decode(testData.getBytes(), ProtocolVersion.DEFAULT)) - .isEqualTo(CqlVector.newInstance(testData.getValues())); - } - - @Test - @UseDataProvider("dataProvider") - public void should_throw_on_decode_if_too_few_bytes(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - int lastIndex = testData.getBytes().remaining() - 1; - assertThatThrownBy( - () -> - codec.decode( - (ByteBuffer) testData.getBytes().duplicate().limit(lastIndex), - ProtocolVersion.DEFAULT)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - @UseDataProvider("dataProvider") - public void should_throw_on_decode_if_too_many_bytes(TestDataContainer testData) { - ByteBuffer doubled = ByteBuffer.allocate(testData.getBytes().remaining() * 2); - doubled.put(testData.getBytes().duplicate()).put(testData.getBytes().duplicate()).flip(); - TypeCodec> codec = getCodec(testData.getDataType()); - assertThatThrownBy(() -> codec.decode(doubled, ProtocolVersion.DEFAULT)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - @UseDataProvider("dataProvider") - public void should_format(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - CqlVector vector = CqlVector.newInstance(testData.getValues()); - assertThat(codec.format(vector)).isEqualTo(testData.getFormatted()); - } - - @Test - 
@UseDataProvider("dataProvider") - public void should_parse(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.parse(testData.getFormatted())) - .isEqualTo(CqlVector.newInstance(testData.getValues())); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_data_type(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 2))).isTrue(); - assertThat(codec.accepts(new DefaultVectorType(DataTypes.custom("non-existent"), 2))).isFalse(); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_vector_type_correct_dimension_only(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 0))).isFalse(); - assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 1))).isFalse(); - assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 3))).isFalse(); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_generic_type(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.accepts(codec.getJavaType())).isTrue(); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_raw_type(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.accepts(CqlVector.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_object(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - CqlVector vector = CqlVector.newInstance(testData.getValues()); - assertThat(codec.accepts(vector)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } - - @Test - public void should_handle_null_and_empty() { - 
TypeCodec> codec = getCodec(DataTypes.FLOAT); - assertThat(codec.encode(null, ProtocolVersion.DEFAULT)).isNull(); - assertThat(codec.decode(Bytes.fromHexString("0x"), ProtocolVersion.DEFAULT)).isNull(); - assertThat(codec.format(null)).isEqualTo("NULL"); - assertThat(codec.parse("NULL")).isNull(); - assertThat(codec.parse("null")).isNull(); - assertThat(codec.parse("")).isNull(); - assertThat(codec.parse(null)).isNull(); - assertThatThrownBy(() -> codec.encode(CqlVector.newInstance(), ProtocolVersion.DEFAULT)) - .isInstanceOf(IllegalArgumentException.class); - } - - private static TypeCodec> getCodec(DataType dataType) { - return TypeCodecs.vectorOf( - DataTypes.vectorOf(dataType, 2), CodecRegistry.DEFAULT.codecFor(dataType)); - } - - private static class TestDataContainer { - private final DataType dataType; - private final Object[] values; - private final String formatted; - private final ByteBuffer bytes; - - public TestDataContainer( - DataType dataType, Object[] values, String formatted, ByteBuffer bytes) { - this.dataType = dataType; - this.values = values; - this.formatted = formatted; - this.bytes = bytes; - } - - public DataType getDataType() { - return dataType; - } - - public Object[] getValues() { - return values; - } - - public String getFormatted() { - return formatted; - } - - public ByteBuffer getBytes() { - return bytes; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodecTest.java deleted file mode 100644 index 745ba7a3aa8..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodecTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import java.util.Optional; -import org.junit.Before; -import org.junit.Test; - -public class OptionalCodecTest extends CodecTestBase> { - - @Before - public void setup() { - codec = ExtraTypeCodecs.optionalOf(TypeCodecs.INT); - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(Optional.of(1))).isEqualTo("0x00000001"); - assertThat(encode(Optional.empty())).isNull(); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000001")).isPresent().contains(1); - assertThat(decode("0x")).isEmpty(); - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_format() { - assertThat(format(Optional.of(1))).isEqualTo("1"); - assertThat(format(Optional.empty())).isEqualTo("NULL"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isPresent().contains(1); - assertThat(parse("NULL")).isEmpty(); - assertThat(parse("null")).isEmpty(); - assertThat(parse("")).isEmpty(); - assertThat(parse(null)).isEmpty(); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.optionalOf(Integer.class))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Optional.class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(Optional.of(1))).isTrue(); - assertThat(codec.accepts(Optional.empty())).isTrue(); - assertThat(codec.accepts(Optional.of("foo"))).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanArrayCodecTest.java deleted file mode 100644 index 4a175cdf306..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanArrayCodecTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class BooleanArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.BOOLEAN_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new boolean[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new boolean[] {true, false})) - .isEqualTo( - "0x" - + "00000002" // number of elements - + "00000001" // size of element 1 - + "01" // contents of element 1 - + "00000001" // size of element 2 - + "00" // contents of element 2 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000002" // number of elements - + 
"00000001" // size of element 1 - + "01" // contents of element 1 - + "00000001" // size of element 2 - + "00" // contents of element 2 - )) - .containsExactly(true, false); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new boolean[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new boolean[] {true, false})).isEqualTo("[true,false]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[true,false]")).containsExactly(true, false); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Boolean.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Boolean.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Boolean.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Boolean.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new boolean[] {true, false})).isTrue(); - assertThat(codec.accepts(new 
Boolean[] {true, false})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteArrayCodecTest.java deleted file mode 100644 index 761b568fcea..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class ByteArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.BYTE_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new byte[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new byte[] {1, 2, 3})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000001" // size of element 1 - + "01" // contents of element 1 - + "00000001" // size of element 2 - + "02" // contents of element 2 - + "00000001" // size of element 3 - + "03" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000001" // size of element 1 - + "01" // contents of element 1 - + "00000001" // size of element 2 - + "02" // contents of element 2 - + "00000001" // size of element 3 - + "03" // contents of element 3 - )) - .containsExactly((byte) 1, (byte) 2, (byte) 3); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void 
should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new byte[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new byte[] {1, 2, 3})).isEqualTo("[1,2,3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1,2,3]")).containsExactly((byte) 1, (byte) 2, (byte) 3); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Byte.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Byte.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Byte.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Byte.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new byte[] {1, 2, 3})).isTrue(); - assertThat(codec.accepts(new Byte[] {1, 2, 3})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleArrayCodecTest.java deleted file mode 100644 index 8e951f8ed55..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class DoubleArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.DOUBLE_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new double[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new double[] {1.1d, 2.2d, 3.3d})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000008" // size of element 1 - + "3ff199999999999a" // contents of element 1 - + "00000008" // size of element 2 - 
+ "400199999999999a" // contents of element 2 - + "00000008" // size of element 3 - + "400a666666666666" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000008" // size of element 1 - + "3ff199999999999a" // contents of element 1 - + "00000008" // size of element 2 - + "400199999999999a" // contents of element 2 - + "00000008" // size of element 3 - + "400a666666666666" // contents of element 3 - )) - .containsExactly(1.1d, 2.2d, 3.3d); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new double[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new double[] {1.1d, 2.2d, 3.3d})).isEqualTo("[1.1,2.2,3.3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1.1,2.2,3.3]")).containsExactly(1.1d, 2.2d, 3.3d); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Double.TYPE))).isTrue(); - 
assertThat(codec.accepts(GenericType.arrayOf(Double.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Double.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Double.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new double[] {1.1d, 2.2d, 3.3d})).isTrue(); - assertThat(codec.accepts(new Double[] {1.1d, 2.2d, 3.3d})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatArrayCodecTest.java deleted file mode 100644 index 77f3eafdcd7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class FloatArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.FLOAT_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new float[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new float[] {1.1f, 2.2f, 3.3f})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000004" // size of element 1 - + "3f8ccccd" // contents of element 1 - + "00000004" // size of element 2 - + "400ccccd" // contents of element 2 - + "00000004" // size of element 3 - + "40533333" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000004" // size of element 1 - + "3f8ccccd" // contents of element 1 - + "00000004" // size of element 2 - + "400ccccd" // contents of element 2 - + "00000004" // size of element 3 - + "40533333" // contents of element 3 - )) - .containsExactly(1.1f, 2.2f, 3.3f); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - 
@Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new float[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new float[] {1.1f, 2.2f, 3.3f})).isEqualTo("[1.1,2.2,3.3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1.1,2.2,3.3]")).containsExactly(1.1f, 2.2f, 3.3f); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Float.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Float.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Float.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Float.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new float[] {1.1f, 2.2f, 3.3f})).isTrue(); - assertThat(codec.accepts(new Float[] {1.1f, 2.2f, 3.3f})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntArrayCodecTest.java deleted file mode 100644 index ac00f1f8e1c..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class IntArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.INT_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new int[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new int[] {1, 2, 3})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000004" // size of element 1 - + "00000001" // contents of element 1 - + "00000004" // size of element 2 - + "00000002" // contents of 
element 2 - + "00000004" // size of element 3 - + "00000003" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000004" // size of element 1 - + "00000001" // contents of element 1 - + "00000004" // size of element 2 - + "00000002" // contents of element 2 - + "00000004" // size of element 3 - + "00000003" // contents of element 3 - )) - .containsExactly(1, 2, 3); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new int[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new int[] {1, 2, 3})).isEqualTo("[1,2,3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1,2,3]")).containsExactly(1, 2, 3); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Integer.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Integer.class))).isFalse(); - 
assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Integer.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Integer.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new int[] {1, 2, 3})).isTrue(); - assertThat(codec.accepts(new Integer[] {1, 2, 3})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongArrayCodecTest.java deleted file mode 100644 index 737dcfae3c0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class LongArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.LONG_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new long[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new long[] {1, 2, 3})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000008" // size of element 1 - + "0000000000000001" // contents of element 1 - + "00000008" // size of element 2 - + "0000000000000002" // contents of element 2 - + "00000008" // size of element 3 - + "0000000000000003" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000008" // size of element 1 - + "0000000000000001" // contents of element 1 - + "00000008" // size of element 2 - + "0000000000000002" // contents of element 2 - + "00000008" // size of element 3 - + "0000000000000003" // contents of element 3 - )) - .containsExactly(1L, 2L, 3L); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 
(-1 for null) - ); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new long[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new long[] {1, 2, 3})).isEqualTo("[1,2,3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1,2,3]")).containsExactly(1L, 2L, 3L); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Long.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Long.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Long.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Long.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new long[] {1, 2, 3})).isTrue(); - assertThat(codec.accepts(new Long[] {1L, 2L, 3L})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectArrayCodecTest.java deleted file mode 100644 index a2afc652002..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectArrayCodecTest.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.protocol.internal.util.Bytes; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ObjectArrayCodecTest extends CodecTestBase { - - @Mock private TypeCodec elementCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(elementCodec.getCqlType()).thenReturn(DataTypes.TEXT); - when(elementCodec.getJavaType()).thenReturn(GenericType.STRING); - 
codec = ExtraTypeCodecs.listToArrayOf(elementCodec); - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new String[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - when(elementCodec.encode("hello", ProtocolVersion.DEFAULT)) - .thenReturn(Bytes.fromHexString("0x68656c6c6f")); - when(elementCodec.encode("world", ProtocolVersion.DEFAULT)) - .thenReturn(Bytes.fromHexString("0x776f726c64")); - assertThat(encode(new String[] {"hello", "world"})) - .isEqualTo( - "0x" - + "00000002" // number of elements - + "00000005" // size of element 1 - + "68656c6c6f" // contents of element 1 - + "00000005" // size of element 2 - + "776f726c64" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - when(elementCodec.decode(Bytes.fromHexString("0x68656c6c6f"), ProtocolVersion.DEFAULT)) - .thenReturn("hello"); - when(elementCodec.decode(Bytes.fromHexString("0x776f726c64"), ProtocolVersion.DEFAULT)) - .thenReturn("world"); - assertThat( - decode( - "0x" - + "00000002" // number of elements - + "00000005" // size of element 1 - + "68656c6c6f" // contents of element 1 - + "00000005" // size of element 2 - + "776f726c64" // contents of element 3 - )) - .containsExactly("hello", "world"); - } - - @Test - public void should_decode_array_with_null_elements() { - when(elementCodec.decode(Bytes.fromHexString("0x68656c6c6f"), ProtocolVersion.DEFAULT)) - .thenReturn("hello"); - assertThat( - decode( - "0x" - + "00000002" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - + "00000005" // size of element 2 - + "68656c6c6f" // contents of element 2 - )) - 
.containsExactly(null, "hello"); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new String[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - when(elementCodec.format("hello")).thenReturn("'hello'"); - when(elementCodec.format("world")).thenReturn("'world'"); - assertThat(format(new String[] {"hello", "world"})).isEqualTo("['hello','world']"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - when(elementCodec.parse("'hello'")).thenReturn("hello"); - when(elementCodec.parse("'world'")).thenReturn("world"); - assertThat(parse("['hello','world']")).containsExactly("hello", "world"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Integer.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new String[] {"hello", "world"})).isTrue(); - assertThat(codec.accepts(new Integer[] {1, 2, 3})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortArrayCodecTest.java 
deleted file mode 100644 index 3d489ada38f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class ShortArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.SHORT_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new short[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new short[] {1, 2, 3})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000002" // size of element 1 - + "0001" // contents of element 1 - + "00000002" // size of element 2 - + "0002" // contents of element 2 - + "00000002" // size of element 3 - + "0003" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000002" // size of element 1 - + "0001" // contents of element 1 - + "00000002" // size of element 2 - + "0002" // contents of element 2 - + "00000002" // size of element 3 - + "0003" // contents of element 3 - )) - .containsExactly((short) 1, (short) 2, (short) 3); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void 
should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new short[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new short[] {1, 2, 3})).isEqualTo("[1,2,3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1,2,3]")).containsExactly((short) 1, (short) 2, (short) 3); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Short.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Short.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Short.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Short.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new short[] {1, 2, 3})).isTrue(); - assertThat(codec.accepts(new Short[] {1, 2, 3})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodecTest.java deleted file mode 100644 index 093ec8a0be8..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodecTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.enums; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class EnumNameCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.enumNamesOf(DefaultProtocolVersion.class); - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(DefaultProtocolVersion.V3)).isEqualTo("0x5633"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x5633")).isEqualTo(DefaultProtocolVersion.V3); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_invalid_name() { - decode("0x1234"); - } - - @Test - public void should_format() { - assertThat(format(DefaultProtocolVersion.V3)).isEqualTo("'V3'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("'V3'")).isEqualTo(DefaultProtocolVersion.V3); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a valid enum constant"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(DefaultProtocolVersion.class))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(DefaultProtocolVersion.class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(DefaultProtocolVersion.V3)).isTrue(); - assertThat(codec.accepts(DseProtocolVersion.DSE_V1)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodecTest.java deleted file mode 100644 index 7162bc51ff2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodecTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.enums; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class EnumOrdinalCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.enumOrdinalsOf(DefaultProtocolVersion.class); - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(DefaultProtocolVersion.values()[0])).isEqualTo("0x00000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo(DefaultProtocolVersion.values()[0]); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000"); - } - - @Test - public void should_format() { - assertThat(format(DefaultProtocolVersion.values()[0])).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo(DefaultProtocolVersion.values()[0]); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not an int"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(DefaultProtocolVersion.class))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(DefaultProtocolVersion.class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(DefaultProtocolVersion.V3)).isTrue(); - assertThat(codec.accepts(DseProtocolVersion.DSE_V1)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodecTest.java deleted file mode 100644 index f9c37075b36..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodecTest.java 
+++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.json; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Collections; -import java.util.Set; -import org.junit.Before; -import org.junit.Test; - -public class JsonCodecTest extends CodecTestBase> { - - private static final InetAddress V4_ADDRESS; - private static final InetAddress V6_ADDRESS; - private static final Set SET_OF_ADDRESSES; - - static { - try { - V4_ADDRESS = InetAddress.getByName("127.0.0.1"); - V6_ADDRESS = InetAddress.getByName("::1"); - SET_OF_ADDRESSES 
= ImmutableSet.of(V4_ADDRESS, V6_ADDRESS); - } catch (UnknownHostException e) { - fail("unexpected error", e); - throw new AssertionError(); // never reached - } - } - - @Before - public void setup() { - this.codec = ExtraTypeCodecs.json(GenericType.setOf(GenericType.INET_ADDRESS)); - } - - @Test - public void should_encode() { - assertThat(encode(SET_OF_ADDRESSES)) - .isEqualTo(encodeJson("[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]")); - assertThat(encode(Collections.emptySet())).isEqualTo(encodeJson("[]")); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode(encodeJson("[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]"))) - .isEqualTo(SET_OF_ADDRESSES); - assertThat(decode(encodeJson("[]"))).isEqualTo(Collections.emptySet()); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_format() { - assertThat(format(SET_OF_ADDRESSES)).isEqualTo("'[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]'"); - assertThat(format(Collections.emptySet())).isEqualTo("'[]'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("'[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]'")).isEqualTo(SET_OF_ADDRESSES); - assertThat(parse("'[]'")).isEqualTo(Collections.emptySet()); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a JSON string"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.setOf(GenericType.INET_ADDRESS))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Set.class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(SET_OF_ADDRESSES)).isTrue(); - assertThat(codec.accepts(Collections.emptySet())).isTrue(); - 
assertThat(codec.accepts(Collections.singletonList(V4_ADDRESS))).isFalse(); - } - - private String encodeJson(String json) { - return Bytes.toHexString(TypeCodecs.TEXT.encode(json, ProtocolVersion.DEFAULT)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodecTest.java deleted file mode 100644 index 7d87cbbba9f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodecTest.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodecTest; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class LocalTimestampCodecTest extends CodecTestBase { - - @Test - public void should_encode() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC; - assertThat(encode(Instant.EPOCH.atZone(ZoneOffset.UTC).toLocalDateTime())) - .isEqualTo("0x0000000000000000"); - assertThat(encode(Instant.ofEpochMilli(128).atZone(ZoneOffset.UTC).toLocalDateTime())) - .isEqualTo("0x0000000000000080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC; - assertThat(decode("0x0000000000000000")) - .isEqualTo(Instant.EPOCH.atZone(ZoneOffset.UTC).toLocalDateTime()); - assertThat(decode("0x0000000000000080")) - .isEqualTo(Instant.ofEpochMilli(128).atZone(ZoneOffset.UTC).toLocalDateTime()); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - codec = 
ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - codec = ExtraTypeCodecs.localTimestampAt(ZoneOffset.ofHours(2)); - // No need to test various values because the codec delegates directly to SimpleDateFormat, - // which we assume does its job correctly. - assertThat(format(LocalDateTime.parse("2018-08-16T16:59:34.123"))) - .isEqualTo("'2018-08-16T16:59:34.123+02:00'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) - public void should_parse(ZoneId defaultTimeZone) { - codec = ExtraTypeCodecs.localTimestampAt(defaultTimeZone); - - // Raw numbers - assertThat(parse("'0'")).isEqualTo(Instant.EPOCH.atZone(defaultTimeZone).toLocalDateTime()); - assertThat(parse("'-1'")) - .isEqualTo(Instant.EPOCH.minusMillis(1).atZone(defaultTimeZone).toLocalDateTime()); - assertThat(parse("1534463100000")) - .isEqualTo(Instant.ofEpochMilli(1534463100000L).atZone(defaultTimeZone).toLocalDateTime()); - - // Date formats - LocalDateTime expected; - - // date without time, without time zone - expected = LocalDate.parse("2017-01-01").atStartOfDay(); - assertThat(parse("'2017-01-01'")).isEqualTo(expected); - - // date without time, with time zone - expected = - ZonedDateTime.parse("2018-08-16T00:00:00+02:00") - .withZoneSameInstant(defaultTimeZone) - .toLocalDateTime(); - assertThat(parse("'2018-08-16+02'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+0200'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+02:00'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 CEST'")).isEqualTo(expected); - - // date with time, without time zone - expected = LocalDateTime.parse("2018-08-16T23:45"); - assertThat(parse("'2018-08-16T23:45'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 23:45'")).isEqualTo(expected); - - // date with time + seconds, without time zone - expected = 
LocalDateTime.parse("2019-12-31T16:08:38"); - assertThat(parse("'2019-12-31T16:08:38'")).isEqualTo(expected); - assertThat(parse("'2019-12-31 16:08:38'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, without time zone - expected = LocalDateTime.parse("1950-02-28T12:00:59.230"); - assertThat(parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); - assertThat(parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); - - // date with time, with time zone - expected = - ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00") - .withZoneSameInstant(defaultTimeZone) - .toLocalDateTime(); - assertThat(parse("'1973-06-23T23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); - - // date with time + seconds, with time zone - expected = - ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00") - .withZoneSameInstant(defaultTimeZone) - .toLocalDateTime(); - assertThat(parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, with time zone - expected = - 
ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00") - .withZoneSameInstant(defaultTimeZone) - .toLocalDateTime(); - assertThat(parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test - public void should_fail_to_parse_invalid_input() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - assertThatThrownBy(() -> parse("not a timestamp")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); - assertThatThrownBy(() -> parse("'not a timestamp'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); - } - - @Test - public void should_accept_generic_type() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - assertThat(codec.accepts(GenericType.LOCAL_DATE_TIME)).isTrue(); - assertThat(codec.accepts(GenericType.INSTANT)).isFalse(); - } - - @Test - public void should_accept_raw_type() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - assertThat(codec.accepts(LocalDateTime.class)).isTrue(); - assertThat(codec.accepts(Instant.class)).isFalse(); - } - - @Test - public void should_accept_object() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - assertThat(codec.accepts(LocalDateTime.now(ZoneId.systemDefault()))).isTrue(); - 
assertThat(codec.accepts(Instant.EPOCH)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodecTest.java deleted file mode 100644 index 9bf1cac1007..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodecTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Before; -import org.junit.Test; - -public class PersistentZonedTimestampCodecTest extends CodecTestBase { - - private static final ZonedDateTime EPOCH_UTC = Instant.EPOCH.atZone(ZoneOffset.UTC); - - private static final ZonedDateTime EPOCH_MILLIS_CET = - Instant.ofEpochMilli(128).atZone(ZoneId.of("CET")); - - private static final ZonedDateTime EPOCH_MILLIS_OFFSET = - Instant.ofEpochMilli(128).atZone(ZoneOffset.ofHours(2)); - - private static final ZonedDateTime EPOCH_MILLIS_EUROPE_PARIS = - Instant.ofEpochMilli(-128).atZone(ZoneId.of("Europe/Paris")); - - private static final String EPOCH_UTC_ENCODED = - "0x" - + ("00000008" + "0000000000000000") // size and contents of timestamp - + ("00000001" + "5a"); // size and contents of zone ID - - private static final String EPOCH_MILLIS_CET_ENCODED = - "0x" - + ("00000008" + "0000000000000080") // size and contents of timestamp - + ("00000003" + "434554"); // size and contents of zone ID - - private static final String EPOCH_MILLIS_OFFSET_ENCODED = - "0x" - + ("00000008" + "0000000000000080") // size and contents of timestamp - + ("00000006" + "2b30323a3030"); // size and contents of zone ID - - private static final String EPOCH_MILLIS_EUROPE_PARIS_ENCODED = - "0x" - + ("00000008" + "ffffffffffffff80") // size and contents of timestamp - + ("0000000c" + "4575726f70652f5061726973"); // size and contents of zone ID - - private static final String EPOCH_UTC_FORMATTED = "('1970-01-01T00:00:00.000Z','Z')"; - - private static final String 
EPOCH_MILLIS_CET_FORMATTED = "('1970-01-01T00:00:00.128Z','CET')"; - - private static final String EPOCH_MILLIS_OFFSET_FORMATTED = - "('1970-01-01T00:00:00.128Z','+02:00')"; - - private static final String EPOCH_MILLIS_EUROPE_PARIS_FORMATTED = - "('1969-12-31T23:59:59.872Z','Europe/Paris')"; - - @Before - public void setup() { - codec = ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED; - } - - @Test - public void should_encode() { - assertThat(encode(EPOCH_UTC)).isEqualTo(EPOCH_UTC_ENCODED); - assertThat(encode(EPOCH_MILLIS_CET)).isEqualTo(EPOCH_MILLIS_CET_ENCODED); - assertThat(encode(EPOCH_MILLIS_OFFSET)).isEqualTo(EPOCH_MILLIS_OFFSET_ENCODED); - assertThat(encode(EPOCH_MILLIS_EUROPE_PARIS)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS_ENCODED); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode(EPOCH_UTC_ENCODED)).isEqualTo(EPOCH_UTC); - assertThat(decode(EPOCH_MILLIS_CET_ENCODED)).isEqualTo(EPOCH_MILLIS_CET); - assertThat(decode(EPOCH_MILLIS_OFFSET_ENCODED)).isEqualTo(EPOCH_MILLIS_OFFSET); - assertThat(decode(EPOCH_MILLIS_EUROPE_PARIS_ENCODED)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_format() { - assertThat(format(EPOCH_UTC)).isEqualTo(EPOCH_UTC_FORMATTED); - assertThat(format(EPOCH_MILLIS_CET)).isEqualTo(EPOCH_MILLIS_CET_FORMATTED); - assertThat(format(EPOCH_MILLIS_OFFSET)).isEqualTo(EPOCH_MILLIS_OFFSET_FORMATTED); - assertThat(format(EPOCH_MILLIS_EUROPE_PARIS)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS_FORMATTED); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse(EPOCH_UTC_FORMATTED)).isEqualTo(EPOCH_UTC); - assertThat(parse(EPOCH_MILLIS_CET_FORMATTED)).isEqualTo(EPOCH_MILLIS_CET); - assertThat(parse(EPOCH_MILLIS_OFFSET_FORMATTED)).isEqualTo(EPOCH_MILLIS_OFFSET); - assertThat(parse(EPOCH_MILLIS_EUROPE_PARIS_FORMATTED)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS); - assertThat(parse("NULL")).isNull(); - 
assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(ZonedDateTime.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(ZonedDateTime.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(ZonedDateTime.now(ZoneOffset.systemDefault()))).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodecTest.java deleted file mode 100644 index 36ee71eebe6..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodecTest.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import static java.time.ZoneOffset.ofHours; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodecTest; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class TimestampMillisCodecTest extends CodecTestBase { - - @Test - public void should_encode() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(encode(0L)).isEqualTo("0x0000000000000000"); - assertThat(encode(128L)).isEqualTo("0x0000000000000080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(decode("0x0000000000000000")).isEqualTo(0L); - assertThat(decode("0x0000000000000080")).isEqualTo(128L); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM; - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM; - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - codec = 
ExtraTypeCodecs.timestampMillisAt(ZoneOffset.ofHours(2)); - // No need to test various values because the codec delegates directly to SimpleDateFormat, - // which we assume does its job correctly. - assertThat(format(0L)).isEqualTo("'1970-01-01T02:00:00.000+02:00'"); - assertThat(format(1534435174123L)).isEqualTo("'2018-08-16T17:59:34.123+02:00'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) - public void should_parse(ZoneId defaultTimeZone) { - codec = ExtraTypeCodecs.timestampMillisAt(defaultTimeZone); - - // Raw numbers - assertThat(parse("'0'")).isEqualTo(0L); - assertThat(parse("'-1'")).isEqualTo(-1L); - assertThat(parse("1534463100000")).isEqualTo(1534463100000L); - - // Date formats - long expected; - - // date without time, without time zone - expected = - LocalDate.parse("2017-01-01") - .atStartOfDay() - .atZone(defaultTimeZone) - .toInstant() - .toEpochMilli(); - assertThat(parse("'2017-01-01'")).isEqualTo(expected); - - // date without time, with time zone - expected = - LocalDate.parse("2018-08-16").atStartOfDay().atZone(ofHours(2)).toInstant().toEpochMilli(); - assertThat(parse("'2018-08-16+02'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+0200'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+02:00'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 CEST'")).isEqualTo(expected); - - // date with time, without time zone - expected = - LocalDateTime.parse("2018-08-16T23:45").atZone(defaultTimeZone).toInstant().toEpochMilli(); - assertThat(parse("'2018-08-16T23:45'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 23:45'")).isEqualTo(expected); - - // date with time + seconds, without time zone - expected = - LocalDateTime.parse("2019-12-31T16:08:38") - .atZone(defaultTimeZone) - .toInstant() - .toEpochMilli(); - assertThat(parse("'2019-12-31T16:08:38'")).isEqualTo(expected); - assertThat(parse("'2019-12-31 
16:08:38'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, without time zone - expected = - LocalDateTime.parse("1950-02-28T12:00:59.230") - .atZone(defaultTimeZone) - .toInstant() - .toEpochMilli(); - assertThat(parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); - assertThat(parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); - - // date with time, with time zone - expected = ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00").toInstant().toEpochMilli(); - assertThat(parse("'1973-06-23T23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); - - // date with time + seconds, with time zone - expected = ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00").toInstant().toEpochMilli(); - assertThat(parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, with time zone - expected = ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00").toInstant().toEpochMilli(); - assertThat(parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); - 
assertThat(parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test - public void should_fail_to_parse_invalid_input() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM; - assertThatThrownBy(() -> parse("not a timestamp")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); - assertThatThrownBy(() -> parse("'not a timestamp'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); - } - - @Test - public void should_accept_generic_type() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(codec.accepts(GenericType.LONG)).isTrue(); - assertThat(codec.accepts(GenericType.INSTANT)).isFalse(); - } - - @Test - public void should_accept_raw_type() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(codec.accepts(Long.class)).isTrue(); - assertThat(codec.accepts(Long.TYPE)).isTrue(); - assertThat(codec.accepts(Instant.class)).isFalse(); - } - - @Test - public void should_accept_object() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(codec.accepts(Long.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Instant.EPOCH)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodecTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodecTest.java deleted file mode 100644 index cd31d13d5ca..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodecTest.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import static java.time.ZoneOffset.ofHours; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodecTest; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class ZonedTimestampCodecTest extends CodecTestBase { - - @Test - @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) - public void should_encode(ZoneId timeZone) { - codec = ExtraTypeCodecs.zonedTimestampAt(timeZone); - assertThat(encode(Instant.EPOCH.atZone(timeZone))).isEqualTo("0x0000000000000000"); - assertThat(encode(Instant.ofEpochMilli(128).atZone(timeZone))).isEqualTo("0x0000000000000080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - codec = ExtraTypeCodecs.ZONED_TIMESTAMP_UTC; - assertThat(decode("0x0000000000000000").toInstant().toEpochMilli()).isEqualTo(0); - assertThat(decode("0x0000000000000080").toInstant().toEpochMilli()).isEqualTo(128); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - codec = ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM; - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void 
should_fail_to_decode_if_too_many_bytes() { - codec = ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM; - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - codec = ExtraTypeCodecs.zonedTimestampAt(ZoneOffset.ofHours(2)); - // No need to test various values because the codec delegates directly to SimpleDateFormat, - // which we assume does its job correctly. - assertThat(format(Instant.EPOCH.atZone(ZoneOffset.UTC))) - .isEqualTo("'1970-01-01T02:00:00.000+02:00'"); - assertThat(format(ZonedDateTime.parse("2018-08-16T15:59:34.123Z"))) - .isEqualTo("'2018-08-16T17:59:34.123+02:00'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) - public void should_parse(ZoneId timeZone) { - codec = ExtraTypeCodecs.zonedTimestampAt(timeZone); - - // Raw numbers - assertThat(parse("'0'")).isEqualTo(Instant.EPOCH.atZone(timeZone)); - assertThat(parse("'-1'")).isEqualTo(Instant.EPOCH.minusMillis(1).atZone(timeZone)); - assertThat(parse("1534463100000")) - .isEqualTo(Instant.ofEpochMilli(1534463100000L).atZone(timeZone)); - - // Date formats - ZonedDateTime expected; - - // date without time, without time zone - expected = LocalDate.parse("2017-01-01").atStartOfDay().atZone(timeZone); - assertThat(parse("'2017-01-01'")).isEqualTo(expected); - - // date without time, with time zone - expected = LocalDate.parse("2018-08-16").atStartOfDay().atZone(ofHours(2)); - assertThat(parse("'2018-08-16+02'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+0200'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+02:00'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 CEST'")).isEqualTo(expected); - - // date with time, without time zone - expected = LocalDateTime.parse("2018-08-16T23:45").atZone(timeZone); - assertThat(parse("'2018-08-16T23:45'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 23:45'")).isEqualTo(expected); - - // date with time + seconds, without 
time zone - expected = LocalDateTime.parse("2019-12-31T16:08:38").atZone(timeZone); - assertThat(parse("'2019-12-31T16:08:38'")).isEqualTo(expected); - assertThat(parse("'2019-12-31 16:08:38'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, without time zone - expected = LocalDateTime.parse("1950-02-28T12:00:59.230").atZone(timeZone); - assertThat(parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); - assertThat(parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); - - // date with time, with time zone - expected = ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00"); - assertThat(parse("'1973-06-23T23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); - - // date with time + seconds, with time zone - expected = ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00"); - assertThat(parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, with time zone - expected = ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00"); - 
assertThat(parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test - public void should_fail_to_parse_invalid_input() { - codec = new ZonedTimestampCodec(); - assertThatThrownBy(() -> parse("not a timestamp")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); - assertThatThrownBy(() -> parse("'not a timestamp'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); - } - - @Test - public void should_accept_generic_type() { - codec = new ZonedTimestampCodec(); - assertThat(codec.accepts(GenericType.of(ZonedDateTime.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - codec = new ZonedTimestampCodec(); - assertThat(codec.accepts(ZonedDateTime.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - codec = new ZonedTimestampCodec(); - assertThat(codec.accepts(ZonedDateTime.now(ZoneOffset.systemDefault()))).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java deleted file mode 100644 index 231f67a93e7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java +++ /dev/null @@ -1,657 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; -import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; -import com.datastax.oss.driver.internal.core.type.codec.CqlIntToStringCodec; -import com.datastax.oss.driver.internal.core.type.codec.IntCodec; -import com.datastax.oss.driver.internal.core.type.codec.ListCodec; -import com.datastax.oss.driver.internal.core.type.codec.registry.CachingCodecRegistryTest.TestCachingCodecRegistry.MockCache; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Period; -import java.util.List; -import org.junit.Before; -import org.junit.Test; -import 
org.junit.runner.RunWith; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class CachingCodecRegistryTest { - - @Mock private MockCache mockCache; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - @UseDataProvider( - value = "primitiveCodecs", - location = CachingCodecRegistryTestDataProviders.class) - public void should_find_primitive_codecs_for_types(TypeCodec codec) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - DataType cqlType = codec.getCqlType(); - GenericType javaType = codec.getJavaType(); - assertThat(registry.codecFor(cqlType, javaType)).isSameAs(codec); - assertThat(registry.codecFor(cqlType)).isSameAs(codec); - assertThat(javaType.__getToken().getType()).isInstanceOf(Class.class); - Class javaClass = (Class) javaType.__getToken().getType(); - assertThat(registry.codecFor(cqlType, javaClass)).isSameAs(codec); - // Primitive mappings never hit the cache - verifyZeroInteractions(mockCache); - } - - @Test - @UseDataProvider( - value = "primitiveCodecsWithValues", - location = CachingCodecRegistryTestDataProviders.class) - public void should_find_primitive_codecs_for_value(Object value, TypeCodec codec) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - assertThat(registry.codecFor(value)).isEqualTo(codec); - verifyZeroInteractions(mockCache); - } - - @Test - @UseDataProvider( - value = "primitiveCodecsWithCqlTypesAndValues", - location = CachingCodecRegistryTestDataProviders.class) - public void should_find_primitive_codecs_for_cql_type_and_value( - DataType cqlType, Object value, TypeCodec codec) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - assertThat(registry.codecFor(cqlType, value)).isEqualTo(codec); - verifyZeroInteractions(mockCache); - } - - @Test - public void should_find_user_codec_for_built_in_java_type() { - // 
int and String are built-in types, but int <-> String is not a built-in mapping - CqlIntToStringCodec intToStringCodec1 = new CqlIntToStringCodec(); - // register a second codec to also check that the first one is preferred - CqlIntToStringCodec intToStringCodec2 = new CqlIntToStringCodec(); - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(intToStringCodec1, intToStringCodec2); - verify(mockCache).lookup(DataTypes.INT, GenericType.STRING, false); - - // When the mapping is not ambiguous, the user type should be returned - assertThat(registry.codecFor(DataTypes.INT, GenericType.STRING)).isSameAs(intToStringCodec1); - assertThat(registry.codecFor(DataTypes.INT, String.class)).isSameAs(intToStringCodec1); - assertThat(registry.codecFor(DataTypes.INT, "123")).isSameAs(intToStringCodec1); - - // When there is an ambiguity with a built-in codec, the built-in codec should have priority - assertThat(registry.codecFor(DataTypes.INT)).isSameAs(TypeCodecs.INT); - assertThat(registry.codecFor("123")).isSameAs(TypeCodecs.TEXT); - - verifyZeroInteractions(mockCache); - } - - @Test - public void should_find_user_codec_for_custom_java_type() { - TextToPeriodCodec textToPeriodCodec1 = new TextToPeriodCodec(); - TextToPeriodCodec textToPeriodCodec2 = new TextToPeriodCodec(); - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(textToPeriodCodec1, textToPeriodCodec2); - verify(mockCache).lookup(DataTypes.TEXT, GenericType.of(Period.class), false); - - assertThat(registry.codecFor(DataTypes.TEXT, GenericType.of(Period.class))) - .isSameAs(textToPeriodCodec1); - assertThat(registry.codecFor(DataTypes.TEXT, Period.class)).isSameAs(textToPeriodCodec1); - assertThat(registry.codecFor(DataTypes.TEXT, Period.ofDays(1))).isSameAs(textToPeriodCodec1); - // Now even the search by Java value only is not ambiguous - assertThat(registry.codecFor(Period.ofDays(1))).isSameAs(textToPeriodCodec1); - - // 
The search by CQL type only still returns the built-in codec - assertThat(registry.codecFor(DataTypes.TEXT)).isSameAs(TypeCodecs.TEXT); - - verifyZeroInteractions(mockCache); - } - - @Test - @UseDataProvider( - value = "collectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_cql_and_java_types( - DataType cqlType, GenericType javaType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, javaType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, javaType, false); - } - - @Test - @UseDataProvider( - value = "collectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_cql_type( - DataType cqlType, GenericType javaType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, null, false); - } - - @Test - @UseDataProvider( - value = "collectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_cql_type_and_java_value( - DataType cqlType, GenericType javaType, GenericType javaTypeLookup, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, value); - 
assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, javaTypeLookup, true); - } - - @Test - @UseDataProvider( - value = "collectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_java_value( - DataType cqlType, GenericType javaType, GenericType javaTypeLookup, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, javaTypeLookup, true); - } - - @Test - @UseDataProvider( - value = "emptyCollectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_empty_java_value( - DataType cqlType, - GenericType javaType, - DataType cqlTypeLookup, - GenericType javaTypeLookup, - Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isFalse(); - assertThat(codec.accepts(javaType)).isFalse(); - assertThat(codec.accepts(value)).isTrue(); - - // Note that empty collections without CQL type are a corner case, in that the registry returns - // a codec that does not accept cqlType, nor the value's declared Java type. 
- // The only requirement is that it can encode the value, which holds true: - codec.encode(value, ProtocolVersion.DEFAULT); - - inOrder.verify(mockCache).lookup(cqlTypeLookup, javaTypeLookup, true); - } - - @Test - @UseDataProvider( - value = "emptyCollectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_cql_type_and_empty_java_value( - DataType cqlType, GenericType javaType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - // Verify that the codec can encode the value - codec.encode(value, ProtocolVersion.DEFAULT); - inOrder.verify(mockCache).lookup(cqlType, javaType, true); - } - - @Test - @UseDataProvider( - value = "collectionsWithNullElements", - location = CachingCodecRegistryTestDataProviders.class) - public void should_throw_for_collection_containing_null_element(Object value, String expected) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - assertThatThrownBy(() -> registry.codecFor(value)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage(expected); - } - - @Test - @UseDataProvider( - value = "tuplesWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_tuple_codec_for_cql_and_java_types(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, GenericType.TUPLE_VALUE); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); - 
assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.TUPLE_VALUE, false); - // field codecs are only looked up when fields are accessed, so no cache hit for list now - } - - @Test - @UseDataProvider( - value = "tuplesWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_tuple_codec_for_cql_type(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); - assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, null, false); - } - - @Test - @UseDataProvider( - value = "tuplesWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_tuple_codec_for_cql_type_and_java_value( - DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); - assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultTupleValue.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - @UseDataProvider( - value = "tuplesWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_tuple_codec_for_java_value(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new 
TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); - assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultTupleValue.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - @UseDataProvider( - value = "udtsWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_udt_codec_for_cql_and_java_types(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, GenericType.UDT_VALUE); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.UDT_VALUE, false); - // field codecs are only looked up when fields are accessed, so no cache hit for list now - - } - - @Test - @UseDataProvider( - value = "udtsWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_udt_codec_for_cql_type(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, null, false); - } - - @Test 
- @UseDataProvider( - value = "udtsWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_udt_codec_for_cql_type_and_java_value(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultUdtValue.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - @UseDataProvider( - value = "udtsWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_udt_codec_for_java_value(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultUdtValue.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - public void should_not_find_codec_if_java_type_unknown() { - try { - CodecRegistry.DEFAULT.codecFor(StringBuilder.class); - fail("Should not have found a codec for ANY <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } - try { - CodecRegistry.DEFAULT.codecFor(DataTypes.TEXT, StringBuilder.class); - fail("Should not have found a codec for varchar <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } - try { - 
CodecRegistry.DEFAULT.codecFor(new StringBuilder()); - fail("Should not have found a codec for ANY <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } - } - - @Test - public void should_not_allow_covariance_for_lookups_by_java_type() { - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(new ACodec()); - InOrder inOrder = inOrder(mockCache); - - // covariance not allowed - - assertThatThrownBy(() -> registry.codecFor(B.class)) - .isInstanceOf(CodecNotFoundException.class) - .hasMessage("Codec not found for requested operation: [null <-> %s]", B.class.getName()); - // because of invariance, the custom A codec doesn't match so we try the cache - inOrder.verify(mockCache).lookup(null, GenericType.of(B.class), false); - inOrder.verifyNoMoreInteractions(); - - assertThatThrownBy(() -> registry.codecFor(GenericType.listOf(B.class))) - .isInstanceOf(CodecNotFoundException.class); - inOrder.verify(mockCache).lookup(null, GenericType.listOf(B.class), false); - inOrder.verify(mockCache).lookup(null, GenericType.of(B.class), false); - inOrder.verifyNoMoreInteractions(); - } - - @Test - public void should_allow_covariance_for_lookups_by_cql_type_and_value() { - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(new ACodec()); - InOrder inOrder = inOrder(mockCache); - inOrder.verify(mockCache).lookup(DataTypes.INT, GenericType.of(A.class), false); - - // covariance allowed - - assertThat(registry.codecFor(DataTypes.INT, new B())).isInstanceOf(ACodec.class); - // no cache hit since we find the custom codec directly - inOrder.verifyNoMoreInteractions(); - - // note: in Java, type parameters are always invariant, so List is not a subtype of List; - // but in practice, a codec for List is capable of encoding a List, so we allow it (even - // if in driver 3.x that was forbidden). 
- List list = Lists.newArrayList(new B()); - ListType cqlType = DataTypes.listOf(DataTypes.INT); - TypeCodec> actual = registry.codecFor(cqlType, list); - assertThat(actual).isInstanceOf(ListCodec.class); - assertThat(actual.getJavaType()).isEqualTo(GenericType.listOf(A.class)); - assertThat(actual.accepts(list)).isTrue(); - // accepts(GenericType) remains invariant, so it returns false for List - assertThat(actual.accepts(GenericType.listOf(B.class))).isFalse(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.listOf(B.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - public void should_allow_covariance_for_lookups_by_value() { - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(new ACodec()); - InOrder inOrder = inOrder(mockCache); - inOrder.verify(mockCache).lookup(DataTypes.INT, GenericType.of(A.class), false); - - // covariance allowed - - assertThat(registry.codecFor(new B())).isInstanceOf(ACodec.class); - // no cache hit since we find the custom codec directly - inOrder.verifyNoMoreInteractions(); - - // note: in Java, type parameters are always invariant, so List is not a subtype of List; - // but in practice, a codec for List is capable of encoding a List, so we allow it (even - // if in driver 3.x that was forbidden). 
- List list = Lists.newArrayList(new B()); - TypeCodec> actual = registry.codecFor(list); - assertThat(actual).isInstanceOf(ListCodec.class); - assertThat(actual.getJavaType()).isEqualTo(GenericType.listOf(A.class)); - assertThat(actual.accepts(list)).isTrue(); - // accepts(GenericType) remains invariant, so it returns false for List - assertThat(actual.accepts(GenericType.listOf(B.class))).isFalse(); - inOrder.verify(mockCache).lookup(null, GenericType.listOf(B.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - public void should_register_user_codec_at_runtime() { - CqlIntToStringCodec intToStringCodec = new CqlIntToStringCodec(); - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(intToStringCodec); - // register checks the cache for collisions - verify(mockCache).lookup(DataTypes.INT, GenericType.STRING, false); - - // When the mapping is not ambiguous, the user type should be returned - assertThat(registry.codecFor(DataTypes.INT, GenericType.STRING)).isSameAs(intToStringCodec); - assertThat(registry.codecFor(DataTypes.INT, String.class)).isSameAs(intToStringCodec); - assertThat(registry.codecFor(DataTypes.INT, "123")).isSameAs(intToStringCodec); - - // When there is an ambiguity with a built-in codec, the built-in codec should have priority - assertThat(registry.codecFor(DataTypes.INT)).isSameAs(TypeCodecs.INT); - assertThat(registry.codecFor("123")).isSameAs(TypeCodecs.TEXT); - - verifyZeroInteractions(mockCache); - } - - @Test - public void should_ignore_user_codec_if_collides_with_builtin_codec() { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - - IntCodec userIntCodec = new IntCodec(); - registry.register(userIntCodec); - - assertThat(registry.codecFor(DataTypes.INT, Integer.class)).isNotSameAs(userIntCodec); - } - - @Test - public void should_ignore_user_codec_if_collides_with_other_user_codec() { - TestCachingCodecRegistry registry = new 
TestCachingCodecRegistry(mockCache); - CqlIntToStringCodec intToStringCodec1 = new CqlIntToStringCodec(); - CqlIntToStringCodec intToStringCodec2 = new CqlIntToStringCodec(); - - registry.register(intToStringCodec1, intToStringCodec2); - - assertThat(registry.codecFor(DataTypes.INT, GenericType.STRING)).isSameAs(intToStringCodec1); - } - - @Test - public void should_ignore_user_codec_if_collides_with_generated_codec() { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - - TypeCodec> userListOfIntCodec = TypeCodecs.listOf(TypeCodecs.INT); - registry.register(userListOfIntCodec); - - assertThat( - registry.codecFor(DataTypes.listOf(DataTypes.INT), GenericType.listOf(Integer.class))) - .isNotSameAs(userListOfIntCodec); - } - - // Our intent is not to test Guava cache, so we don't need an actual cache here. - // The only thing we want to check in our tests is if getCachedCodec was called. - public static class TestCachingCodecRegistry extends CachingCodecRegistry { - private final MockCache cache; - - TestCachingCodecRegistry(MockCache cache) { - super("test", CodecRegistryConstants.PRIMITIVE_CODECS); - this.cache = cache; - } - - @Override - protected TypeCodec getCachedCodec( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant) { - cache.lookup(cqlType, javaType, isJavaCovariant); - return createCodec(cqlType, javaType, isJavaCovariant); - } - - public interface MockCache { - void lookup( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant); - } - } - - public static class TextToPeriodCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.of(Period.class); - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TEXT; - } - - @Override - public ByteBuffer encode(Period value, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("not implemented for this test"); 
- } - - @Override - public Period decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("not implemented for this test"); - } - - @NonNull - @Override - public String format(Period value) { - throw new UnsupportedOperationException("not implemented for this test"); - } - - @Override - public Period parse(String value) { - throw new UnsupportedOperationException("not implemented for this test"); - } - } - - private static class A {} - - private static class B extends A {} - - private static class ACodec implements TypeCodec { - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.of(A.class); - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.INT; - } - - @Override - public ByteBuffer encode(A value, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public A decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("irrelevant"); - } - - @NonNull - @Override - public String format(A value) { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public A parse(String value) { - throw new UnsupportedOperationException("irrelevant"); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTestDataProviders.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTestDataProviders.java deleted file mode 100644 index 4c0298bafad..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTestDataProviders.java +++ /dev/null @@ -1,639 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; -import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.tngtech.java.junit.dataprovider.DataProvider; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.Inet4Address; -import java.net.InetAddress; -import 
java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.Collections; -import java.util.UUID; - -@SuppressWarnings("unused") -public class CachingCodecRegistryTestDataProviders { - - @DataProvider - public static Object[][] primitiveCodecs() { - return new Object[][] { - {TypeCodecs.BOOLEAN}, - {TypeCodecs.TINYINT}, - {TypeCodecs.DOUBLE}, - {TypeCodecs.COUNTER}, - {TypeCodecs.FLOAT}, - {TypeCodecs.INT}, - {TypeCodecs.BIGINT}, - {TypeCodecs.SMALLINT}, - {TypeCodecs.TIMESTAMP}, - {TypeCodecs.DATE}, - {TypeCodecs.TIME}, - {TypeCodecs.BLOB}, - {TypeCodecs.TEXT}, - {TypeCodecs.ASCII}, - {TypeCodecs.VARINT}, - {TypeCodecs.DECIMAL}, - {TypeCodecs.UUID}, - {TypeCodecs.TIMEUUID}, - {TypeCodecs.INET}, - {TypeCodecs.DURATION}, - }; - } - - @DataProvider - public static Object[][] primitiveCodecsWithValues() throws UnknownHostException { - return new Object[][] { - {true, TypeCodecs.BOOLEAN}, - {(byte) 0, TypeCodecs.TINYINT}, - {0.0, TypeCodecs.DOUBLE}, - {0.0f, TypeCodecs.FLOAT}, - {0, TypeCodecs.INT}, - {0L, TypeCodecs.BIGINT}, - {(short) 0, TypeCodecs.SMALLINT}, - {Instant.EPOCH, TypeCodecs.TIMESTAMP}, - {LocalDate.MIN, TypeCodecs.DATE}, - {LocalTime.MIDNIGHT, TypeCodecs.TIME}, - {ByteBuffer.allocate(0), TypeCodecs.BLOB}, - {"", TypeCodecs.TEXT}, - {BigInteger.ONE, TypeCodecs.VARINT}, - {BigDecimal.ONE, TypeCodecs.DECIMAL}, - {new UUID(2L, 1L), TypeCodecs.UUID}, - {InetAddress.getByName("127.0.0.1"), TypeCodecs.INET}, - {CqlDuration.newInstance(1, 2, 3), TypeCodecs.DURATION}, - }; - } - - @DataProvider - public static Object[][] primitiveCodecsWithCqlTypesAndValues() throws UnknownHostException { - return new Object[][] { - {DataTypes.BOOLEAN, true, TypeCodecs.BOOLEAN}, - {DataTypes.TINYINT, (byte) 0, TypeCodecs.TINYINT}, - {DataTypes.DOUBLE, 0.0, TypeCodecs.DOUBLE}, - {DataTypes.FLOAT, 0.0f, TypeCodecs.FLOAT}, - {DataTypes.INT, 0, TypeCodecs.INT}, - {DataTypes.BIGINT, 
0L, TypeCodecs.BIGINT}, - {DataTypes.SMALLINT, (short) 0, TypeCodecs.SMALLINT}, - {DataTypes.TIMESTAMP, Instant.EPOCH, TypeCodecs.TIMESTAMP}, - {DataTypes.DATE, LocalDate.MIN, TypeCodecs.DATE}, - {DataTypes.TIME, LocalTime.MIDNIGHT, TypeCodecs.TIME}, - {DataTypes.BLOB, ByteBuffer.allocate(0), TypeCodecs.BLOB}, - {DataTypes.TEXT, "", TypeCodecs.TEXT}, - {DataTypes.VARINT, BigInteger.ONE, TypeCodecs.VARINT}, - {DataTypes.DECIMAL, BigDecimal.ONE, TypeCodecs.DECIMAL}, - {DataTypes.UUID, new UUID(2L, 1L), TypeCodecs.UUID}, - {DataTypes.INET, InetAddress.getByName("127.0.0.1"), TypeCodecs.INET}, - {DataTypes.DURATION, CqlDuration.newInstance(1, 2, 3), TypeCodecs.DURATION}, - }; - } - - @DataProvider - public static Object[][] collectionsWithCqlAndJavaTypes() - throws UnknownHostException, ClassNotFoundException { - TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - TupleValue tupleValue = tupleType.newValue(); - UserDefinedType userType = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) - .build(); - UdtValue udtValue = userType.newValue(); - return new Object[][] { - // lists - { - DataTypes.listOf(DataTypes.INT), - GenericType.listOf(Integer.class), - GenericType.listOf(Integer.class), - ImmutableList.of(1) - }, - { - DataTypes.listOf(DataTypes.TEXT), - GenericType.listOf(String.class), - GenericType.listOf(String.class), - ImmutableList.of("foo") - }, - { - DataTypes.listOf(DataTypes.BLOB), - GenericType.listOf(ByteBuffer.class), - GenericType.listOf(Class.forName("java.nio.HeapByteBuffer")), - ImmutableList.of(ByteBuffer.wrap(new byte[] {127, 0, 0, 1})) - }, - { - DataTypes.listOf(DataTypes.INET), - GenericType.listOf(InetAddress.class), - GenericType.listOf(Inet4Address.class), - ImmutableList.of(InetAddress.getByAddress(new 
byte[] {127, 0, 0, 1})) - }, - { - DataTypes.listOf(tupleType), - GenericType.listOf(TupleValue.class), - GenericType.listOf(DefaultTupleValue.class), - ImmutableList.of(tupleValue) - }, - { - DataTypes.listOf(userType), - GenericType.listOf(UdtValue.class), - GenericType.listOf(DefaultUdtValue.class), - ImmutableList.of(udtValue) - }, - { - DataTypes.listOf(DataTypes.listOf(DataTypes.INT)), - GenericType.listOf(GenericType.listOf(Integer.class)), - GenericType.listOf(GenericType.listOf(Integer.class)), - ImmutableList.of(ImmutableList.of(1)) - }, - { - DataTypes.listOf(DataTypes.listOf(tupleType)), - GenericType.listOf(GenericType.listOf(TupleValue.class)), - GenericType.listOf(GenericType.listOf(DefaultTupleValue.class)), - ImmutableList.of(ImmutableList.of(tupleValue)) - }, - { - DataTypes.listOf(DataTypes.listOf(userType)), - GenericType.listOf(GenericType.listOf(UdtValue.class)), - GenericType.listOf(GenericType.listOf(DefaultUdtValue.class)), - ImmutableList.of(ImmutableList.of(udtValue)) - }, - // sets - { - DataTypes.setOf(DataTypes.INT), - GenericType.setOf(Integer.class), - GenericType.setOf(Integer.class), - ImmutableSet.of(1) - }, - { - DataTypes.setOf(DataTypes.TEXT), - GenericType.setOf(String.class), - GenericType.setOf(String.class), - ImmutableSet.of("foo") - }, - { - DataTypes.setOf(DataTypes.BLOB), - GenericType.setOf(ByteBuffer.class), - GenericType.setOf(Class.forName("java.nio.HeapByteBuffer")), - ImmutableSet.of(ByteBuffer.wrap(new byte[] {127, 0, 0, 1})) - }, - { - DataTypes.setOf(DataTypes.INET), - GenericType.setOf(InetAddress.class), - GenericType.setOf(Inet4Address.class), - ImmutableSet.of(InetAddress.getByAddress(new byte[] {127, 0, 0, 1})) - }, - { - DataTypes.setOf(tupleType), - GenericType.setOf(TupleValue.class), - GenericType.setOf(DefaultTupleValue.class), - ImmutableSet.of(tupleValue) - }, - { - DataTypes.setOf(userType), - GenericType.setOf(UdtValue.class), - GenericType.setOf(DefaultUdtValue.class), - ImmutableSet.of(udtValue) 
- }, - { - DataTypes.setOf(DataTypes.setOf(DataTypes.INT)), - GenericType.setOf(GenericType.setOf(Integer.class)), - GenericType.setOf(GenericType.setOf(Integer.class)), - ImmutableSet.of(ImmutableSet.of(1)) - }, - { - DataTypes.setOf(DataTypes.setOf(tupleType)), - GenericType.setOf(GenericType.setOf(TupleValue.class)), - GenericType.setOf(GenericType.setOf(DefaultTupleValue.class)), - ImmutableSet.of(ImmutableSet.of(tupleValue)) - }, - { - DataTypes.setOf(DataTypes.setOf(userType)), - GenericType.setOf(GenericType.setOf(UdtValue.class)), - GenericType.setOf(GenericType.setOf(DefaultUdtValue.class)), - ImmutableSet.of(ImmutableSet.of(udtValue)) - }, - // maps - { - DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT), - GenericType.mapOf(Integer.class, String.class), - GenericType.mapOf(Integer.class, String.class), - ImmutableMap.of(1, "foo") - }, - { - DataTypes.mapOf(DataTypes.BLOB, DataTypes.INET), - GenericType.mapOf(ByteBuffer.class, InetAddress.class), - GenericType.mapOf(Class.forName("java.nio.HeapByteBuffer"), Inet4Address.class), - ImmutableMap.of( - ByteBuffer.wrap(new byte[] {127, 0, 0, 1}), - InetAddress.getByAddress(new byte[] {127, 0, 0, 1})) - }, - { - DataTypes.mapOf(tupleType, tupleType), - GenericType.mapOf(TupleValue.class, TupleValue.class), - GenericType.mapOf(DefaultTupleValue.class, DefaultTupleValue.class), - ImmutableMap.of(tupleValue, tupleValue) - }, - { - DataTypes.mapOf(userType, userType), - GenericType.mapOf(UdtValue.class, UdtValue.class), - GenericType.mapOf(DefaultUdtValue.class, DefaultUdtValue.class), - ImmutableMap.of(udtValue, udtValue) - }, - { - DataTypes.mapOf(DataTypes.UUID, DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT)), - GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Integer.class, String.class)), - GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Integer.class, String.class)), - ImmutableMap.of(UUID.randomUUID(), ImmutableMap.of(1, "foo")) - }, - { - DataTypes.mapOf(DataTypes.mapOf(userType, userType), 
DataTypes.mapOf(tupleType, tupleType)), - GenericType.mapOf( - GenericType.mapOf(UdtValue.class, UdtValue.class), - GenericType.mapOf(TupleValue.class, TupleValue.class)), - GenericType.mapOf( - GenericType.mapOf(DefaultUdtValue.class, DefaultUdtValue.class), - GenericType.mapOf(DefaultTupleValue.class, DefaultTupleValue.class)), - ImmutableMap.of( - ImmutableMap.of(udtValue, udtValue), ImmutableMap.of(tupleValue, tupleValue)) - }, - // vectors - { - DataTypes.vectorOf(DataTypes.INT, 1), - GenericType.vectorOf(Integer.class), - GenericType.vectorOf(Integer.class), - CqlVector.newInstance(1) - }, - { - DataTypes.vectorOf(DataTypes.BIGINT, 1), - GenericType.vectorOf(Long.class), - GenericType.vectorOf(Long.class), - CqlVector.newInstance(1l) - }, - { - DataTypes.vectorOf(DataTypes.SMALLINT, 1), - GenericType.vectorOf(Short.class), - GenericType.vectorOf(Short.class), - CqlVector.newInstance((short) 1) - }, - { - DataTypes.vectorOf(DataTypes.TINYINT, 1), - GenericType.vectorOf(Byte.class), - GenericType.vectorOf(Byte.class), - CqlVector.newInstance((byte) 1) - }, - { - DataTypes.vectorOf(DataTypes.FLOAT, 1), - GenericType.vectorOf(Float.class), - GenericType.vectorOf(Float.class), - CqlVector.newInstance(1.0f) - }, - { - DataTypes.vectorOf(DataTypes.DOUBLE, 1), - GenericType.vectorOf(Double.class), - GenericType.vectorOf(Double.class), - CqlVector.newInstance(1.0d) - }, - { - DataTypes.vectorOf(DataTypes.DECIMAL, 1), - GenericType.vectorOf(BigDecimal.class), - GenericType.vectorOf(BigDecimal.class), - CqlVector.newInstance(BigDecimal.ONE) - }, - { - DataTypes.vectorOf(DataTypes.VARINT, 1), - GenericType.vectorOf(BigInteger.class), - GenericType.vectorOf(BigInteger.class), - CqlVector.newInstance(BigInteger.ONE) - }, - // vector with arbitrary types - { - DataTypes.vectorOf(DataTypes.TEXT, 2), - GenericType.vectorOf(String.class), - GenericType.vectorOf(String.class), - CqlVector.newInstance("abc", "de") - }, - { - DataTypes.vectorOf(DataTypes.TIME, 2), - 
GenericType.vectorOf(LocalTime.class), - GenericType.vectorOf(LocalTime.class), - CqlVector.newInstance(LocalTime.MIDNIGHT, LocalTime.NOON) - }, - { - DataTypes.vectorOf(DataTypes.vectorOf(DataTypes.TINYINT, 2), 2), - GenericType.vectorOf(GenericType.vectorOf(Byte.class)), - GenericType.vectorOf(GenericType.vectorOf(Byte.class)), - CqlVector.newInstance( - CqlVector.newInstance((byte) 1, (byte) 2), CqlVector.newInstance((byte) 3, (byte) 4)) - }, - }; - } - - @DataProvider - public static Object[][] emptyCollectionsWithCqlAndJavaTypes() { - TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - UserDefinedType userType = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) - .build(); - return new Object[][] { - // lists - { - DataTypes.listOf(DataTypes.INT), - GenericType.listOf(Integer.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(DataTypes.TEXT), - GenericType.listOf(String.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(DataTypes.BLOB), - GenericType.listOf(ByteBuffer.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(DataTypes.INET), - GenericType.listOf(InetAddress.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(tupleType), - GenericType.listOf(TupleValue.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(userType), - GenericType.listOf(UdtValue.class), - 
DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(DataTypes.listOf(DataTypes.INT)), - GenericType.listOf(GenericType.listOf(Integer.class)), - DataTypes.listOf(DataTypes.listOf(DataTypes.BOOLEAN)), - GenericType.listOf(GenericType.listOf(Boolean.class)), - ImmutableList.of(Collections.emptyList()) - }, - { - DataTypes.listOf(DataTypes.listOf(tupleType)), - GenericType.listOf(GenericType.listOf(TupleValue.class)), - DataTypes.listOf(DataTypes.listOf(DataTypes.BOOLEAN)), - GenericType.listOf(GenericType.listOf(Boolean.class)), - ImmutableList.of(Collections.emptyList()) - }, - { - DataTypes.listOf(DataTypes.listOf(userType)), - GenericType.listOf(GenericType.listOf(UdtValue.class)), - DataTypes.listOf(DataTypes.listOf(DataTypes.BOOLEAN)), - GenericType.listOf(GenericType.listOf(Boolean.class)), - ImmutableList.of(Collections.emptyList()) - }, - // sets - { - DataTypes.setOf(DataTypes.INT), - GenericType.setOf(Integer.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(DataTypes.TEXT), - GenericType.setOf(String.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(DataTypes.BLOB), - GenericType.setOf(ByteBuffer.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(DataTypes.INET), - GenericType.setOf(InetAddress.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(tupleType), - GenericType.setOf(TupleValue.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(userType), - GenericType.setOf(UdtValue.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - 
Collections.emptySet() - }, - { - DataTypes.setOf(DataTypes.setOf(DataTypes.INT)), - GenericType.setOf(GenericType.setOf(Integer.class)), - DataTypes.setOf(DataTypes.setOf(DataTypes.BOOLEAN)), - GenericType.setOf(GenericType.setOf(Boolean.class)), - ImmutableSet.of(Collections.emptySet()) - }, - { - DataTypes.setOf(DataTypes.setOf(tupleType)), - GenericType.setOf(GenericType.setOf(TupleValue.class)), - DataTypes.setOf(DataTypes.setOf(DataTypes.BOOLEAN)), - GenericType.setOf(GenericType.setOf(Boolean.class)), - ImmutableSet.of(Collections.emptySet()) - }, - { - DataTypes.setOf(DataTypes.setOf(userType)), - GenericType.setOf(GenericType.setOf(UdtValue.class)), - DataTypes.setOf(DataTypes.setOf(DataTypes.BOOLEAN)), - GenericType.setOf(GenericType.setOf(Boolean.class)), - ImmutableSet.of(Collections.emptySet()) - }, - // maps - { - DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT), - GenericType.mapOf(Integer.class, String.class), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - GenericType.mapOf(Boolean.class, Boolean.class), - Collections.emptyMap() - }, - { - DataTypes.mapOf(DataTypes.BLOB, DataTypes.INET), - GenericType.mapOf(ByteBuffer.class, InetAddress.class), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - GenericType.mapOf(Boolean.class, Boolean.class), - Collections.emptyMap() - }, - { - DataTypes.mapOf(tupleType, tupleType), - GenericType.mapOf(TupleValue.class, TupleValue.class), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - GenericType.mapOf(Boolean.class, Boolean.class), - Collections.emptyMap() - }, - { - DataTypes.mapOf(userType, userType), - GenericType.mapOf(UdtValue.class, UdtValue.class), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - GenericType.mapOf(Boolean.class, Boolean.class), - Collections.emptyMap() - }, - { - DataTypes.mapOf(DataTypes.UUID, DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT)), - GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Integer.class, String.class)), - 
DataTypes.mapOf(DataTypes.UUID, DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN)), - GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Boolean.class, Boolean.class)), - ImmutableMap.of(UUID.randomUUID(), Collections.emptyMap()) - }, - { - DataTypes.mapOf(DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT), DataTypes.UUID), - GenericType.mapOf(GenericType.mapOf(Integer.class, String.class), GenericType.UUID), - DataTypes.mapOf(DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), DataTypes.UUID), - GenericType.mapOf(GenericType.mapOf(Boolean.class, Boolean.class), GenericType.UUID), - ImmutableMap.of(Collections.emptyMap(), UUID.randomUUID()) - }, - { - DataTypes.mapOf(DataTypes.mapOf(userType, userType), DataTypes.mapOf(tupleType, tupleType)), - GenericType.mapOf( - GenericType.mapOf(UdtValue.class, UdtValue.class), - GenericType.mapOf(TupleValue.class, TupleValue.class)), - DataTypes.mapOf( - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN)), - GenericType.mapOf( - GenericType.mapOf(Boolean.class, Boolean.class), - GenericType.mapOf(Boolean.class, Boolean.class)), - ImmutableMap.of(Collections.emptyMap(), Collections.emptyMap()) - }, - }; - } - - @DataProvider - public static Object[][] collectionsWithNullElements() { - return new Object[][] { - { - Collections.singletonList(null), - "Can't infer list codec because the first element is null " - + "(note that CQL does not allow null values in collections)" - }, - { - Collections.singleton(null), - "Can't infer set codec because the first element is null " - + "(note that CQL does not allow null values in collections)" - }, - { - Collections.singletonMap("foo", null), - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not allow null values in collections)" - }, - { - Collections.singletonMap(null, "foo"), - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not 
allow null values in collections)" - }, - { - Collections.singletonMap(null, null), - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not allow null values in collections)" - }, - }; - } - - @DataProvider - public static Object[][] tuplesWithCqlTypes() { - TupleType tupleType1 = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT); - TupleType tupleType2 = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - TupleType tupleType3 = DataTypes.tupleOf(DataTypes.mapOf(tupleType1, tupleType2)); - TupleValue tupleValue1 = tupleType1.newValue(42, "foo"); - TupleValue tupleValue2 = tupleType2.newValue(42, ImmutableList.of("foo", "bar")); - return new Object[][] { - {tupleType1, tupleType1.newValue()}, - {tupleType1, tupleValue1}, - {tupleType2, tupleType2.newValue()}, - {tupleType2, tupleValue2}, - {tupleType3, tupleType3.newValue()}, - {tupleType3, tupleType3.newValue(ImmutableMap.of(tupleValue1, tupleValue2))}, - }; - } - - @DataProvider - public static Object[][] udtsWithCqlTypes() { - UserDefinedType userType1 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - UserDefinedType userType2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.setOf(DataTypes.BIGINT)) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) - .build(); - UserDefinedType userType3 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.mapOf(userType1, userType2)) - .build(); - UdtValue userValue1 = userType1.newValue(42, "foo"); - UdtValue userValue2 = - userType2.newValue(ImmutableSet.of(24L, 
43L), ImmutableList.of("foo", "bar")); - return new Object[][] { - {userType1, userType1.newValue()}, - {userType1, userValue1}, - {userType2, userType2.newValue()}, - {userType2, userValue2}, - {userType3, userType3.newValue()}, - {userType3, userType3.newValue(ImmutableMap.of(userValue1, userValue2))}, - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/util/VIntCodingTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/util/VIntCodingTest.java deleted file mode 100644 index b85d6d66844..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/util/VIntCodingTest.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.util; - -import static org.junit.Assert.assertEquals; - -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class VIntCodingTest { - @DataProvider - public static Object[] roundTripTestValues() { - return new Integer[] { - Integer.MAX_VALUE + 1, - Integer.MAX_VALUE, - Integer.MAX_VALUE - 1, - Integer.MIN_VALUE, - Integer.MIN_VALUE + 1, - Integer.MIN_VALUE - 1, - 0, - -1, - 1 - }; - }; - - private static final long[] LONGS = - new long[] { - 53L, - 10201L, - 1097151L, - 168435455L, - 33251130335L, - 3281283447775L, - 417672546086779L, - 52057592037927932L, - 72057594037927937L - }; - - @Test - public void should_compute_unsigned_vint_size() { - for (int i = 0; i < LONGS.length; i++) { - long val = LONGS[i]; - assertEquals(i + 1, VIntCoding.computeUnsignedVIntSize(val)); - } - } - - @Test - @UseDataProvider("roundTripTestValues") - public void should_write_and_read_unsigned_vint_32(int value) { - ByteBuffer bb = ByteBuffer.allocate(9); - - VIntCoding.writeUnsignedVInt32(value, bb); - bb.flip(); - assertEquals(value, VIntCoding.getUnsignedVInt32(bb, 0)); - } - - @Test - @UseDataProvider("roundTripTestValues") - public void should_write_and_read_unsigned_vint(int value) { - ByteBuffer bb = ByteBuffer.allocate(9); - - VIntCoding.writeUnsignedVInt(value, bb); - bb.flip(); - assertEquals(value, VIntCoding.getUnsignedVInt(bb, 0)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java deleted file mode 100644 index c2df6449fdb..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java +++ 
/dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.util.Random; -import org.junit.Test; - -public class ArrayUtilsTest { - - @Test - public void should_swap() { - String[] array = {"a", "b", "c"}; - ArrayUtils.swap(array, 0, 2); - assertThat(array).containsExactly("c", "b", "a"); - } - - @Test - public void should_swap_with_same_index() { - String[] array = {"a", "b", "c"}; - ArrayUtils.swap(array, 0, 0); - assertThat(array).containsExactly("a", "b", "c"); - } - - @Test - public void should_bubble_up() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleUp(array, 3, 1); - assertThat(array).containsExactly("a", "d", "b", "c", "e"); - } - - @Test - public void should_bubble_up_to_same_index() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleUp(array, 3, 3); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_not_bubble_up_when_target_index_higher() { - String[] 
array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleUp(array, 3, 5); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_bubble_down() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleDown(array, 1, 3); - assertThat(array).containsExactly("a", "c", "d", "b", "e"); - } - - @Test - public void should_bubble_down_to_same_index() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleDown(array, 3, 3); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_not_bubble_down_when_target_index_lower() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleDown(array, 4, 2); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_shuffle_head() { - String[] array = {"a", "b", "c", "d", "e"}; - Random random = mock(Random.class); - when(random.nextInt(anyInt())) - .thenAnswer( - (invocation) -> { - int i = invocation.getArgument(0); - // shifts elements by 1 to the right - return i - 2; - }); - ArrayUtils.shuffleHead(array, 3, random); - assertThat(array[0]).isEqualTo("c"); - assertThat(array[1]).isEqualTo("a"); - assertThat(array[2]).isEqualTo("b"); - // Tail elements should not move - assertThat(array[3]).isEqualTo("d"); - assertThat(array[4]).isEqualTo("e"); - } - - @Test(expected = ArrayIndexOutOfBoundsException.class) - public void should_fail_to_shuffle_head_when_count_is_too_high() { - ArrayUtils.shuffleHead(new String[] {"a", "b", "c"}, 5); - } - - @Test - public void should_rotate() { - String[] array = {"a", "b", "c", "d", "e"}; - - ArrayUtils.rotate(array, 1, 3, 1); - assertThat(array).containsExactly("a", "c", "d", "b", "e"); - - ArrayUtils.rotate(array, 0, 4, 2); - assertThat(array).containsExactly("d", "b", "a", "c", "e"); - - ArrayUtils.rotate(array, 2, 3, 10); - assertThat(array).containsExactly("d", "b", "c", "e", "a"); - } - - @Test - public void 
should_not_rotate_when_amount_multiple_of_range_size() { - String[] array = {"a", "b", "c", "d", "e"}; - - ArrayUtils.rotate(array, 1, 3, 9); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_not_rotate_when_range_is_singleton_or_empty() { - String[] array = {"a", "b", "c", "d", "e"}; - - ArrayUtils.rotate(array, 1, 1, 3); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - - ArrayUtils.rotate(array, 1, 0, 3); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java deleted file mode 100644 index f526e2f12d4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import java.nio.ByteBuffer; - -/** Helper class to create {@link io.netty.buffer.ByteBuf} instances in tests. 
*/ -public class ByteBufs { - public static ByteBuf wrap(int... bytes) { - ByteBuf bb = ByteBufAllocator.DEFAULT.buffer(bytes.length); - for (int b : bytes) { - bb.writeByte(b); - } - return bb; - } - - public static ByteBuf fromHexString(String hexString) { - ByteBuffer tmp = Bytes.fromHexString(hexString); - ByteBuf target = ByteBufAllocator.DEFAULT.buffer(tmp.remaining()); - target.writeBytes(tmp); - return target; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/CollectionsUtilsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/CollectionsUtilsTest.java deleted file mode 100644 index 5a95e7f3b74..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/CollectionsUtilsTest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.List; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class CollectionsUtilsTest { - @Test - @UseDataProvider("listsProvider") - public void should_combine_two_lists_by_index( - List firstList, List secondList, Map expected) { - - // when - Map result = - CollectionsUtils.combineListsIntoOrderedMap(firstList, secondList); - - // then - assertThat(result).isEqualTo(expected); - } - - @Test - public void should_throw_if_lists_have_not_matching_size() { - // given - List list1 = ImmutableList.of(1); - List list2 = ImmutableList.of(1, 2); - - // when - assertThatThrownBy(() -> CollectionsUtils.combineListsIntoOrderedMap(list1, list2)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageMatching("Cannot combine lists with not matching sizes"); - } - - @DataProvider - public static Object[][] listsProvider() { - - return new Object[][] { - {ImmutableList.of(1), ImmutableList.of(1), ImmutableMap.of(1, 1)}, - {ImmutableList.of(1, 10, 5), ImmutableList.of(1, 10, 5), ImmutableMap.of(1, 1, 10, 10, 5, 5)}, - {ImmutableList.of(1, 1), ImmutableList.of(1, 2), ImmutableMap.of(1, 2)} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java deleted file mode 100644 index 1b37a5e5b19..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.junit.Test; - -public class DirectedGraphTest { - - @Test - public void should_sort_empty_graph() { - DirectedGraph g = new DirectedGraph<>(); - assertThat(g.topologicalSort()).isEmpty(); - } - - @Test - public void should_sort_graph_with_one_node() { - DirectedGraph g = new DirectedGraph<>("A"); - assertThat(g.topologicalSort()).containsExactly("A"); - } - - @Test - public void should_sort_complex_graph() { - // H G - // / \ /\ - // F | E - // \ / / - // D / - // / \/ - // B C - // | - // A - DirectedGraph g = new DirectedGraph<>("A", "B", "C", "D", "E", "F", "G", "H"); - g.addEdge("H", "F"); - g.addEdge("G", "E"); - g.addEdge("H", "D"); - g.addEdge("F", "D"); - g.addEdge("G", "D"); - g.addEdge("D", "C"); - g.addEdge("E", "C"); - g.addEdge("D", "B"); - g.addEdge("B", "A"); - - // The graph uses linked hash maps internally, so this order will be consistent across JVMs - assertThat(g.topologicalSort()).containsExactly("G", 
"H", "E", "F", "D", "C", "B", "A"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_sort_if_graph_has_a_cycle() { - DirectedGraph g = new DirectedGraph<>("A", "B", "C"); - g.addEdge("A", "B"); - g.addEdge("B", "C"); - g.addEdge("C", "B"); - - g.topologicalSort(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_sort_if_graph_is_a_cycle() { - DirectedGraph g = new DirectedGraph<>("A", "B", "C"); - g.addEdge("A", "B"); - g.addEdge("B", "C"); - g.addEdge("C", "A"); - - g.topologicalSort(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/LoggerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/LoggerTest.java deleted file mode 100644 index eec3669efca..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/LoggerTest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util; - -import static org.mockito.Mockito.mock; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import org.mockito.ArgumentCaptor; -import org.slf4j.LoggerFactory; - -public class LoggerTest { - public static LoggerSetup setupTestLogger(Class clazz, Level levelToCapture) { - @SuppressWarnings("unchecked") - Appender appender = (Appender) mock(Appender.class); - - ArgumentCaptor loggingEventCaptor = ArgumentCaptor.forClass(ILoggingEvent.class); - Logger logger = (Logger) LoggerFactory.getLogger(clazz); - Level originalLoggerLevel = logger.getLevel(); - logger.setLevel(levelToCapture); - logger.addAppender(appender); - return new LoggerSetup(appender, originalLoggerLevel, logger, loggingEventCaptor); - } - - public static class LoggerSetup { - - private final Level originalLoggerLevel; - public final Appender appender; - public final Logger logger; - public ArgumentCaptor loggingEventCaptor; - - private LoggerSetup( - Appender appender, - Level originalLoggerLevel, - Logger logger, - ArgumentCaptor loggingEventCaptor) { - this.appender = appender; - this.originalLoggerLevel = originalLoggerLevel; - this.logger = logger; - this.loggingEventCaptor = loggingEventCaptor; - } - - public void close() { - logger.detachAppender(appender); - logger.setLevel(originalLoggerLevel); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java deleted file mode 100644 index f2614775be4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.config.typesafe.TypesafeDriverConfig; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.specex.NoSpeculativeExecutionPolicy; -import com.typesafe.config.ConfigFactory; -import java.util.Map; -import org.junit.Test; - -public class ReflectionTest { - - @Test - public void should_build_policies_per_profile() { - String configSource = - "advanced.speculative-execution-policy {\n" - + " class = ConstantSpeculativeExecutionPolicy\n" - + " max-executions = 3\n" - + " delay = 100 milliseconds\n" - + "}\n" - + "profiles {\n" - // Inherits from default profile - + " profile1 {}\n" - // Inherits but changes one option - + " profile2 { \n" - + " 
advanced.speculative-execution-policy.max-executions = 2" - + " }\n" - // Same as previous profile, should share the same policy instance - + " profile3 { \n" - + " advanced.speculative-execution-policy.max-executions = 2" - + " }\n" - // Completely overrides default profile - + " profile4 { \n" - + " advanced.speculative-execution-policy.class = NoSpeculativeExecutionPolicy\n" - + " }\n" - + "}\n"; - InternalDriverContext context = mock(InternalDriverContext.class); - TypesafeDriverConfig config = new TypesafeDriverConfig(ConfigFactory.parseString(configSource)); - when(context.getConfig()).thenReturn(config); - - Map policies = - Reflection.buildFromConfigProfiles( - context, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY, - SpeculativeExecutionPolicy.class, - "com.datastax.oss.driver.internal.core.specex"); - - assertThat(policies).hasSize(5); - SpeculativeExecutionPolicy defaultPolicy = policies.get(DriverExecutionProfile.DEFAULT_NAME); - SpeculativeExecutionPolicy policy1 = policies.get("profile1"); - SpeculativeExecutionPolicy policy2 = policies.get("profile2"); - SpeculativeExecutionPolicy policy3 = policies.get("profile3"); - SpeculativeExecutionPolicy policy4 = policies.get("profile4"); - assertThat(defaultPolicy) - .isInstanceOf(ConstantSpeculativeExecutionPolicy.class) - .isSameAs(policy1); - assertThat(policy2).isInstanceOf(ConstantSpeculativeExecutionPolicy.class).isSameAs(policy3); - assertThat(policy4).isInstanceOf(NoSpeculativeExecutionPolicy.class); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/StringsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/StringsTest.java deleted file mode 100644 index d5cc9dae161..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/StringsTest.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.TestDataProviders; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class StringsTest { - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_report_cql_keyword(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - - assertThat(Strings.isReservedCqlKeyword(null)).isFalse(); - assertThat(Strings.isReservedCqlKeyword("NOT A RESERVED KEYWORD")).isFalse(); - - assertThat(Strings.isReservedCqlKeyword("add")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("allow")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("alter")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("and")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("apply")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("asc")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("authorize")).isTrue(); - 
assertThat(Strings.isReservedCqlKeyword("batch")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("begin")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("by")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("columnfamily")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("create")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("default")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("delete")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("desc")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("describe")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("drop")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("entries")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("execute")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("from")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("full")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("grant")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("if")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("in")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("index")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("infinity")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("insert")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("into")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("is")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("keyspace")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("limit")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("materialized")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("mbean")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("mbeans")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("modify")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("nan")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("norecursive")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("not")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("null")).isTrue(); 
- assertThat(Strings.isReservedCqlKeyword("of")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("on")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("or")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("order")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("primary")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("rename")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("replace")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("revoke")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("schema")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("select")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("set")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("table")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("to")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("token")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("truncate")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("unlogged")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("unset")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("update")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("use")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("using")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("view")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("where")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("with")).isTrue(); - - assertThat(Strings.isReservedCqlKeyword("ALLOW")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("ALTER")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("AND")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("APPLY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("ASC")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("AUTHORIZE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("BATCH")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("BEGIN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("BY")).isTrue(); - 
assertThat(Strings.isReservedCqlKeyword("COLUMNFAMILY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("CREATE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DEFAULT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DELETE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DESC")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DESCRIBE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DROP")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("ENTRIES")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("EXECUTE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("FROM")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("FULL")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("GRANT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("IF")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("IN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("INDEX")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("INFINITY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("INSERT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("INTO")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("IS")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("KEYSPACE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("LIMIT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("MATERIALIZED")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("MBEAN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("MBEANS")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("MODIFY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("NAN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("NORECURSIVE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("NOT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("NULL")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("OF")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("ON")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("OR")).isTrue(); - 
assertThat(Strings.isReservedCqlKeyword("ORDER")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("PRIMARY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("RENAME")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("REPLACE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("REVOKE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("SCHEMA")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("SELECT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("SET")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("TABLE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("TO")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("TOKEN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("TRUNCATE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("UNLOGGED")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("UNSET")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("UPDATE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("USE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("USING")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("VIEW")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("WHERE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("WITH")).isTrue(); - } finally { - Locale.setDefault(def); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlanTest.java deleted file mode 100644 index 1adc06a79d3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlanTest.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class CompositeQueryPlanTest extends QueryPlanTestBase { - - @Override - protected QueryPlan newQueryPlan(Node... nodes) { - Object[] n1 = new Object[nodes.length / 2]; - Object[] n2 = new Object[nodes.length - n1.length]; - System.arraycopy(nodes, 0, n1, 0, n1.length); - System.arraycopy(nodes, n1.length, n2, 0, n2.length); - return new CompositeQueryPlan( - new SimpleQueryPlan(n1), - new LazyQueryPlan() { - @Override - protected Object[] computeNodes() { - return n2; - } - }); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlanTest.java deleted file mode 100644 index 99c72bace06..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlanTest.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class LazyQueryPlanTest extends QueryPlanTestBase { - - @Override - protected QueryPlan newQueryPlan(Node... nodes) { - return new LazyQueryPlan() { - @Override - protected Object[] computeNodes() { - return nodes; - } - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTestBase.java deleted file mode 100644 index 8689c282117..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTestBase.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import java.util.Comparator; -import java.util.Iterator; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public abstract class QueryPlanTestBase { - - @Mock private Node node1; - @Mock private Node node2; - @Mock private Node node3; - - @Test - public void should_poll_elements() { - QueryPlan queryPlan = newQueryPlan(node1, node2, node3); - assertThat(queryPlan.poll()).isSameAs(node1); - assertThat(queryPlan.poll()).isSameAs(node2); - assertThat(queryPlan.poll()).isSameAs(node3); - assertThat(queryPlan.poll()).isNull(); - assertThat(queryPlan.poll()).isNull(); - } - - @Test - public void should_poll_elements_concurrently() throws InterruptedException { - for (int runs = 0; runs < 5; runs++) { - Node[] nodes = new Node[1000]; - for (int i = 0; i < 1000; i++) { - nodes[i] = mock(Node.class, "node" + i); - when(nodes[i].getOpenConnections()).thenReturn(i); - } - QueryPlan queryPlan = newQueryPlan(nodes); - Set actual = - new ConcurrentSkipListSet<>(Comparator.comparingInt(Node::getOpenConnections)); - Thread[] threads = new Thread[5]; - for (int i = 0; i < 5; i++) { - threads[i] = - new Thread( 
- () -> { - while (true) { - Node node = queryPlan.poll(); - if (node == null) { - return; - } - actual.add(node); - } - }); - } - for (Thread thread : threads) { - thread.start(); - } - for (Thread thread : threads) { - thread.join(); - } - assertThat(actual).hasSize(1000); - Iterator iterator = actual.iterator(); - for (int i = 0; iterator.hasNext(); i++) { - Node node = iterator.next(); - assertThat(node.getOpenConnections()).isEqualTo(i); - } - } - } - - @Test - public void should_return_size() { - QueryPlan queryPlan = newQueryPlan(node1, node2, node3); - assertThat(queryPlan.size()).isEqualTo(3); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(2); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(1); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(0); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(0); - } - - @Test - public void should_return_iterator() { - QueryPlan queryPlan = newQueryPlan(node1, node2, node3); - Iterator iterator3 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator2 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator1 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator0 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator00 = queryPlan.iterator(); - - assertThat(iterator3).toIterable().containsExactly(node1, node2, node3); - assertThat(iterator2).toIterable().containsExactly(node2, node3); - assertThat(iterator1).toIterable().containsExactly(node3); - assertThat(iterator0).toIterable().isEmpty(); - assertThat(iterator00).toIterable().isEmpty(); - } - - protected abstract QueryPlan newQueryPlan(Node... 
nodes); -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlanTest.java deleted file mode 100644 index 31e3e1006d7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlanTest.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class SimpleQueryPlanTest extends QueryPlanTestBase { - - @Override - protected QueryPlan newQueryPlan(Node... 
nodes) { - return new SimpleQueryPlan((Object[]) nodes); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java deleted file mode 100644 index 88cdfa80104..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datastax.oss.driver.internal.core.util.concurrent; - -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.TimerTask; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Implementation of Netty's {@link io.netty.util.Timer Timer} interface to capture scheduled {@link - * io.netty.util.Timeout Timeouts} instead of running them, so they can be run manually in tests. 
- */ -public class CapturingTimer implements Timer { - - private final ArrayBlockingQueue timeoutQueue = new ArrayBlockingQueue<>(16); - - @Override - public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { - // delay and unit are not needed as the Timeout's TimerTask will be run manually - CapturedTimeout timeout = new CapturedTimeout(task, this, delay, unit); - // add the timeout to the queue - timeoutQueue.add(timeout); - return timeout; - } - - /** - * Retrieves the next scheduled Timeout. In tests, this will usually be a request timeout or a - * speculative execution. Tests will need be able to predict the ordering as it is not easy to - * tell from the returned Timeout itself. - */ - public CapturedTimeout getNextTimeout() { - return timeoutQueue.poll(); - } - - @Override - public Set stop() { - if (timeoutQueue.isEmpty()) { - return Collections.emptySet(); - } - Set timeoutsRemaining = new HashSet<>(timeoutQueue.size()); - for (Timeout t : timeoutQueue) { - if (t != null) { - t.cancel(); - timeoutsRemaining.add(t); - } - } - return timeoutsRemaining; - } - - /** - * Implementation of Netty's {@link io.netty.util.Timeout Timeout} interface. It is just a simple - * class that keeps track of the {@link io.netty.util.TimerTask TimerTask} and the {@link - * io.netty.util.Timer Timer} implementation that should only be used in tests. The intended use - * is to call the {@link io.netty.util.TimerTask#run(io.netty.util.Timeout) run()} method on the - * TimerTask when you want to execute the task (so you don't have to depend on a real timer). - * - *

Example: - * - *

{@code
-   * // get the next timeout from the timer
-   * Timeout t = timer.getNextTimeout();
-   * // run the TimerTask associated with the timeout
-   * t.task.run(t);
-   * }
- */ - public static class CapturedTimeout implements Timeout { - - private final TimerTask task; - private final CapturingTimer timer; - private final long delay; - private final TimeUnit unit; - private final AtomicBoolean cancelled = new AtomicBoolean(false); - - private CapturedTimeout(TimerTask task, CapturingTimer timer, long delay, TimeUnit unit) { - this.task = task; - this.timer = timer; - this.delay = delay; - this.unit = unit; - } - - @Override - public Timer timer() { - return timer; - } - - @Override - public TimerTask task() { - return task; - } - - public long getDelay(TimeUnit targetUnit) { - return targetUnit.convert(delay, unit); - } - - @Override - public boolean isExpired() { - return false; - } - - @Override - public boolean isCancelled() { - return cancelled.get(); - } - - @Override - public boolean cancel() { - return cancelled.compareAndSet(false, true); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFuturesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFuturesTest.java deleted file mode 100644 index 04f96f185fd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFuturesTest.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.Assert.fail; - -import java.util.Arrays; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import org.junit.Test; - -public class CompletableFuturesTest { - @Test - public void should_not_suppress_identical_exceptions() throws Exception { - RuntimeException error = new RuntimeException(); - CompletableFuture future1 = new CompletableFuture<>(); - future1.completeExceptionally(error); - CompletableFuture future2 = new CompletableFuture<>(); - future2.completeExceptionally(error); - try { - // if timeout exception is thrown, it indicates that CompletableFutures.allSuccessful() - // did not complete the returned future and potentially caller will wait infinitely - CompletableFutures.allSuccessful(Arrays.asList(future1, future2)) - .toCompletableFuture() - .get(1, TimeUnit.SECONDS); - fail(); - } catch (ExecutionException e) { - assertThat(e.getCause()).isEqualTo(error); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java deleted file mode 100644 index 74e0801ff61..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ThreadFactoryBuilder; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import org.junit.Test; - -public class CycleDetectorTest { - - @Test - public void should_detect_cycle_within_same_thread() { - CycleDetector checker = new CycleDetector("Detected cycle", true); - CyclicContext context = new CyclicContext(checker, false); - try { - context.a.get(); - fail("Expected an exception"); - } catch (Exception e) { - assertThat(e) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Detected cycle"); - } - } - - @Test - public void should_detect_cycle_between_different_threads() throws Throwable { - CycleDetector checker = new 
CycleDetector("Detected cycle", true); - CyclicContext context = new CyclicContext(checker, true); - ExecutorService executor = - Executors.newFixedThreadPool( - 3, new ThreadFactoryBuilder().setNameFormat("thread%d").build()); - Future futureA = executor.submit(() -> context.a.get()); - Future futureB = executor.submit(() -> context.b.get()); - Future futureC = executor.submit(() -> context.c.get()); - context.latchA.countDown(); - context.latchB.countDown(); - context.latchC.countDown(); - for (Future future : ImmutableList.of(futureA, futureB, futureC)) { - try { - Uninterruptibles.getUninterruptibly(future); - } catch (ExecutionException e) { - assertThat(e.getCause()) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Detected cycle"); - } - } - } - - private static class CyclicContext { - private LazyReference a; - private LazyReference b; - private LazyReference c; - private CountDownLatch latchA; - private CountDownLatch latchB; - private CountDownLatch latchC; - - private CyclicContext(CycleDetector checker, boolean enableLatches) { - this.a = new LazyReference<>("a", this::buildA, checker); - this.b = new LazyReference<>("b", this::buildB, checker); - this.c = new LazyReference<>("c", this::buildC, checker); - if (enableLatches) { - this.latchA = new CountDownLatch(1); - this.latchB = new CountDownLatch(1); - this.latchC = new CountDownLatch(1); - } - } - - private String buildA() { - maybeAwaitUninterruptibly(latchA); - b.get(); - return "a"; - } - - private String buildB() { - maybeAwaitUninterruptibly(latchB); - c.get(); - return "b"; - } - - private String buildC() { - maybeAwaitUninterruptibly(latchC); - a.get(); - return "c"; - } - - private static void maybeAwaitUninterruptibly(CountDownLatch latch) { - if (latch != null) { - try { - latch.await(); - } catch (InterruptedException e) { - fail("interrupted", e); - } - } - } - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java deleted file mode 100644 index 71c844e7051..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyLong; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ScheduledFuture; -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DebouncerTest { - - private static final Duration DEFAULT_WINDOW = Duration.ofSeconds(1); - private static final int DEFAULT_MAX_EVENTS = 10; - - @Mock private EventExecutor adminExecutor; - @Mock private ScheduledFuture scheduledFuture; - private List results; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(adminExecutor.inEventLoop()).thenReturn(true); - when(adminExecutor.schedule( - any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS))) - .thenAnswer((i) -> scheduledFuture); - results = new ArrayList<>(); - } - - private String coalesce(List events) { - return Joiner.on(",").join(events); - } - - private void flush(String result) { - results.add(result); - } - - @Test - public void should_flush_synchronously_if_window_is_zero() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, Duration.ZERO, DEFAULT_MAX_EVENTS); - - debouncer.receive(1); - debouncer.receive(2); - - verify(adminExecutor, never()).schedule(any(Runnable.class), 
anyLong(), any(TimeUnit.class)); - - assertThat(results).containsExactly("1", "2"); - } - - @Test - public void should_flush_synchronously_if_max_events_is_one() { - Debouncer debouncer = - new Debouncer<>(adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, 1); - - debouncer.receive(1); - debouncer.receive(2); - - verify(adminExecutor, never()).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)); - - assertThat(results).containsExactly("1", "2"); - } - - @Test - public void should_debounce_after_time_window_if_no_other_event() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - debouncer.receive(1); - - // a task should have been scheduled, run it - ArgumentCaptor captor = ArgumentCaptor.forClass(Runnable.class); - verify(adminExecutor) - .schedule(captor.capture(), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - captor.getValue().run(); - - // the element should have been flushed - assertThat(results).containsExactly("1"); - } - - @Test - public void should_reset_time_window_when_new_event() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - debouncer.receive(1); - debouncer.receive(2); - - InOrder inOrder = inOrder(adminExecutor, scheduledFuture); - - // a first task should have been scheduled, and then cancelled - inOrder - .verify(adminExecutor) - .schedule(any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - inOrder.verify(scheduledFuture).cancel(true); - - // a second task should have been scheduled, run it - ArgumentCaptor captor = ArgumentCaptor.forClass(Runnable.class); - inOrder - .verify(adminExecutor) - .schedule(captor.capture(), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - captor.getValue().run(); - - // both elements should have been flushed together - assertThat(results).containsExactly("1,2"); - } - - @Test - public void 
should_force_flush_after_max_events() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - for (int i = 0; i < 10; i++) { - debouncer.receive(i); - } - verify(adminExecutor, times(9)) - .schedule(any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - verify(scheduledFuture, times(9)).cancel(true); - assertThat(results).containsExactly("0,1,2,3,4,5,6,7,8,9"); - } - - @Test - public void should_cancel_next_flush_when_stopped() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - - debouncer.receive(1); - verify(adminExecutor) - .schedule(any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - - debouncer.stop(); - verify(scheduledFuture).cancel(true); - } - - @Test - public void should_ignore_new_events_when_flushed() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - debouncer.stop(); - - debouncer.receive(1); - verify(adminExecutor, never()) - .schedule(any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombinerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombinerTest.java deleted file mode 100644 index 45d0239b604..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombinerTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; - -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ImmediateEventExecutor; -import io.netty.util.concurrent.Promise; -import java.io.IOException; -import org.junit.Test; - -public class PromiseCombinerTest { - - private final EventExecutor executor = ImmediateEventExecutor.INSTANCE; - - @Test - public void should_complete_normally_if_all_parents_complete_normally() { - // given - Promise promise = executor.newPromise(); - Promise parent1 = executor.newPromise(); - Promise parent2 = executor.newPromise(); - // when - PromiseCombiner.combine(promise, parent1, parent2); - parent1.setSuccess(null); - parent2.setSuccess(null); - // then - assertThat(promise.isSuccess()).isTrue(); - } - - @Test - public void should_complete_exceptionally_if_any_parent_completes_exceptionally() { - // given - Promise promise = executor.newPromise(); - Promise parent1 = executor.newPromise(); - Promise parent2 = executor.newPromise(); - Promise parent3 = executor.newPromise(); - NullPointerException npe = new NullPointerException(); - IOException ioe = new IOException(); - // when - PromiseCombiner.combine(promise, parent1, parent2, parent3); - parent1.setSuccess(null); - parent2.setFailure(npe); - parent3.setFailure(ioe); - // then - 
assertThat(promise.isSuccess()).isFalse(); - assertThat(promise.cause()).isSameAs(npe).hasSuppressedException(ioe); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java deleted file mode 100644 index 0e541c13f92..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy.ReconnectionSchedule; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.util.concurrent.EventExecutor; -import java.time.Duration; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class ReconnectionTest { - - @Mock private ReconnectionSchedule reconnectionSchedule; - @Mock private Runnable onStartCallback; - @Mock private Runnable onStopCallback; - private EmbeddedChannel channel; - - private MockReconnectionTask reconnectionTask; - private Reconnection reconnection; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - // Unfortunately Netty does not expose EmbeddedEventLoop, so we have to go through a channel - channel = new EmbeddedChannel(); - EventExecutor eventExecutor = channel.eventLoop(); - - reconnectionTask = new MockReconnectionTask(); - reconnection = - new Reconnection( - "test", - eventExecutor, - () -> reconnectionSchedule, - reconnectionTask, - onStartCallback, - onStopCallback); - } - - @Test - public void should_start_out_not_running() { - assertThat(reconnection.isRunning()).isFalse(); - } - - @Test - public void 
should_schedule_first_attempt_on_start() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofSeconds(1)); - - // When - reconnection.start(); - - // Then - verify(reconnectionSchedule).nextDelay(); - assertThat(reconnection.isRunning()).isTrue(); - verify(onStartCallback).run(); - } - - @Test - public void should_ignore_start_if_already_started() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofSeconds(1)); - reconnection.start(); - verify(reconnectionSchedule).nextDelay(); - verify(onStartCallback).run(); - - // When - reconnection.start(); - - // Then - verifyNoMoreInteractions(reconnectionSchedule, onStartCallback); - } - - @Test - public void should_stop_if_first_attempt_succeeds() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - verify(reconnectionSchedule).nextDelay(); - - // When - // the reconnection task is scheduled: - runPendingTasks(); - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // the reconnection task completes: - reconnectionTask.complete(true); - runPendingTasks(); - - // Then - assertThat(reconnection.isRunning()).isFalse(); - verify(onStopCallback).run(); - } - - @Test - public void should_reschedule_if_first_attempt_fails() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - verify(reconnectionSchedule).nextDelay(); - - // When - // the reconnection task is scheduled: - runPendingTasks(); - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // the reconnection task completes: - reconnectionTask.complete(false); - runPendingTasks(); - - // Then - // schedule was called again - verify(reconnectionSchedule, times(2)).nextDelay(); - runPendingTasks(); - // task was called again - assertThat(reconnectionTask.callCount()).isEqualTo(2); - // still running - assertThat(reconnection.isRunning()).isTrue(); - - // When - // second attempt completes - 
reconnectionTask.complete(true); - runPendingTasks(); - - // Then - assertThat(reconnection.isRunning()).isFalse(); - verify(onStopCallback).run(); - } - - @Test - public void should_reconnect_now_if_next_attempt_not_started() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - reconnection.start(); - verify(reconnectionSchedule).nextDelay(); - - // When - reconnection.reconnectNow(false); - runPendingTasks(); - - // Then - // reconnection task was run immediately - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // if that attempt fails, another reconnection should be scheduled - reconnectionTask.complete(false); - runPendingTasks(); - verify(reconnectionSchedule, times(2)).nextDelay(); - } - - @Test - public void should_reconnect_now_if_stopped_and_forced() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - assertThat(reconnection.isRunning()).isFalse(); - - // When - reconnection.reconnectNow(true); - runPendingTasks(); - - // Then - // reconnection task was run immediately - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // if that attempt failed, another reconnection was scheduled - reconnectionTask.complete(false); - runPendingTasks(); - verify(reconnectionSchedule).nextDelay(); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "booleans") - public void should_reconnect_now_when_attempt_in_progress(boolean force) { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - runPendingTasks(); - // the next scheduled attempt has started, but not completed yet - assertThat(reconnectionTask.callCount()).isEqualTo(1); - - // When - reconnection.reconnectNow(force); - runPendingTasks(); - - // Then - // reconnection task should not have been called again - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // should still run until current attempt completes - 
assertThat(reconnection.isRunning()).isTrue(); - reconnectionTask.complete(true); - runPendingTasks(); - assertThat(reconnection.isRunning()).isFalse(); - } - - @Test - public void should_not_reconnect_now_if_stopped_and_not_forced() { - // Given - assertThat(reconnection.isRunning()).isFalse(); - - // When - reconnection.reconnectNow(false); - runPendingTasks(); - - // Then - assertThat(reconnectionTask.callCount()).isEqualTo(0); - } - - @Test - public void should_stop_between_attempts() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofSeconds(10)); - reconnection.start(); - runPendingTasks(); - verify(reconnectionSchedule).nextDelay(); - - // When - reconnection.stop(); - runPendingTasks(); - - // Then - verify(onStopCallback).run(); - assertThat(reconnection.isRunning()).isFalse(); - } - - @Test - public void should_restart_after_stopped_between_attempts() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofSeconds(10)); - reconnection.start(); - runPendingTasks(); - verify(reconnectionSchedule).nextDelay(); - reconnection.stop(); - runPendingTasks(); - assertThat(reconnection.isRunning()).isFalse(); - - // When - reconnection.start(); - runPendingTasks(); - - // Then - verify(reconnectionSchedule, times(2)).nextDelay(); - assertThat(reconnection.isRunning()).isTrue(); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "booleans") - public void should_stop_while_attempt_in_progress(boolean outcome) { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - runPendingTasks(); - // the next scheduled attempt has started, but not completed yet - assertThat(reconnectionTask.callCount()).isEqualTo(1); - verify(onStartCallback).run(); - - // When - reconnection.stop(); - runPendingTasks(); - - // Then - // should let the current attempt complete (whatever its outcome), and become stopped only then - 
assertThat(reconnection.isRunning()).isTrue(); - verifyNoMoreInteractions(onStopCallback); - reconnectionTask.complete(outcome); - runPendingTasks(); - verify(onStopCallback).run(); - assertThat(reconnection.isRunning()).isFalse(); - } - - @Test - public void should_restart_after_stopped_while_attempt_in_progress() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - runPendingTasks(); - // the next scheduled attempt has started, but not completed yet - assertThat(reconnectionTask.callCount()).isEqualTo(1); - verify(onStartCallback).run(); - // now stop - reconnection.stop(); - runPendingTasks(); - assertThat(reconnection.isRunning()).isTrue(); - - // When - reconnection.start(); - runPendingTasks(); - - // Then - assertThat(reconnection.isRunning()).isTrue(); - // still waiting on the same attempt, should not have called the task again - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // because we were still in progress all the time, to the outside it's as if the stop/restart - // had never happened - verifyNoMoreInteractions(onStartCallback); - verifyNoMoreInteractions(onStopCallback); - - // When - reconnectionTask.complete(true); - runPendingTasks(); - - // Then - assertThat(reconnection.isRunning()).isFalse(); - verify(onStopCallback).run(); - } - - private void runPendingTasks() { - channel.runPendingTasks(); - } - - private static class MockReconnectionTask implements Callable> { - private volatile CompletableFuture nextResult; - private final AtomicInteger callCount = new AtomicInteger(); - - @Override - public CompletionStage call() throws Exception { - assertThat(nextResult == null || nextResult.isDone()).isTrue(); - callCount.incrementAndGet(); - nextResult = new CompletableFuture<>(); - return nextResult; - } - - private void complete(boolean outcome) { - assertThat(nextResult != null || !nextResult.isDone()).isTrue(); - nextResult.complete(outcome); - nextResult = null; - } - - 
private int callCount() { - return callCount.get(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java deleted file mode 100644 index 65a2ee69b76..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.util.ArrayList; -import java.util.List; -import org.junit.Before; -import org.junit.Test; - -public class ReplayingEventFilterTest { - private ReplayingEventFilter filter; - private List filteredEvents; - - @Before - public void setup() { - filteredEvents = new ArrayList<>(); - filter = new ReplayingEventFilter<>(filteredEvents::add); - } - - @Test - public void should_discard_events_until_started() { - filter.accept(1); - filter.accept(2); - assertThat(filteredEvents).isEmpty(); - } - - @Test - public void should_accumulate_events_when_started() { - filter.accept(1); - filter.accept(2); - filter.start(); - filter.accept(3); - filter.accept(4); - assertThat(filter.recordedEvents()).containsExactly(3, 4); - } - - @Test - public void should_flush_accumulated_events_when_ready() { - filter.accept(1); - filter.accept(2); - filter.start(); - filter.accept(3); - filter.accept(4); - filter.markReady(); - assertThat(filteredEvents).containsExactly(3, 4); - filter.accept(5); - filter.accept(6); - assertThat(filteredEvents).containsExactly(3, 4, 5, 6); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java deleted file mode 100644 index 295fa545c76..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.channel.DefaultEventLoop; -import io.netty.channel.EventLoopGroup; -import io.netty.util.concurrent.ScheduledFuture; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.Callable; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.FutureTask; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Extend Netty's default event loop to capture scheduled tasks instead of running them. The tasks - * can be checked later, and run manually. - * - *

Tasks submitted with {@link #execute(Runnable)} or {@link #submit(Callable)} are still - * executed normally. - * - *

This is used to make unit tests independent of time. - */ -@SuppressWarnings("FunctionalInterfaceClash") // does not matter for test code -public class ScheduledTaskCapturingEventLoop extends DefaultEventLoop { - - private final BlockingQueue> capturedTasks = new ArrayBlockingQueue<>(100); - - public ScheduledTaskCapturingEventLoop(EventLoopGroup parent) { - super(parent); - } - - @NonNull - @Override - public ScheduledFuture schedule(Callable callable, long delay, TimeUnit unit) { - CapturedTask task = new CapturedTask<>(callable, delay, unit); - boolean added = capturedTasks.offer(task); - assertThat(added).isTrue(); - return task.scheduledFuture; - } - - @NonNull - @Override - public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { - return schedule( - () -> { - command.run(); - return null; - }, - delay, - unit); - } - - @NonNull - @Override - public ScheduledFuture scheduleAtFixedRate( - Runnable command, long initialDelay, long period, TimeUnit unit) { - CapturedTask task = - new CapturedTask<>( - () -> { - command.run(); - return null; - }, - initialDelay, - period, - unit); - boolean added = capturedTasks.offer(task); - assertThat(added).isTrue(); - return task.scheduledFuture; - } - - @NonNull - @Override - public ScheduledFuture scheduleWithFixedDelay( - Runnable command, long initialDelay, long delay, TimeUnit unit) { - throw new UnsupportedOperationException("Not supported yet"); - } - - public CapturedTask nextTask() { - try { - return capturedTasks.poll(100, TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { - fail("Unexpected interruption", e); - throw new AssertionError(); - } - } - - /** - * Wait for any pending non-scheduled task (submitted with {@code submit}, {@code execute}, etc.) - * to complete. 
- */ - public void waitForNonScheduledTasks() { - ScheduledFuture f = super.schedule(() -> null, 5, TimeUnit.NANOSECONDS); - try { - Uninterruptibles.getUninterruptibly(f, 1, TimeUnit.SECONDS); - } catch (ExecutionException e) { - fail("unexpected error", e.getCause()); - } catch (TimeoutException e) { - fail("timed out while waiting for admin tasks to complete", e); - } - } - - public class CapturedTask { - private final FutureTask futureTask; - private final long initialDelay; - private final long period; - private final TimeUnit unit; - - @SuppressWarnings("unchecked") - private final ScheduledFuture scheduledFuture = mock(ScheduledFuture.class); - - CapturedTask(Callable task, long initialDelay, TimeUnit unit) { - this(task, initialDelay, -1, unit); - } - - CapturedTask(Callable task, long initialDelay, long period, TimeUnit unit) { - this.futureTask = new FutureTask<>(task); - this.initialDelay = initialDelay; - this.period = period; - this.unit = unit; - - // If the code under test cancels the scheduled future, cancel our task - when(scheduledFuture.cancel(anyBoolean())) - .thenAnswer(invocation -> futureTask.cancel(invocation.getArgument(0))); - - // Delegate methods of the scheduled future to our task (to be extended to more methods if - // needed) - when(scheduledFuture.isDone()).thenAnswer(invocation -> futureTask.isDone()); - when(scheduledFuture.isCancelled()).thenAnswer(invocation -> futureTask.isCancelled()); - } - - public void run() { - submit(futureTask); - waitForNonScheduledTasks(); - } - - public boolean isCancelled() { - // futureTask.isCancelled() can create timing issues in CI environments, so give the - // cancellation a short time to complete instead: - try { - futureTask.get(3, TimeUnit.SECONDS); - } catch (CancellationException e) { - return true; - } catch (Exception e) { - // ignore - } - return false; - } - - public long getInitialDelay(TimeUnit targetUnit) { - return targetUnit.convert(initialDelay, unit); - } - - /** By convention, 
non-recurring tasks have a negative period */ - public long getPeriod(TimeUnit targetUnit) { - return targetUnit.convert(period, unit); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java deleted file mode 100644 index cf0314cc335..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.util.concurrent.ScheduledTaskCapturingEventLoop.CapturedTask; -import io.netty.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import org.junit.Test; - -public class ScheduledTaskCapturingEventLoopTest { - - @Test - public void should_capture_task_and_let_test_complete_it_manually() { - ScheduledTaskCapturingEventLoop eventLoop = new ScheduledTaskCapturingEventLoop(null); - final AtomicBoolean ran = new AtomicBoolean(); - ScheduledFuture future = eventLoop.schedule(() -> ran.set(true), 1, TimeUnit.NANOSECONDS); - - assertThat(future.isDone()).isFalse(); - assertThat(future.isCancelled()).isFalse(); - assertThat(ran.get()).isFalse(); - - CapturedTask task = eventLoop.nextTask(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(1); - - task.run(); - - assertThat(future.isDone()).isTrue(); - assertThat(future.isCancelled()).isFalse(); - assertThat(ran.get()).isTrue(); - } - - @Test - public void should_let_tested_code_cancel_future() { - ScheduledTaskCapturingEventLoop eventLoop = new ScheduledTaskCapturingEventLoop(null); - final AtomicBoolean ran = new AtomicBoolean(); - ScheduledFuture future = eventLoop.schedule(() -> ran.set(true), 1, TimeUnit.NANOSECONDS); - - assertThat(future.isDone()).isFalse(); - assertThat(future.isCancelled()).isFalse(); - assertThat(ran.get()).isFalse(); - - future.cancel(true); - - assertThat(future.isDone()).isTrue(); - assertThat(future.isCancelled()).isTrue(); - assertThat(ran.get()).isFalse(); - } -} diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/README.md b/core/src/test/resources/ReloadingKeyManagerFactoryTest/README.md deleted file mode 100644 index 9ff9b622e5b..00000000000 --- 
a/core/src/test/resources/ReloadingKeyManagerFactoryTest/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# How to create cert stores for ReloadingKeyManagerFactoryTest - -Need the following cert stores: -- `server.keystore` -- `client-original.keystore` -- `client-alternate.keystore` -- `server.truststore`: trusts `client-original.keystore` and `client-alternate.keystore` -- `client.truststore`: trusts `server.keystore` - -We shouldn't need any signing requests or chains of trust, since truststores are just including certs directly. - -First create the three keystores: -``` -$ keytool -genkeypair -keyalg RSA -alias server -keystore server.keystore -dname "CN=server" -storepass changeit -keypass changeit -$ keytool -genkeypair -keyalg RSA -alias client-original -keystore client-original.keystore -dname "CN=client-original" -storepass changeit -keypass changeit -$ keytool -genkeypair -keyalg RSA -alias client-alternate -keystore client-alternate.keystore -dname "CN=client-alternate" -storepass changeit -keypass changeit -``` - -Note that we need to use `-keyalg RSA` because keytool's default keyalg is DSA, which TLS 1.3 doesn't support. If DSA is -used, the handshake will fail due to the server not being able to find any authentication schemes compatible with its -x509 certificate ("Unavailable authentication scheme"). 
- -Then export all the certs: -``` -$ keytool -exportcert -keystore server.keystore -alias server -file server.cert -storepass changeit -$ keytool -exportcert -keystore client-original.keystore -alias client-original -file client-original.cert -storepass changeit -$ keytool -exportcert -keystore client-alternate.keystore -alias client-alternate -file client-alternate.cert -storepass changeit -``` - -Then create the server.truststore that trusts the two client certs: -``` -$ keytool -import -file client-original.cert -alias client-original -keystore server.truststore -storepass changeit -$ keytool -import -file client-alternate.cert -alias client-alternate -keystore server.truststore -storepass changeit -``` - -Then create the client.truststore that trusts the server cert: -``` -$ keytool -import -file server.cert -alias server -keystore client.truststore -storepass changeit -``` diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-alternate.keystore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-alternate.keystore deleted file mode 100644 index 91cee636a0b..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-alternate.keystore and /dev/null differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-original.keystore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-original.keystore deleted file mode 100644 index 74e31f7bc6f..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-original.keystore and /dev/null differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client.truststore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client.truststore deleted file mode 100644 index 3ce9a720dbc..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client.truststore and /dev/null differ diff --git 
a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.keystore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.keystore deleted file mode 100644 index 7d279638a34..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.keystore and /dev/null differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.truststore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.truststore deleted file mode 100644 index c9b06b5fbe1..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.truststore and /dev/null differ diff --git a/core/src/test/resources/application.conf b/core/src/test/resources/application.conf deleted file mode 100644 index efea37cc078..00000000000 --- a/core/src/test/resources/application.conf +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -datastax-java-driver { - basic.request.timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} -} diff --git a/core/src/test/resources/config/cloud/creds.zip b/core/src/test/resources/config/cloud/creds.zip deleted file mode 100644 index 3b5d1cb1cbd..00000000000 Binary files a/core/src/test/resources/config/cloud/creds.zip and /dev/null differ diff --git a/core/src/test/resources/config/cloud/identity.jks b/core/src/test/resources/config/cloud/identity.jks deleted file mode 100644 index bac5bbaa965..00000000000 Binary files a/core/src/test/resources/config/cloud/identity.jks and /dev/null differ diff --git a/core/src/test/resources/config/cloud/metadata.json b/core/src/test/resources/config/cloud/metadata.json deleted file mode 100644 index 35aa26f67f1..00000000000 --- a/core/src/test/resources/config/cloud/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"region":"local","contact_info":{"type":"sni_proxy","local_dc":"dc1","contact_points":["4ac06655-f861-49f9-881e-3fee22e69b94","2af7c253-3394-4a0d-bfac-f1ad81b5154d","b17b6e2a-3f48-4d6a-81c1-20a0a1f3192a"],"sni_proxy_address":"localhost:30002"}} diff --git a/core/src/test/resources/config/cloud/trustStore.jks b/core/src/test/resources/config/cloud/trustStore.jks deleted file mode 100644 index 8ee03f97da0..00000000000 Binary files a/core/src/test/resources/config/cloud/trustStore.jks and /dev/null differ diff --git a/core/src/test/resources/config/customApplication.conf b/core/src/test/resources/config/customApplication.conf deleted file mode 100644 index c3e3dc7b468..00000000000 --- a/core/src/test/resources/config/customApplication.conf +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -datastax-java-driver { - // Check that references to other options in `reference.conf` are correctly resolved - basic.request.timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - - advanced.continuous-paging.max-pages = 10 -} diff --git a/core/src/test/resources/config/customApplication.json b/core/src/test/resources/config/customApplication.json deleted file mode 100644 index 4988a72cd9a..00000000000 --- a/core/src/test/resources/config/customApplication.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "datastax-java-driver": { - "basic": { - "request": { - "page-size": "2000" - } - }, - "advanced": { - "continuous-paging": { - "page-size": 2000 - } - } - } -} diff --git a/core/src/test/resources/config/customApplication.properties b/core/src/test/resources/config/customApplication.properties deleted file mode 100644 index 4c1d1ea0647..00000000000 --- a/core/src/test/resources/config/customApplication.properties +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -datastax-java-driver.basic.request.consistency=ONE -datastax-java-driver.advanced.continuous-paging.max-enqueued-pages = 8 \ No newline at end of file diff --git a/core/src/test/resources/insights/duplicate-dependencies.txt b/core/src/test/resources/insights/duplicate-dependencies.txt deleted file mode 100644 index a808dff3f57..00000000000 --- a/core/src/test/resources/insights/duplicate-dependencies.txt +++ /dev/null @@ -1,2 +0,0 @@ -io.netty:netty-handler:jar:4.0.56.Final:compile -io.netty:netty-handler:jar:4.1.2.Final:compile \ No newline at end of file diff --git a/core/src/test/resources/insights/malformed-pom.properties b/core/src/test/resources/insights/malformed-pom.properties deleted file mode 100644 index 0a503062fbd..00000000000 --- a/core/src/test/resources/insights/malformed-pom.properties +++ /dev/null @@ -1,22 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -#Created by Apache Maven 3.5.0 -#no version -groupId=io.netty -artifactId=netty-handler \ No newline at end of file diff --git a/core/src/test/resources/insights/netty-dependency-optional.txt b/core/src/test/resources/insights/netty-dependency-optional.txt deleted file mode 100644 index 2bd0cd21a0c..00000000000 --- a/core/src/test/resources/insights/netty-dependency-optional.txt +++ /dev/null @@ -1 +0,0 @@ -io.netty:netty-handler:jar:4.0.0.Final:compile (optional) \ No newline at end of file diff --git a/core/src/test/resources/insights/netty-dependency.txt b/core/src/test/resources/insights/netty-dependency.txt deleted file mode 100644 index 69c350c30e8..00000000000 --- a/core/src/test/resources/insights/netty-dependency.txt +++ /dev/null @@ -1 +0,0 @@ -io.netty:netty-handler:jar:4.0.0.Final:runtime \ No newline at end of file diff --git a/core/src/test/resources/insights/ordered-dependencies.txt b/core/src/test/resources/insights/ordered-dependencies.txt deleted file mode 100644 index a5518f89736..00000000000 --- a/core/src/test/resources/insights/ordered-dependencies.txt +++ /dev/null @@ -1,3 +0,0 @@ -b-org.com:art1:jar:1.0:compile -a-org.com:art1:jar:2.0:compile -c-org.com:art1:jar:3.0:compile \ No newline at end of file diff --git a/core/src/test/resources/insights/pom.properties b/core/src/test/resources/insights/pom.properties deleted file mode 100644 index e68a31c8fc7..00000000000 --- a/core/src/test/resources/insights/pom.properties +++ /dev/null @@ -1,23 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -#Created by Apache Maven 3.5.0 -version=4.0.56.Final -groupId=io.netty -artifactId=netty-handler - diff --git a/core/src/test/resources/insights/test-dependencies.txt b/core/src/test/resources/insights/test-dependencies.txt deleted file mode 100644 index e9186a35e6b..00000000000 --- a/core/src/test/resources/insights/test-dependencies.txt +++ /dev/null @@ -1,31 +0,0 @@ - -The following files have been resolved: - com.github.jnr:jffi:jar:1.2.16:compile - org.ow2.asm:asm:jar:5.0.3:compile - com.github.jnr:jnr-constants:jar:0.9.9:compile - com.esri.geometry:esri-geometry-api:jar:1.2.1:compile - com.google.guava:guava:jar:19.0:compile - com.fasterxml.jackson.core:jackson-annotations:jar:2.8.11:compile - com.github.jnr:jnr-posix:jar:3.0.44:compile - org.codehaus.jackson:jackson-core-asl:jar:1.9.12:compile - io.netty:netty-handler:jar:4.0.56.Final:compile - org.ow2.asm:asm-commons:jar:5.0.3:compile - org.ow2.asm:asm-util:jar:5.0.3:compile - org.xerial.snappy:snappy-java:jar:1.1.2.6:compile (optional) - io.netty:netty-buffer:jar:4.0.56.Final:compile - com.github.jnr:jnr-ffi:jar:2.1.7:compile - com.fasterxml.jackson.core:jackson-core:jar:2.8.11:compile - org.hdrhistogram:HdrHistogram:jar:2.1.10:compile (optional) - org.ow2.asm:asm-tree:jar:5.0.3:compile - at.yawk.lz4:lz4-java:jar:1.10.1:compile (optional) - io.netty:netty-transport:jar:4.0.56.Final:compile - io.dropwizard.metrics:metrics-core:jar:3.2.2:compile - io.netty:netty-common:jar:4.0.56.Final:compile - com.fasterxml.jackson.core:jackson-databind:jar:2.7.9.3:compile - 
org.slf4j:slf4j-api:jar:1.7.25:compile - io.netty:netty-transport-native-epoll:jar:4.0.56.Final:compile (optional) - org.ow2.asm:asm-analysis:jar:5.0.3:compile - com.github.jnr:jnr-x86asm:jar:1.0.2:compile - io.netty:netty-codec:jar:4.0.56.Final:compile - org.json:json:jar:20090211:compile - com.github.jnr:jffi:jar:native:1.2.16:runtime \ No newline at end of file diff --git a/core/src/test/resources/logback-test.xml b/core/src/test/resources/logback-test.xml deleted file mode 100644 index 1424331a31d..00000000000 --- a/core/src/test/resources/logback-test.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - - diff --git a/core/src/test/resources/project.properties b/core/src/test/resources/project.properties deleted file mode 100644 index 66eab90b6e4..00000000000 --- a/core/src/test/resources/project.properties +++ /dev/null @@ -1,19 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -project.basedir=${basedir} \ No newline at end of file diff --git a/distribution-source/pom.xml b/distribution-source/pom.xml deleted file mode 100644 index 4c1f11e53a8..00000000000 --- a/distribution-source/pom.xml +++ /dev/null @@ -1,125 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-distribution-source - pom - Apache Cassandra Java Driver - source distribution - - apache-cassandra-java-driver-${project.version}-source - - - maven-jar-plugin - - - - default-jar - none - - - - - maven-source-plugin - - true - - - - maven-install-plugin - - true - - - - maven-deploy-plugin - - true - - - - org.revapi - revapi-maven-plugin - - true - - - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - - - - - - release - - - - maven-assembly-plugin - - - assemble-source-tarball - package - - single - - - - - false - - src/assembly/source-tarball.xml - - posix - - - - net.nicoulaj.maven.plugins - checksum-maven-plugin - 1.7 - - - - artifacts - - - - - true - - sha256 - sha512 - - - - - - - - diff --git a/distribution-source/src/assembly/source-tarball.xml b/distribution-source/src/assembly/source-tarball.xml deleted file mode 100644 index b3e2d0f463a..00000000000 --- a/distribution-source/src/assembly/source-tarball.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - source-tarball - - tar.gz - - - - .. - . 
- true - - - **/*.iml - **/.classpath - **/.project - **/.java-version - **/.flattened-pom.xml - **/dependency-reduced-pom.xml - **/${project.build.directory}/** - - - - diff --git a/distribution-tests/pom.xml b/distribution-tests/pom.xml deleted file mode 100644 index 9cef313f8a5..00000000000 --- a/distribution-tests/pom.xml +++ /dev/null @@ -1,122 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-distribution-tests - Apache Cassandra Java Driver - distribution tests - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - org.apache.cassandra - java-driver-test-infra - test - - - org.apache.cassandra - java-driver-query-builder - test - - - org.apache.cassandra - java-driver-mapper-processor - test - - - org.apache.cassandra - java-driver-mapper-runtime - test - - - org.apache.cassandra - java-driver-core - test - - - org.apache.cassandra - java-driver-metrics-micrometer - test - - - org.apache.cassandra - java-driver-metrics-microprofile - test - - - junit - junit - test - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testing.jvm}/bin/java - ${mockitoopens.argline} - 1 - - - - org.revapi - revapi-maven-plugin - - true - - - - maven-install-plugin - - true - - - - maven-deploy-plugin - - true - - - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - - - - diff --git a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/DriverDependencyTest.java b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/DriverDependencyTest.java deleted file mode 100644 index 16952e3d771..00000000000 --- a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/DriverDependencyTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; -import com.datastax.oss.driver.internal.core.util.Reflection; -import com.datastax.oss.driver.internal.mapper.processor.MapperProcessor; -import com.datastax.oss.driver.internal.metrics.micrometer.MicrometerMetricsFactory; -import com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory; -import org.junit.Test; - -public class DriverDependencyTest { - @Test - public void should_include_core_jar() { - assertThat(Reflection.loadClass(null, "com.datastax.oss.driver.api.core.session.Session")) - .isEqualTo(Session.class); - } - - @Test - public void should_include_query_builder_jar() { - assertThat(Reflection.loadClass(null, "com.datastax.oss.driver.api.querybuilder.QueryBuilder")) - .isEqualTo(QueryBuilder.class); - } - - @Test - public void should_include_mapper_processor_jar() { - assertThat( - Reflection.loadClass( - null, "com.datastax.oss.driver.internal.mapper.processor.MapperProcessor")) - 
.isEqualTo(MapperProcessor.class); - } - - @Test - public void should_include_mapper_runtime_jar() { - assertThat(Reflection.loadClass(null, "com.datastax.oss.driver.api.mapper.MapperBuilder")) - .isEqualTo(MapperBuilder.class); - } - - @Test - public void should_include_metrics_micrometer_jar() { - assertThat( - Reflection.loadClass( - null, - "com.datastax.oss.driver.internal.metrics.micrometer.MicrometerMetricsFactory")) - .isEqualTo(MicrometerMetricsFactory.class); - } - - @Test - public void should_include_metrics_microprofile_jar() { - assertThat( - Reflection.loadClass( - null, - "com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory")) - .isEqualTo(MicroProfileMetricsFactory.class); - } - - @Test - public void should_include_test_infra_jar() { - assertThat( - Reflection.loadClass( - null, "com.datastax.oss.driver.api.testinfra.CassandraResourceRule")) - .isEqualTo(CassandraResourceRule.class); - } -} diff --git a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/OptionalDependencyTest.java b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/OptionalDependencyTest.java deleted file mode 100644 index 28626413487..00000000000 --- a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/OptionalDependencyTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.util.Dependency; -import com.datastax.oss.driver.internal.core.util.Reflection; -import org.junit.Test; - -public class OptionalDependencyTest { - @Test - public void should_not_include_snappy_jar() { - Dependency.SNAPPY - .classes() - .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); - } - - @Test - public void should_not_include_l4z_jar() { - Dependency.LZ4 - .classes() - .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); - } - - @Test - public void should_not_include_esri_jar() { - Dependency.ESRI - .classes() - .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); - } - - @Test - public void should_not_include_tinkerpop_jar() { - Dependency.TINKERPOP - .classes() - .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); - } -} diff --git a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/ProvidedDependencyTest.java b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/ProvidedDependencyTest.java deleted file mode 100644 index 1070bbc2fb1..00000000000 --- a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/ProvidedDependencyTest.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.util.Reflection; -import org.junit.Test; - -public class ProvidedDependencyTest { - @Test - public void should_not_include_graal_sdk_jar() { - assertThat(Reflection.loadClass(null, "org.graalvm.nativeimage.VMRuntime")).isNull(); - } - - @Test - public void should_not_include_spotbugs_annotations_jar() { - assertThat(Reflection.loadClass(null, "edu.umd.cs.findbugs.annotations.NonNull")).isNull(); - } - - @Test - public void should_not_include_jicp_annotations_jar() { - assertThat(Reflection.loadClass(null, "net.jcip.annotations.ThreadSafe")).isNull(); - } - - @Test - public void should_not_include_blockhound_jar() { - assertThat(Reflection.loadClass(null, "reactor.blockhound.BlockHoundRuntime")).isNull(); - } -} diff --git a/distribution/pom.xml b/distribution/pom.xml deleted file mode 100644 index 20b9afc1bcd..00000000000 --- a/distribution/pom.xml +++ /dev/null @@ -1,163 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-distribution - - jar - Apache Cassandra Java Driver - binary distribution - - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - 
import - - - - - - ${project.groupId} - java-driver-core - ${project.version} - - - ${project.groupId} - java-driver-query-builder - ${project.version} - - - ${project.groupId} - java-driver-mapper-runtime - ${project.version} - - - ${project.groupId} - java-driver-mapper-processor - ${project.version} - - - - apache-cassandra-java-driver-${project.version} - - - maven-jar-plugin - - - - default-jar - none - - - - - maven-source-plugin - - true - - - - maven-install-plugin - - true - - - - maven-deploy-plugin - - true - - - - org.revapi - revapi-maven-plugin - - true - - - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - - - - - - release - - - - maven-assembly-plugin - - - assemble-binary-tarball - package - - single - - - - - false - - src/assembly/binary-tarball.xml - - posix - - - - net.nicoulaj.maven.plugins - checksum-maven-plugin - 1.7 - - - - artifacts - - - - - true - - sha256 - sha512 - - - - - - - - diff --git a/distribution/src/assembly/binary-tarball.xml b/distribution/src/assembly/binary-tarball.xml deleted file mode 100644 index b6294a25340..00000000000 --- a/distribution/src/assembly/binary-tarball.xml +++ /dev/null @@ -1,176 +0,0 @@ - - - - binary-tarball - - tar.gz - - true - - - - true - - org.apache.cassandra:java-driver-core - - - lib/core - false - - - lib/core - - - org.apache.cassandra:java-driver-query-builder - org.apache.cassandra:java-driver-mapper-runtime - org.apache.cassandra:java-driver-mapper-processor - - true - - - - - - - true - - org.apache.cassandra:java-driver-query-builder - - - lib/query-builder - false - - - - org.apache.cassandra:java-driver-core - org.apache.cassandra:java-driver-mapper-runtime - org.apache.cassandra:java-driver-mapper-processor - org.apache.cassandra:java-driver-guava-shaded - - com.github.stephenc.jcip:jcip-annotations - com.github.spotbugs:spotbugs-annotations - - true - - - - - - - true - - org.apache.cassandra:java-driver-mapper-runtime - - - lib/mapper-runtime - false - - - - 
org.apache.cassandra:java-driver-core - org.apache.cassandra:java-driver-query-builder - org.apache.cassandra:java-driver-mapper-processor - org.apache.cassandra:java-driver-guava-shaded - - com.github.stephenc.jcip:jcip-annotations - com.github.spotbugs:spotbugs-annotations - - true - - - - - - - true - - org.apache.cassandra:java-driver-mapper-processor - - - lib/mapper-processor - false - - - - org.apache.cassandra:java-driver-core - org.apache.cassandra:java-driver-query-builder - org.apache.cassandra:java-driver-mapper-runtime - org.apache.cassandra:java-driver-guava-shaded - - com.github.stephenc.jcip:jcip-annotations - com.github.spotbugs:spotbugs-annotations - - true - - - - - - - true - - org.apache.cassandra:java-driver-core - org.apache.cassandra:java-driver-query-builder - org.apache.cassandra:java-driver-mapper-runtime - org.apache.cassandra:java-driver-mapper-processor - - - false - sources - ${module.artifactId}-${module.version}-src.zip - src - - * - - - - - - - target/apidocs - apidocs - - - .. - . - - README* - LICENSE_binary - NOTICE_binary.txt - - - - ../changelog - - - ../faq - - - ../manual - - - ../upgrade_guide - - - diff --git a/docs.yaml b/docs.yaml index 7c679a0f47e..9b6e55e51cb 100644 --- a/docs.yaml +++ b/docs.yaml @@ -1,35 +1,14 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. - -title: Java Driver -summary: Java Driver for Apache Cassandra® -homepage: http://docs.datastax.com/en/developer/java-driver -theme: datastax +# This is provided on the 2.0 branch for convenience. Use docs.yaml from the +# 2.1 branch to generate the docs for all versions. +title: Java Driver for Apache Cassandra +summary: High performance Java client for Apache Cassandra +homepage: http://datastax.github.io/java-driver/ sections: - - title: Manual - prefix: /manual + - title: Features + prefix: /features sources: - type: markdown - files: 'manual/**/*.md' - - title: Reference configuration - prefix: /manual/core/configuration/reference - sources: - - type: rst - files: 'manual/core/configuration/reference/*.rst' + files: 'features/**/*.md' - title: Changelog prefix: /changelog sources: @@ -49,12 +28,22 @@ links: - title: Code href: https://github.com/datastax/java-driver/ - title: Docs - href: http://docs.datastax.com/en/developer/java-driver + href: http://datastax.github.io/java-driver/ - title: Issues href: https://datastax-oss.atlassian.net/browse/JAVA/ - title: Mailing List href: https://groups.google.com/a/lists.datastax.com/forum/#!forum/java-driver-user + - title: IRC Channel + href: irc://irc.freenode.net/datastax-drivers - title: Releases href: https://github.com/datastax/java-driver/releases -api_docs: - 4.0: http://docs.datastax.com/en/drivers/java/4.0 + +#versions: +# - name: 2.1.6 +# ref: '2.1.6' +# - name: 2.1.5 +# ref: '2.1.5' +# - name: 2.0.10.1 +# ref: '12f8840' +# - name: 2.0.10 +# ref: '3a602f1' diff --git a/driver-core/CHANGELOG.rst b/driver-core/CHANGELOG.rst new file mode 100644 index 00000000000..4193bbd7be7 --- /dev/null +++ b/driver-core/CHANGELOG.rst @@ -0,0 +1 @@ +The changelog has moved `here <../changelog/>`_. 
diff --git a/driver-core/Upgrade_guide_to_2.0.rst b/driver-core/Upgrade_guide_to_2.0.rst new file mode 100644 index 00000000000..0d59a4de1c5 --- /dev/null +++ b/driver-core/Upgrade_guide_to_2.0.rst @@ -0,0 +1 @@ +The upgrade guide has moved `here <../upgrade_guide/>`_. diff --git a/driver-core/Upgrade_guide_to_2.1.rst b/driver-core/Upgrade_guide_to_2.1.rst new file mode 100644 index 00000000000..0d59a4de1c5 --- /dev/null +++ b/driver-core/Upgrade_guide_to_2.1.rst @@ -0,0 +1 @@ +The upgrade guide has moved `here <../upgrade_guide/>`_. diff --git a/driver-core/pom.xml b/driver-core/pom.xml new file mode 100644 index 00000000000..e6bb79ce962 --- /dev/null +++ b/driver-core/pom.xml @@ -0,0 +1,449 @@ + + + 4.0.0 + + com.datastax.cassandra + cassandra-driver-parent + 2.1.8-SNAPSHOT + + cassandra-driver-core + bundle + DataStax Java Driver for Apache Cassandra - Core + A driver for Apache Cassandra 1.2+ that works exclusively with the Cassandra Query Language version 3 (CQL3) and Cassandra's binary protocol. 
+ https://github.com/datastax/java-driver + + + ${project.parent.basedir} + + + + + io.netty + netty-handler + ${netty.version} + + + + com.google.guava + guava + ${guava.version} + + + + com.codahale.metrics + metrics-core + ${metrics.version} + + + + + + + org.xerial.snappy + snappy-java + ${snappy.version} + true + + + + net.jpountz.lz4 + lz4 + ${lz4.version} + true + + + + + + io.netty + netty-transport-native-epoll + ${netty.version} + true + + + + org.hdrhistogram + HdrHistogram + ${hdr.version} + true + + + + org.testng + testng + ${testng.version} + test + + + + org.assertj + assertj-core + ${assertj.version} + test + + + + org.mockito + mockito-all + ${mockito.version} + test + + + + org.scassandra + java-client + ${scassandra.version} + test + + + ch.qos.logback + logback-classic + + + + + + org.apache.commons + commons-exec + 1.3 + test + + + + + + + + src/main/resources + true + + + + + maven-compiler-plugin + + -Xlint:all + true + true + + + + org.apache.maven.plugins + maven-jar-plugin + 2.2 + + + + test-jar + + test-compile + + + + + org.apache.felix + maven-bundle-plugin + true + 2.4.0 + + + + com.datastax.driver.core + ${project.version} + <_include>-osgi.bnd + + + + jar + bundle + pom + + + + + + bundle-manifest-shaded + prepare-package + + manifest + + + ${project.build.directory}/META-INF-shaded + + + com.datastax.shaded.* + + + + + + + maven-shade-plugin + 2.3 + + + package + shade + + true + + + io.netty:* + + + io.netty:netty-transport-native-epoll + + + + + io.netty + com.datastax.shaded.netty + + + + + + META-INF/MANIFEST.MF + META-INF/io.netty.versions.properties + META-INF/maven/io.netty/netty-buffer/pom.properties + META-INF/maven/io.netty/netty-buffer/pom.xml + META-INF/maven/io.netty/netty-codec/pom.properties + META-INF/maven/io.netty/netty-codec/pom.xml + META-INF/maven/io.netty/netty-common/pom.properties + META-INF/maven/io.netty/netty-common/pom.xml + META-INF/maven/io.netty/netty-handler/pom.properties + 
META-INF/maven/io.netty/netty-handler/pom.xml + META-INF/maven/io.netty/netty-transport/pom.properties + META-INF/maven/io.netty/netty-transport/pom.xml + + + + + META-INF/MANIFEST.MF + ${project.build.directory}/META-INF-shaded/MANIFEST.MF + + + + + + + + + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-jar-plugin + [2.2,) + + test-jar + + + + + + + + + + + + + + + + + default + + default + + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.16 + + unit + false + + ${cassandra.version} + ${ipprefix} + 60 + + + io.netty:netty-transport-native-epoll + + + + + + + + + + + short + + default + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.16 + + unit,short + false + + ${cassandra.version} + ${ipprefix} + + + io.netty:netty-transport-native-epoll + + + + + + + + + long + + default + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.16 + + unit,short,long + false + + ${cassandra.version} + ${ipprefix} + + + io.netty:netty-transport-native-epoll + + + + + + + + + duration + + default + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.16 + + unit,short,long,duration + false + + ${cassandra.version} + ${ipprefix} + + + io.netty:netty-transport-native-epoll + + + + + + + + + doc + + default + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.16 + + unit,doc + false + + ${cassandra.version} + ${ipprefix} + + + io.netty:netty-transport-native-epoll + + + + + + + + + + + Apache 2 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + Apache License Version 2.0 + + + + + scm:git:git@github.com:datastax/java-driver.git + scm:git:git@github.com:datastax/java-driver.git + https://github.com/datastax/java-driver + HEAD + + + + + Various + DataStax + + + + + diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractAddressableByIndexData.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractAddressableByIndexData.java new file mode 100644 
index 00000000000..c59b9175972 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractAddressableByIndexData.java @@ -0,0 +1,251 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +abstract class AbstractAddressableByIndexData> extends AbstractGettableByIndexData implements SettableByIndexData { + + final ByteBuffer[] values; + + protected AbstractAddressableByIndexData(ProtocolVersion protocolVersion, int size) { + super(protocolVersion); + this.values = new ByteBuffer[size]; + } + + @SuppressWarnings("unchecked") + protected T setValue(int i, ByteBuffer value) { + values[i] = value; + return (T)this; + } + + protected ByteBuffer getValue(int i) { + return values[i]; + } + + public T setBool(int i, boolean v) { + checkType(i, DataType.Name.BOOLEAN); + return setValue(i, TypeCodec.BooleanCodec.instance.serializeNoBoxing(v)); + } + + public T setInt(int i, int v) { + checkType(i, DataType.Name.INT); + return setValue(i, TypeCodec.IntCodec.instance.serializeNoBoxing(v)); + } + + public T setLong(int i, long v) { + checkType(i, DataType.Name.BIGINT, DataType.Name.COUNTER); + return setValue(i, TypeCodec.LongCodec.instance.serializeNoBoxing(v)); 
+ } + + public T setDate(int i, Date v) { + checkType(i, DataType.Name.TIMESTAMP); + return setValue(i, v == null ? null : TypeCodec.DateCodec.instance.serialize(v)); + } + + public T setFloat(int i, float v) { + checkType(i, DataType.Name.FLOAT); + return setValue(i, TypeCodec.FloatCodec.instance.serializeNoBoxing(v)); + } + + public T setDouble(int i, double v) { + checkType(i, DataType.Name.DOUBLE); + return setValue(i, TypeCodec.DoubleCodec.instance.serializeNoBoxing(v)); + } + + public T setString(int i, String v) { + DataType.Name type = checkType(i, DataType.Name.VARCHAR, DataType.Name.TEXT, DataType.Name.ASCII); + switch (type) { + case ASCII: + return setValue(i, v == null ? null : TypeCodec.StringCodec.asciiInstance.serialize(v)); + case TEXT: + case VARCHAR: + return setValue(i, v == null ? null : TypeCodec.StringCodec.utf8Instance.serialize(v)); + default: + throw new AssertionError(); + } + } + + public T setBytes(int i, ByteBuffer v) { + checkType(i, DataType.Name.BLOB); + return setBytesUnsafe(i, v); + } + + public T setBytesUnsafe(int i, ByteBuffer v) { + return setValue(i, v == null ? null : v.duplicate()); + } + + public T setVarint(int i, BigInteger v) { + checkType(i, DataType.Name.VARINT); + return setValue(i, v == null ? null : TypeCodec.BigIntegerCodec.instance.serialize(v)); + } + + public T setDecimal(int i, BigDecimal v) { + checkType(i, DataType.Name.DECIMAL); + return setValue(i, v == null ? null : TypeCodec.DecimalCodec.instance.serialize(v)); + } + + public T setUUID(int i, UUID v) { + DataType.Name type = checkType(i, DataType.Name.UUID, DataType.Name.TIMEUUID); + + if (v == null) + return setValue(i, null); + + if (type == DataType.Name.TIMEUUID && v.version() != 1) + throw new InvalidTypeException(String.format("%s is not a Type 1 (time-based) UUID", v)); + + return type == DataType.Name.UUID + ? 
setValue(i, TypeCodec.UUIDCodec.instance.serialize(v)) + : setValue(i, TypeCodec.TimeUUIDCodec.instance.serialize(v)); + } + + public T setInet(int i, InetAddress v) { + checkType(i, DataType.Name.INET); + return setValue(i, v == null ? null : TypeCodec.InetCodec.instance.serialize(v)); + } + + public T setList(int i, List v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.LIST) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a list", getName(i), type)); + + if (v == null) + return setValue(i, null); + + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!v.isEmpty()) { + // Ugly? Yes + Class providedClass = v.get(0).getClass(); + Class expectedClass = type.getTypeArguments().get(0).asJavaClass(); + + if (!expectedClass.isAssignableFrom(providedClass)) + throw new InvalidTypeException(String.format("Invalid value for column %s of CQL type %s, expecting list of %s but provided list of %s", getName(i), type, expectedClass, providedClass)); + } + return setValue(i, type.codec(protocolVersion).serialize(v)); + } + + public T setMap(int i, Map v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.MAP) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a map", getName(i), type)); + + if (v == null) + return setValue(i, null); + + if (!v.isEmpty()) { + // Ugly? 
Yes + Map.Entry entry = v.entrySet().iterator().next(); + Class providedKeysClass = entry.getKey().getClass(); + Class providedValuesClass = entry.getValue().getClass(); + + Class expectedKeysClass = type.getTypeArguments().get(0).getName().javaType; + Class expectedValuesClass = type.getTypeArguments().get(1).getName().javaType; + if (!expectedKeysClass.isAssignableFrom(providedKeysClass) || !expectedValuesClass.isAssignableFrom(providedValuesClass)) + throw new InvalidTypeException(String.format("Invalid value for column %s of CQL type %s, expecting map of %s->%s but provided map of %s->%s", getName(i), type, expectedKeysClass, expectedValuesClass, providedKeysClass, providedValuesClass)); + } + return setValue(i, type.codec(protocolVersion).serialize(v)); + } + + public T setSet(int i, Set v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.SET) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a set", getName(i), type)); + + if (v == null) + return setValue(i, null); + + if (!v.isEmpty()) { + // Ugly? 
Yes + Class providedClass = v.iterator().next().getClass(); + Class expectedClass = type.getTypeArguments().get(0).getName().javaType; + + if (!expectedClass.isAssignableFrom(providedClass)) + throw new InvalidTypeException(String.format("Invalid value for column %s of CQL type %s, expecting set of %s but provided set of %s", getName(i), type, expectedClass, providedClass)); + } + return setValue(i, type.codec(protocolVersion).serialize(v)); + } + + public T setUDTValue(int i, UDTValue v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.UDT) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a UDT", getName(i), type)); + + if (v == null) + return setValue(i, null); + + // UDT always use the V3 protocol version to encode values + return setValue(i, type.codec(ProtocolVersion.V3).serialize(v)); + } + + public T setTupleValue(int i, TupleValue v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.TUPLE) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a tuple", getName(i), type)); + + if (v == null) + return setValue(i, null); + + // Tuples always user the V3 protocol version to encode values + return setValue(i, type.codec(ProtocolVersion.V3).serialize(v)); + } + + public T setToNull(int i) { + return setValue(i, null); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AbstractAddressableByIndexData)) + return false; + + AbstractAddressableByIndexData that = (AbstractAddressableByIndexData)o; + if (values.length != that.values.length) + return false; + + // Deserializing each value is slightly inefficient, but comparing + // the bytes could in theory be wrong (for varint for instance, 2 values + // can have different binary representation but be the same value due to + // leading zeros). So we don't take any risk. 
+ for (int i = 0; i < values.length; i++) { + DataType thisType = getType(i); + DataType thatType = that.getType(i); + if (!thisType.equals(thatType)) + return false; + + if ((values[i] == null) != (that.values[i] == null)) + return false; + + if (values[i] != null && !(thisType.deserialize(values[i], protocolVersion).equals(thatType.deserialize(that.values[i], protocolVersion)))) + return false; + } + return true; + } + + @Override + public int hashCode() { + // Same as equals + int hash = 31; + for (int i = 0; i < values.length; i++) + hash += values[i] == null ? 1 : getType(i).deserialize(values[i], protocolVersion).hashCode(); + return hash; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractData.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractData.java new file mode 100644 index 00000000000..fc187cd02cb --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractData.java @@ -0,0 +1,460 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +// We don't want to expose this one: it's less useful externally and it's a bit ugly to expose anyway (but it's convenient). 
+abstract class AbstractData> extends AbstractGettableData implements SettableData { + + final T wrapped; + final ByteBuffer[] values; + + // Ugly, we coould probably clean that: it is currently needed however because we sometimes + // want wrapped to be 'this' (UDTValue), and sometimes some other object (in BoundStatement). + @SuppressWarnings("unchecked") + protected AbstractData(ProtocolVersion protocolVersion, int size) { + super(protocolVersion); + this.wrapped = (T)this; + this.values = new ByteBuffer[size]; + } + + protected AbstractData(ProtocolVersion protocolVersion, T wrapped, int size) { + this(protocolVersion, wrapped, new ByteBuffer[size]); + } + + protected AbstractData(ProtocolVersion protocolVersion, T wrapped, ByteBuffer[] values) { + super(protocolVersion); + this.wrapped = wrapped; + this.values = values; + } + + protected abstract int[] getAllIndexesOf(String name); + + private T setValue(int i, ByteBuffer value) { + values[i] = value; + return wrapped; + } + + protected ByteBuffer getValue(int i) { + return values[i]; + } + + protected int getIndexOf(String name) { + return getAllIndexesOf(name)[0]; + } + + public T setBool(int i, boolean v) { + checkType(i, DataType.Name.BOOLEAN); + return setValue(i, TypeCodec.BooleanCodec.instance.serializeNoBoxing(v)); + } + + public T setBool(String name, boolean v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = TypeCodec.BooleanCodec.instance.serializeNoBoxing(v); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], DataType.Name.BOOLEAN); + setValue(indexes[i], value); + } + return wrapped; + } + + public T setInt(int i, int v) { + checkType(i, DataType.Name.INT); + return setValue(i, TypeCodec.IntCodec.instance.serializeNoBoxing(v)); + } + + public T setInt(String name, int v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = TypeCodec.IntCodec.instance.serializeNoBoxing(v); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], 
DataType.Name.INT); + setValue(indexes[i], value); + } + return wrapped; + } + + public T setLong(int i, long v) { + checkType(i, DataType.Name.BIGINT, DataType.Name.COUNTER); + return setValue(i, TypeCodec.LongCodec.instance.serializeNoBoxing(v)); + } + + public T setLong(String name, long v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = TypeCodec.LongCodec.instance.serializeNoBoxing(v); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], DataType.Name.BIGINT, DataType.Name.COUNTER); + setValue(indexes[i], value); + } + return wrapped; + } + + public T setDate(int i, Date v) { + checkType(i, DataType.Name.TIMESTAMP); + return setValue(i, v == null ? null : TypeCodec.DateCodec.instance.serialize(v)); + } + + public T setDate(String name, Date v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = v == null ? null : TypeCodec.DateCodec.instance.serialize(v); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], DataType.Name.TIMESTAMP); + setValue(indexes[i], value); + } + return wrapped; + } + + public T setFloat(int i, float v) { + checkType(i, DataType.Name.FLOAT); + return setValue(i, TypeCodec.FloatCodec.instance.serializeNoBoxing(v)); + } + + public T setFloat(String name, float v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = TypeCodec.FloatCodec.instance.serializeNoBoxing(v); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], DataType.Name.FLOAT); + setValue(indexes[i], value); + } + return wrapped; + } + + public T setDouble(int i, double v) { + checkType(i, DataType.Name.DOUBLE); + return setValue(i, TypeCodec.DoubleCodec.instance.serializeNoBoxing(v)); + } + + public T setDouble(String name, double v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = TypeCodec.DoubleCodec.instance.serializeNoBoxing(v); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], DataType.Name.DOUBLE); + setValue(indexes[i], value); + } + return wrapped; 
+ } + + public T setString(int i, String v) { + DataType.Name type = checkType(i, DataType.Name.VARCHAR, DataType.Name.TEXT, DataType.Name.ASCII); + switch (type) { + case ASCII: + return setValue(i, v == null ? null : TypeCodec.StringCodec.asciiInstance.serialize(v)); + case TEXT: + case VARCHAR: + return setValue(i, v == null ? null : TypeCodec.StringCodec.utf8Instance.serialize(v)); + default: + throw new AssertionError(); + } + } + + public T setString(String name, String v) { + int[] indexes = getAllIndexesOf(name); + for (int i = 0; i < indexes.length; i++) + setString(indexes[i], v); + return wrapped; + } + + public T setBytes(int i, ByteBuffer v) { + checkType(i, DataType.Name.BLOB); + return setBytesUnsafe(i, v); + } + + public T setBytes(String name, ByteBuffer v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = v == null ? null : v.duplicate(); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], DataType.Name.BLOB); + setValue(indexes[i], value); + } + return wrapped; + } + + public T setBytesUnsafe(int i, ByteBuffer v) { + return setValue(i, v == null ? null : v.duplicate()); + } + + public T setBytesUnsafe(String name, ByteBuffer v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = v == null ? null : v.duplicate(); + for (int i = 0; i < indexes.length; i++) + setValue(indexes[i], value); + return wrapped; + } + + public T setVarint(int i, BigInteger v) { + checkType(i, DataType.Name.VARINT); + return setValue(i, v == null ? null : TypeCodec.BigIntegerCodec.instance.serialize(v)); + } + + public T setVarint(String name, BigInteger v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = v == null ? 
null : TypeCodec.BigIntegerCodec.instance.serialize(v); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], DataType.Name.VARINT); + setValue(indexes[i], value); + } + return wrapped; + } + + public T setDecimal(int i, BigDecimal v) { + checkType(i, DataType.Name.DECIMAL); + return setValue(i, v == null ? null : TypeCodec.DecimalCodec.instance.serialize(v)); + } + + public T setDecimal(String name, BigDecimal v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = v == null ? null : TypeCodec.DecimalCodec.instance.serialize(v); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], DataType.Name.DECIMAL); + setValue(indexes[i], value); + } + return wrapped; + } + + public T setUUID(int i, UUID v) { + DataType.Name type = checkType(i, DataType.Name.UUID, DataType.Name.TIMEUUID); + + if (v == null) + return setValue(i, null); + + if (type == DataType.Name.TIMEUUID && v.version() != 1) + throw new InvalidTypeException(String.format("%s is not a Type 1 (time-based) UUID", v)); + + return type == DataType.Name.UUID + ? setValue(i, TypeCodec.UUIDCodec.instance.serialize(v)) + : setValue(i, TypeCodec.TimeUUIDCodec.instance.serialize(v)); + } + + public T setUUID(String name, UUID v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = v == null ? null : TypeCodec.UUIDCodec.instance.serialize(v); + for (int i = 0; i < indexes.length; i++) { + DataType.Name type = checkType(indexes[i], DataType.Name.UUID, DataType.Name.TIMEUUID); + if (v != null && type == DataType.Name.TIMEUUID && v.version() != 1) + throw new InvalidTypeException(String.format("%s is not a Type 1 (time-based) UUID", v)); + setValue(indexes[i], value); + } + return wrapped; + } + + public T setInet(int i, InetAddress v) { + checkType(i, DataType.Name.INET); + return setValue(i, v == null ? 
null : TypeCodec.InetCodec.instance.serialize(v)); + } + + public T setInet(String name, InetAddress v) { + int[] indexes = getAllIndexesOf(name); + ByteBuffer value = v == null ? null : TypeCodec.InetCodec.instance.serialize(v); + for (int i = 0; i < indexes.length; i++) { + checkType(indexes[i], DataType.Name.INET); + setValue(indexes[i], value); + } + return wrapped; + } + + // setToken is package-private because we only want to expose it in BoundStatement + T setToken(int i, Token v) { + if (v == null) + throw new NullPointerException(String.format("Cannot set a null token for column %s", getName(i))); + checkType(i, v.getType().getName()); + return setValue(i, v.getType().codec(protocolVersion).serialize(v.getValue())); + } + + T setToken(String name, Token v) { + int[] indexes = getAllIndexesOf(name); + for (int i = 0; i < indexes.length; i++) + setToken(indexes[i], v); + return wrapped; + } + + public T setList(int i, List v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.LIST) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a list", getName(i), type)); + + if (v == null) + return setValue(i, null); + + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!v.isEmpty()) { + // Ugly? 
Yes + Class providedClass = v.get(0).getClass(); + Class expectedClass = type.getTypeArguments().get(0).asJavaClass(); + + if (!expectedClass.isAssignableFrom(providedClass)) + throw new InvalidTypeException(String.format("Invalid value for column %s of CQL type %s, expecting list of %s but provided list of %s", getName(i), type, expectedClass, providedClass)); + } + return setValue(i, type.codec(protocolVersion).serialize(v)); + } + + public T setList(String name, List v) { + int[] indexes = getAllIndexesOf(name); + for (int i = 0; i < indexes.length; i++) + setList(indexes[i], v); + return wrapped; + } + + public T setMap(int i, Map v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.MAP) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a map", getName(i), type)); + + if (v == null) + return setValue(i, null); + + if (!v.isEmpty()) { + // Ugly? Yes + Map.Entry entry = v.entrySet().iterator().next(); + Class providedKeysClass = entry.getKey().getClass(); + Class providedValuesClass = entry.getValue().getClass(); + + Class expectedKeysClass = type.getTypeArguments().get(0).getName().javaType; + Class expectedValuesClass = type.getTypeArguments().get(1).getName().javaType; + if (!expectedKeysClass.isAssignableFrom(providedKeysClass) || !expectedValuesClass.isAssignableFrom(providedValuesClass)) + throw new InvalidTypeException(String.format("Invalid value for column %s of CQL type %s, expecting map of %s->%s but provided map of %s->%s", getName(i), type, expectedKeysClass, expectedValuesClass, providedKeysClass, providedValuesClass)); + } + return setValue(i, type.codec(protocolVersion).serialize(v)); + } + + public T setMap(String name, Map v) { + int[] indexes = getAllIndexesOf(name); + for (int i = 0; i < indexes.length; i++) + setMap(indexes[i], v); + return wrapped; + } + + public T setSet(int i, Set v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.SET) + throw new 
InvalidTypeException(String.format("Column %s is of type %s, cannot set to a set", getName(i), type)); + + if (v == null) + return setValue(i, null); + + if (!v.isEmpty()) { + // Ugly? Yes + Class providedClass = v.iterator().next().getClass(); + Class expectedClass = type.getTypeArguments().get(0).getName().javaType; + + if (!expectedClass.isAssignableFrom(providedClass)) + throw new InvalidTypeException(String.format("Invalid value for column %s of CQL type %s, expecting set of %s but provided set of %s", getName(i), type, expectedClass, providedClass)); + } + return setValue(i, type.codec(protocolVersion).serialize(v)); + } + + public T setSet(String name, Set v) { + int[] indexes = getAllIndexesOf(name); + for (int i = 0; i < indexes.length; i++) + setSet(indexes[i], v); + return wrapped; + } + + public T setUDTValue(int i, UDTValue v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.UDT) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a UDT", getName(i), type)); + + if (v == null) + return setValue(i, null); + + // UDT always use the V3 protocol version to encode values + setValue(i, type.codec(ProtocolVersion.V3).serialize(v)); + return wrapped; + } + + public T setUDTValue(String name, UDTValue v) { + int[] indexes = getAllIndexesOf(name); + for (int i = 0; i < indexes.length; i++) + setUDTValue(indexes[i], v); + return wrapped; + } + + public T setTupleValue(int i, TupleValue v) { + DataType type = getType(i); + if (type.getName() != DataType.Name.TUPLE) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a tuple", getName(i), type)); + + if (v == null) + return setValue(i, null); + + // Tuples always user the V3 protocol version to encode values + setValue(i, type.codec(ProtocolVersion.V3).serialize(v)); + return wrapped; + } + + public T setTupleValue(String name, TupleValue v) { + int[] indexes = getAllIndexesOf(name); + for (int i = 0; i < indexes.length; i++) 
+ setTupleValue(indexes[i], v); + return wrapped; + } + + @Override + public T setToNull(int i) { + return setValue(i, null); + } + + @Override + public T setToNull(String name) { + int[] indexes = getAllIndexesOf(name); + for (int i = 0; i < indexes.length; i++) + setToNull(indexes[i]); + return wrapped; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AbstractData)) + return false; + + AbstractData that = (AbstractData)o; + if (values.length != that.values.length) + return false; + + // Deserializing each value is slightly inefficient, but comparing + // the bytes could in theory be wrong (for varint for instance, 2 values + // can have different binary representation but be the same value due to + // leading zeros). So we don't take any risk. + for (int i = 0; i < values.length; i++) { + DataType thisType = getType(i); + DataType thatType = that.getType(i); + if (!thisType.equals(thatType)) + return false; + + if ((values[i] == null) != (that.values[i] == null)) + return false; + + if (values[i] != null && !(thisType.deserialize(values[i], protocolVersion).equals(thatType.deserialize(that.values[i], protocolVersion)))) + return false; + } + return true; + } + + @Override + public int hashCode() { + // Same as equals + int hash = 31; + for (int i = 0; i < values.length; i++) + hash += values[i] == null ? 1 : getType(i).deserialize(values[i], protocolVersion).hashCode(); + return hash; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableByIndexData.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableByIndexData.java new file mode 100644 index 00000000000..a87ba177095 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableByIndexData.java @@ -0,0 +1,468 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import com.google.common.reflect.TypeToken; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +abstract class AbstractGettableByIndexData implements GettableByIndexData { + + protected final ProtocolVersion protocolVersion; + + protected AbstractGettableByIndexData(ProtocolVersion protocolVersion) { + this.protocolVersion = protocolVersion; + } + + /** + * Returns the type for the value at index {@code i}. + * + * @param i the index of the type to fetch. + * @return the type of the value at index {@code i}. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index. + */ + protected abstract DataType getType(int i); + + /** + * Returns the name corresponding to the value at index {@code i}. + * + * @param i the index of the name to fetch. + * @return the name corresponding to the value at index {@code i}. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index. + */ + protected abstract String getName(int i); + + /** + * Returns the value at index {@code i}. + * + * @param i the index to fetch. + * @return the value at index {@code i}. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index. 
+ */ + protected abstract ByteBuffer getValue(int i); + + // Note: we avoid having a vararg method to avoid the array allocation that comes with it. + protected void checkType(int i, DataType.Name name) { + DataType defined = getType(i); + if (name != defined.getName()) + throw new InvalidTypeException(String.format("Value %s is of type %s", getName(i), defined)); + } + + protected DataType.Name checkType(int i, DataType.Name name1, DataType.Name name2) { + DataType defined = getType(i); + if (name1 != defined.getName() && name2 != defined.getName()) + throw new InvalidTypeException(String.format("Value %s is of type %s", getName(i), defined)); + + return defined.getName(); + } + + protected DataType.Name checkType(int i, DataType.Name name1, DataType.Name name2, DataType.Name name3) { + DataType defined = getType(i); + if (name1 != defined.getName() && name2 != defined.getName() && name3 != defined.getName()) + throw new InvalidTypeException(String.format("Value %s is of type %s", getName(i), defined)); + + return defined.getName(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isNull(int i) { + return getValue(i) == null; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean getBool(int i) { + checkType(i, DataType.Name.BOOLEAN); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return false; + + return TypeCodec.BooleanCodec.instance.deserializeNoBoxing(value); + } + + /** + * {@inheritDoc} + */ + @Override + public int getInt(int i) { + checkType(i, DataType.Name.INT); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return 0; + + return TypeCodec.IntCodec.instance.deserializeNoBoxing(value); + } + + /** + * {@inheritDoc} + */ + @Override + public long getLong(int i) { + checkType(i, DataType.Name.BIGINT, DataType.Name.COUNTER); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return 0L; + + return 
TypeCodec.LongCodec.instance.deserializeNoBoxing(value); + } + + /** + * {@inheritDoc} + */ + @Override + public Date getDate(int i) { + checkType(i, DataType.Name.TIMESTAMP); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return null; + + return TypeCodec.DateCodec.instance.deserialize(value); + } + + /** + * {@inheritDoc} + */ + @Override + public float getFloat(int i) { + checkType(i, DataType.Name.FLOAT); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return 0.0f; + + return TypeCodec.FloatCodec.instance.deserializeNoBoxing(value); + } + + /** + * {@inheritDoc} + */ + @Override + public double getDouble(int i) { + checkType(i, DataType.Name.DOUBLE); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return 0.0; + + return TypeCodec.DoubleCodec.instance.deserializeNoBoxing(value); + } + + /** + * {@inheritDoc} + */ + @Override + public ByteBuffer getBytesUnsafe(int i) { + ByteBuffer value = getValue(i); + if (value == null) + return null; + + return value.duplicate(); + } + + /** + * {@inheritDoc} + */ + @Override + public ByteBuffer getBytes(int i) { + checkType(i, DataType.Name.BLOB); + return getBytesUnsafe(i); + } + + /** + * {@inheritDoc} + */ + @Override + public String getString(int i) { + DataType.Name type = checkType(i, DataType.Name.VARCHAR, + DataType.Name.TEXT, + DataType.Name.ASCII); + + ByteBuffer value = getValue(i); + if (value == null) + return null; + + return type == DataType.Name.ASCII + ? 
TypeCodec.StringCodec.asciiInstance.deserialize(value) + : TypeCodec.StringCodec.utf8Instance.deserialize(value); + } + + /** + * {@inheritDoc} + */ + @Override + public BigInteger getVarint(int i) { + checkType(i, DataType.Name.VARINT); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return null; + + return TypeCodec.BigIntegerCodec.instance.deserialize(value); + } + + /** + * {@inheritDoc} + */ + @Override + public BigDecimal getDecimal(int i) { + checkType(i, DataType.Name.DECIMAL); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return null; + + return TypeCodec.DecimalCodec.instance.deserialize(value); + } + + /** + * {@inheritDoc} + */ + @Override + public UUID getUUID(int i) { + DataType.Name type = checkType(i, DataType.Name.UUID, DataType.Name.TIMEUUID); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return null; + + return type == DataType.Name.UUID + ? TypeCodec.UUIDCodec.instance.deserialize(value) + : TypeCodec.TimeUUIDCodec.instance.deserialize(value); + } + + /** + * {@inheritDoc} + */ + @Override + public InetAddress getInet(int i) { + checkType(i, DataType.Name.INET); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return null; + + return TypeCodec.InetCodec.instance.deserialize(value); + } + + /** + * {@inheritDoc} + */ + @Override + @SuppressWarnings("unchecked") + public List getList(int i, Class elementsClass) { + DataType type = getType(i); + if (type.getName() != DataType.Name.LIST) + throw new InvalidTypeException(String.format("Column %s is not of list type", getName(i))); + + Class expectedClass = type.getTypeArguments().get(0).getName().javaType; + if (!elementsClass.isAssignableFrom(expectedClass)) + throw new InvalidTypeException(String.format("Column %s is a list of %s (CQL type %s), cannot be retrieved as a list of %s", getName(i), expectedClass, type, elementsClass)); + + ByteBuffer 
value = getValue(i); + if (value == null) + return Collections.emptyList(); + + return Collections.unmodifiableList((List)type.codec(protocolVersion).deserialize(value)); + } + + /** + * {@inheritDoc} + */ + @Override + @SuppressWarnings("unchecked") + public List getList(int i, TypeToken elementsType) { + DataType type = getType(i); + if (type.getName() != DataType.Name.LIST) + throw new InvalidTypeException(String.format("Column %s is not of list type", getName(i))); + + DataType expectedType = type.getTypeArguments().get(0); + if (!expectedType.canBeDeserializedAs(elementsType)) + throw new InvalidTypeException(String.format("Column %s has CQL type %s, cannot be retrieved as a list of %s", getName(i), type, elementsType)); + + ByteBuffer value = getValue(i); + if (value == null) + return Collections.emptyList(); + + return Collections.unmodifiableList((List)type.codec(protocolVersion).deserialize(value)); + } + + /** + * {@inheritDoc} + */ + @Override + @SuppressWarnings("unchecked") + public Set getSet(int i, Class elementsClass) { + DataType type = getType(i); + if (type.getName() != DataType.Name.SET) + throw new InvalidTypeException(String.format("Column %s is not of set type", getName(i))); + + Class expectedClass = type.getTypeArguments().get(0).getName().javaType; + if (!elementsClass.isAssignableFrom(expectedClass)) + throw new InvalidTypeException(String.format("Column %s is a set of %s (CQL type %s), cannot be retrieved as a set of %s", getName(i), expectedClass, type, elementsClass)); + + ByteBuffer value = getValue(i); + if (value == null) + return Collections.emptySet(); + + return Collections.unmodifiableSet((Set)type.codec(protocolVersion).deserialize(value)); + } + + /** + * {@inheritDoc} + */ + @Override + @SuppressWarnings("unchecked") + public Set getSet(int i, TypeToken elementsType) { + DataType type = getType(i); + if (type.getName() != DataType.Name.SET) + throw new InvalidTypeException(String.format("Column %s is not of set type", 
getName(i))); + + DataType expectedType = type.getTypeArguments().get(0); + if (!expectedType.canBeDeserializedAs(elementsType)) + throw new InvalidTypeException(String.format("Column %s has CQL type %s, cannot be retrieved as a set of %s", getName(i), type, elementsType)); + + ByteBuffer value = getValue(i); + if (value == null) + return Collections.emptySet(); + + return Collections.unmodifiableSet((Set)type.codec(protocolVersion).deserialize(value)); + } + + /** + * {@inheritDoc} + */ + @Override + @SuppressWarnings("unchecked") + public Map getMap(int i, Class keysClass, Class valuesClass) { + DataType type = getType(i); + if (type.getName() != DataType.Name.MAP) + throw new InvalidTypeException(String.format("Column %s is not of map type", getName(i))); + + Class expectedKeysClass = type.getTypeArguments().get(0).getName().javaType; + Class expectedValuesClass = type.getTypeArguments().get(1).getName().javaType; + if (!keysClass.isAssignableFrom(expectedKeysClass) || !valuesClass.isAssignableFrom(expectedValuesClass)) + throw new InvalidTypeException(String.format("Column %s is a map of %s->%s (CQL type %s), cannot be retrieved as a map of %s->%s", getName(i), expectedKeysClass, expectedValuesClass, type, keysClass, valuesClass)); + + ByteBuffer value = getValue(i); + if (value == null) + return Collections.emptyMap(); + + return Collections.unmodifiableMap((Map)type.codec(protocolVersion).deserialize(value)); + } + + /** + * {@inheritDoc} + */ + @Override + @SuppressWarnings("unchecked") + public Map getMap(int i, TypeToken keysType, TypeToken valuesType) { + DataType type = getType(i); + if (type.getName() != DataType.Name.MAP) + throw new InvalidTypeException(String.format("Column %s is not of map type", getName(i))); + + DataType expectedKeysType = type.getTypeArguments().get(0); + DataType expectedValuesType = type.getTypeArguments().get(1); + if (!expectedKeysType.canBeDeserializedAs(keysType) || !expectedValuesType.canBeDeserializedAs(valuesType)) + 
throw new InvalidTypeException(String.format("Column %s has CQL type %s, cannot be retrieved as a map of %s->%s", getName(i), type, keysType, valuesType)); + + ByteBuffer value = getValue(i); + if (value == null) + return Collections.emptyMap(); + + return Collections.unmodifiableMap((Map)type.codec(protocolVersion).deserialize(value)); + } + + /** + * {@inheritDoc} + */ + @Override + @SuppressWarnings("unchecked") + public UDTValue getUDTValue(int i) { + DataType type = getType(i); + if (type.getName() != DataType.Name.UDT) + throw new InvalidTypeException(String.format("Column %s is not a UDT", getName(i))); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return null; + + // UDT always use the protocol V3 to encode values + return (UDTValue)type.codec(ProtocolVersion.V3).deserialize(value); + } + + /** + * {@inheritDoc} + */ + @Override + @SuppressWarnings("unchecked") + public TupleValue getTupleValue(int i) { + DataType type = getType(i); + if (type.getName() != DataType.Name.TUPLE) + throw new InvalidTypeException(String.format("Column %s is not a tuple", getName(i))); + + ByteBuffer value = getValue(i); + if (value == null || value.remaining() == 0) + return null; + + // tuples always use the protocol V3 to encode values + return (TupleValue)type.codec(ProtocolVersion.V3).deserialize(value); + } + + /** + * {@inheritDoc} + */ + @Override + public Object getObject(int i) { + ByteBuffer raw = getValue(i); + DataType type = getType(i); + if (raw == null) + switch (type.getName()) { + case LIST: + return Collections.emptyList(); + case SET: + return Collections.emptySet(); + case MAP: + return Collections.emptyMap(); + default: + return null; + } + else + return type.deserialize(raw, protocolVersion); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableData.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableData.java new file mode 100644 index 00000000000..aa57f08c535 
--- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableData.java @@ -0,0 +1,245 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.google.common.reflect.TypeToken; + +public abstract class AbstractGettableData extends AbstractGettableByIndexData implements GettableData { + + /** + * Creates a new AbstractGettableData object. + * + * @param protocolVersion the protocol version in which values returned + * by {@link #getValue} will be returned. This must be a protocol version + * supported by this driver. In general, the correct value will be the + * value returned by {@link ProtocolOptions#getProtocolVersion}. + * + * @throws IllegalArgumentException if {@code protocolVersion} is not a valid protocol version. + */ + protected AbstractGettableData(ProtocolVersion protocolVersion) { + super(protocolVersion); + } + + /** + * @throws IllegalArgumentException if {@code protocolVersion} does not correspond to any known version. + * + * @deprecated This constructor is provided for backward compatibility, use {@link #AbstractGettableData(ProtocolVersion)} instead. 
+ */ + @Deprecated + protected AbstractGettableData(int protocolVersion) { + this(ProtocolVersion.fromInt(protocolVersion)); + } + + /** + * Returns the index corresponding to a given name. + * + * @param name the name for which to return the index of. + * @return the index for the value coressponding to {@code name}. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + */ + protected abstract int getIndexOf(String name); + + /** + * {@inheritDoc} + */ + @Override + public boolean isNull(String name) { + return isNull(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean getBool(String name) { + return getBool(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public int getInt(String name) { + return getInt(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public long getLong(String name) { + return getLong(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public Date getDate(String name) { + return getDate(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public float getFloat(String name) { + return getFloat(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public double getDouble(String name) { + return getDouble(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public ByteBuffer getBytesUnsafe(String name) { + return getBytesUnsafe(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public ByteBuffer getBytes(String name) { + return getBytes(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public String getString(String name) { + return getString(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public BigInteger getVarint(String name) { + return getVarint(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public BigDecimal getDecimal(String name) { + return getDecimal(getIndexOf(name)); + } + + /** + * {@inheritDoc} 
+ */ + @Override + public UUID getUUID(String name) { + return getUUID(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public InetAddress getInet(String name) { + return getInet(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public List getList(String name, Class elementsClass) { + return getList(getIndexOf(name), elementsClass); + } + + /** + * {@inheritDoc} + */ + @Override + public List getList(String name, TypeToken elementsType) { + return getList(getIndexOf(name), elementsType); + } + + /** + * {@inheritDoc} + */ + @Override + public Set getSet(String name, Class elementsClass) { + return getSet(getIndexOf(name), elementsClass); + } + + /** + * {@inheritDoc} + */ + @Override + public Set getSet(String name, TypeToken elementsType) { + return getSet(getIndexOf(name), elementsType); + } + + /** + * {@inheritDoc} + */ + @Override + public Map getMap(String name, Class keysClass, Class valuesClass) { + return getMap(getIndexOf(name), keysClass, valuesClass); + } + + /** + * {@inheritDoc} + */ + @Override + public Map getMap(String name, TypeToken keysType, TypeToken valuesType) { + return getMap(getIndexOf(name), keysType, valuesType); + } + + /** + * {@inheritDoc} + */ + @Override + public UDTValue getUDTValue(String name) { + return getUDTValue(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public TupleValue getTupleValue(String name) { + return getTupleValue(getIndexOf(name)); + } + + /** + * {@inheritDoc} + */ + @Override + public Object getObject(String name) { + return getObject(getIndexOf(name)); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java new file mode 100644 index 00000000000..7a4236fd05c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2012-2015 DataStax 
Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicReference; + +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.AbstractFuture; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.policies.ReconnectionPolicy; + +/** + * Manages periodic reconnection attempts after a host has been marked down. + *

+ * Concurrent attempts are handled via the {@link #currentAttempt} reference passed to the constructor. + * For a given reference, only one handler will run at a given time. Additional handlers will cancel + * themselves if they find a previous handler running. + *

+ * This class is designed for concurrency, but instances must not be shared: each thread creates and + * starts its own private handler, all interactions happen through {@link #currentAttempt}. + */ +abstract class AbstractReconnectionHandler implements Runnable { + + private static final Logger logger = LoggerFactory.getLogger(AbstractReconnectionHandler.class); + + private final ScheduledExecutorService executor; + private final ReconnectionPolicy.ReconnectionSchedule schedule; + /** + * The future that is exposed to clients, representing completion of the current active handler + */ + private final AtomicReference> currentAttempt; + + private final HandlerFuture handlerFuture = new HandlerFuture(); + + private final long initialDelayMs; + + private final CountDownLatch ready = new CountDownLatch(1); + + public AbstractReconnectionHandler(ScheduledExecutorService executor, ReconnectionPolicy.ReconnectionSchedule schedule, AtomicReference> currentAttempt) { + this(executor, schedule, currentAttempt, -1); + } + + public AbstractReconnectionHandler(ScheduledExecutorService executor, ReconnectionPolicy.ReconnectionSchedule schedule, AtomicReference> currentAttempt, long initialDelayMs) { + this.executor = executor; + this.schedule = schedule; + this.currentAttempt = currentAttempt; + this.initialDelayMs = initialDelayMs; + } + + protected abstract Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException; + protected abstract void onReconnection(Connection connection); + + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { return true; } + protected boolean onUnknownException(Exception e, long nextDelayMs) { return true; } + + // Retrying on authentication errors makes sense for applications that can update the credentials at runtime, we don't want to force them + // to restart. 
+ protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { return true; } + + // Retrying on these errors is unlikely to work + protected boolean onUnsupportedProtocolVersionException(UnsupportedProtocolVersionException e, long nextDelayMs) { return false; } + protected boolean onClusterNameMismatchException(ClusterNameMismatchException e, long nextDelayMs) { return false; } + + public void start() { + long firstDelay = (initialDelayMs >= 0) ? initialDelayMs : schedule.nextDelayMs(); + logger.debug("First reconnection scheduled in {}ms", firstDelay); + try { + handlerFuture.nextTry = executor.schedule(this, firstDelay, TimeUnit.MILLISECONDS); + + while (true) { + ListenableFuture previous = currentAttempt.get(); + if (previous != null && !previous.isCancelled()) { + logger.debug("Found another already active handler, cancelling"); + handlerFuture.cancel(false); + break; + } + if (currentAttempt.compareAndSet(previous, handlerFuture)) { + logger.debug("Becoming the active handler"); + break; + } + } + ready.countDown(); + } catch (RejectedExecutionException e) { + // The executor has been shutdown, fair enough, just ignore + logger.debug("Aborting reconnection handling since the cluster is shutting down"); + } + } + + @Override + public void run() { + // Just make sure we don't start the first try too fast, in case we find out in start() that we need to cancel ourselves + try { + ready.await(); + } catch (InterruptedException e) { + // This can happen at shutdown + Thread.currentThread().interrupt(); + return; + } + + if (handlerFuture.isCancelled()) { + logger.debug("Got cancelled, stopping"); + currentAttempt.compareAndSet(handlerFuture, null); + return; + } + + try { + onReconnection(tryReconnect()); + handlerFuture.markAsDone(); + currentAttempt.compareAndSet(handlerFuture, null); + logger.debug("Reconnection successful, cleared the future"); + } catch (ConnectionException e) { + long nextDelay = schedule.nextDelayMs(); + if 
(onConnectionException(e, nextDelay)) + reschedule(nextDelay); + else + currentAttempt.compareAndSet(handlerFuture, null); + } catch (AuthenticationException e) { + logger.error(e.getMessage()); + long nextDelay = schedule.nextDelayMs(); + if (onAuthenticationException(e, nextDelay)) { + reschedule(nextDelay); + } else { + logger.error("Retry against {} have been suspended. It won't be retried unless the node is restarted.", e.getHost()); + currentAttempt.compareAndSet(handlerFuture, null); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (UnsupportedProtocolVersionException e) { + logger.error(e.getMessage()); + long nextDelay = schedule.nextDelayMs(); + if (onUnsupportedProtocolVersionException(e, nextDelay)) { + reschedule(nextDelay); + } else { + logger.error("Retry against {} have been suspended. It won't be retried unless the node is restarted.", e.address); + currentAttempt.compareAndSet(handlerFuture, null); + } + } catch (ClusterNameMismatchException e) { + logger.error(e.getMessage()); + long nextDelay = schedule.nextDelayMs(); + if (onClusterNameMismatchException(e, nextDelay)) { + reschedule(nextDelay); + } else { + logger.error("Retry against {} have been suspended. 
It won't be retried unless the node is restarted.", e.address); + currentAttempt.compareAndSet(handlerFuture, null); + } + } catch (Exception e) { + long nextDelay = schedule.nextDelayMs(); + if (onUnknownException(e, nextDelay)) + reschedule(nextDelay); + else + currentAttempt.compareAndSet(handlerFuture, null); + } + } + + private void reschedule(long nextDelay) { + // If we got cancelled during the failed reconnection attempt that lead here, don't reschedule + if (handlerFuture.isCancelled()) { + currentAttempt.compareAndSet(handlerFuture, null); + return; + } + + handlerFuture.nextTry = executor.schedule(this, nextDelay, TimeUnit.MILLISECONDS); + } + + // The future that the handler exposes to its clients via currentAttempt + private static class HandlerFuture extends AbstractFuture { + // A future representing completion of the next task submitted to the executor + volatile ScheduledFuture nextTry; + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + // This is a check-then-act, so we may race with the scheduling of the first try, but in that case + // we'll re-check for cancellation when this first try starts running + if (nextTry != null) { + nextTry.cancel(mayInterruptIfRunning); + } + + return super.cancel(mayInterruptIfRunning); + } + + void markAsDone() { + super.set(null); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractSession.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractSession.java new file mode 100644 index 00000000000..2c690d74a3b --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractSession.java @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.util.concurrent.ExecutionException; + +import com.google.common.base.Function; +import com.google.common.util.concurrent.*; + +/** + * Abstract implementation of the Session interface. + * + * This is primarly intended to make mocking easier. + */ +public abstract class AbstractSession implements Session { + + /** + * {@inheritDoc} + */ + @Override + public ResultSet execute(String query) { + return execute(new SimpleStatement(query)); + } + + /** + * {@inheritDoc} + */ + @Override + public ResultSet execute(String query, Object... values) { + return execute(new SimpleStatement(query, values)); + } + + /** + * {@inheritDoc} + */ + @Override + public ResultSet execute(Statement statement) { + return executeAsync(statement).getUninterruptibly(); + } + + /** + * {@inheritDoc} + */ + @Override + public ResultSetFuture executeAsync(String query) { + return executeAsync(new SimpleStatement(query)); + } + + /** + * {@inheritDoc} + */ + @Override + public ResultSetFuture executeAsync(String query, Object... 
values) { + return executeAsync(new SimpleStatement(query, values)); + } + + /** + * {@inheritDoc} + */ + @Override + public PreparedStatement prepare(String query) { + try { + return Uninterruptibles.getUninterruptibly(prepareAsync(query)); + } catch (ExecutionException e) { + throw DefaultResultSetFuture.extractCauseFromExecutionException(e); + } + } + + /** + * {@inheritDoc} + */ + @Override + public PreparedStatement prepare(RegularStatement statement) { + try { + return Uninterruptibles.getUninterruptibly(prepareAsync(statement)); + } catch (ExecutionException e) { + throw DefaultResultSetFuture.extractCauseFromExecutionException(e); + } + } + + /** + * {@inheritDoc} + */ + @Override + public ListenableFuture prepareAsync(final RegularStatement statement) { + if (statement.hasValues()) + throw new IllegalArgumentException("A statement to prepare should not have values"); + + ListenableFuture prepared = prepareAsync(statement.toString()); + return Futures.transform(prepared, new Function() { + @Override + public PreparedStatement apply(PreparedStatement prepared) { + ByteBuffer routingKey = statement.getRoutingKey(); + if (routingKey != null) + prepared.setRoutingKey(routingKey); + prepared.setConsistencyLevel(statement.getConsistencyLevel()); + if (statement.isTracing()) + prepared.enableTracing(); + prepared.setRetryPolicy(statement.getRetryPolicy()); + + return prepared; + } + }); + } + + /** + * {@inheritDoc} + */ + @Override + public void close() { + try { + closeAsync().get(); + } catch (ExecutionException e) { + throw DefaultResultSetFuture.extractCauseFromExecutionException(e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractTimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractTimestampGenerator.java new file mode 100644 index 00000000000..2104940c991 --- /dev/null +++ 
b/driver-core/src/main/java/com/datastax/driver/core/AbstractTimestampGenerator.java @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Base implementation for generators based on {@link System#currentTimeMillis()} and a counter to generate + * the sub-millisecond part. + */ +abstract class AbstractMonotonicTimestampGenerator implements TimestampGenerator { + private static final Logger logger = LoggerFactory.getLogger(AbstractMonotonicTimestampGenerator.class); + + volatile Clock clock = new SystemClock(); + + protected long computeNext(long last) { + long millis = last / 1000; + long counter = last % 1000; + + long now = clock.currentTime(); + + // System.currentTimeMillis can go backwards on an NTP resync, hence the ">" below + if (millis >= now) { + if (counter == 999) + logger.warn("Sub-millisecond counter overflowed, some query timestamps will not be distinct"); + else + counter += 1; + } else { + millis = now; + counter = 0; + } + + return millis * 1000 + counter; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedResultSet.java new file mode 100644 index 00000000000..d8f6e0eb12a --- /dev/null +++ 
b/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedResultSet.java @@ -0,0 +1,435 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.util.*; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.LinkedBlockingDeque; + +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; +import com.google.common.util.concurrent.Uninterruptibles; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.utils.MoreFutures; + +/** + * Default implementation of a result set, backed by an ArrayDeque of ArrayList. 
+ */ +abstract class ArrayBackedResultSet implements ResultSet { + + private static final Logger logger = LoggerFactory.getLogger(ResultSet.class); + + private static final Queue> EMPTY_QUEUE = new ArrayDeque>(0); + + protected final ColumnDefinitions metadata; + protected final Token.Factory tokenFactory; + private final boolean wasApplied; + + protected final ProtocolVersion protocolVersion; + + private ArrayBackedResultSet(ColumnDefinitions metadata, Token.Factory tokenFactory, List firstRow, ProtocolVersion protocolVersion) { + this.metadata = metadata; + this.protocolVersion = protocolVersion; + this.tokenFactory = tokenFactory; + this.wasApplied = checkWasApplied(firstRow, metadata); + } + + static ArrayBackedResultSet fromMessage(Responses.Result msg, SessionManager session, ProtocolVersion protocolVersion, ExecutionInfo info, Statement statement) { + info = update(info, msg, session); + + switch (msg.kind) { + case VOID: + return empty(info); + case ROWS: + Responses.Result.Rows r = (Responses.Result.Rows)msg; + + ColumnDefinitions columnDefs; + if (r.metadata.columns == null) { + assert statement instanceof BoundStatement; + columnDefs = ((BoundStatement)statement).statement.getPreparedId().resultSetMetadata; + assert columnDefs != null; + } else { + columnDefs = r.metadata.columns; + } + + Token.Factory tokenFactory = (session == null) ? null + : session.getCluster().getMetadata().tokenFactory(); + + // info can be null only for internal calls, but we don't page those. We assert + // this explicitly because MultiPage implementation don't support info == null. + assert r.metadata.pagingState == null || info != null; + return r.metadata.pagingState == null + ? 
new SinglePage(columnDefs, tokenFactory, protocolVersion, r.data, info) + : new MultiPage(columnDefs, tokenFactory, protocolVersion, r.data, info, r.metadata.pagingState, session, statement); + + case SET_KEYSPACE: + case SCHEMA_CHANGE: + return empty(info); + case PREPARED: + throw new RuntimeException("Prepared statement received when a ResultSet was expected"); + default: + logger.error("Received unknown result type '{}'; returning empty result set", msg.kind); + return empty(info); + } + } + + private static ExecutionInfo update(ExecutionInfo info, Responses.Result msg, SessionManager session) { + UUID tracingId = msg.getTracingId(); + return tracingId == null || info == null ? info : info.withTrace(new QueryTrace(tracingId, session)); + } + + private static ArrayBackedResultSet empty(ExecutionInfo info) { + // We could pass the protocol version but we know we won't need it so passing a bogus value (null) + return new SinglePage(ColumnDefinitions.EMPTY, null, null, EMPTY_QUEUE, info); + } + + public ColumnDefinitions getColumnDefinitions() { + return metadata; + } + + public List all() { + if (isExhausted()) + return Collections.emptyList(); + + // We may have more than 'getAvailableWithoutFetching' results but we won't have less, and + // at least in the single page case this will be exactly the size we want so ... 
+ List result = new ArrayList(getAvailableWithoutFetching()); + for (Row row : this) + result.add(row); + return result; + } + + @Override + public Iterator iterator() { + return new Iterator() { + + @Override + public boolean hasNext() { + return !isExhausted(); + } + + @Override + public Row next() { + return ArrayBackedResultSet.this.one(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public boolean wasApplied() { + return wasApplied; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("ResultSet[ exhausted: ").append(isExhausted()); + sb.append(", ").append(metadata).append(']'); + return sb.toString(); + } + + private static class SinglePage extends ArrayBackedResultSet { + + private final Queue> rows; + private final ExecutionInfo info; + + private SinglePage(ColumnDefinitions metadata, + Token.Factory tokenFactory, + ProtocolVersion protocolVersion, + Queue> rows, + ExecutionInfo info) { + super(metadata, tokenFactory, rows.peek(), protocolVersion); + this.info = info; + this.rows = rows; + } + + public boolean isExhausted() { + return rows.isEmpty(); + } + + public Row one() { + return ArrayBackedRow.fromData(metadata, tokenFactory, protocolVersion, rows.poll()); + } + + public int getAvailableWithoutFetching() { + return rows.size(); + } + + public boolean isFullyFetched() { + return true; + } + + public ListenableFuture fetchMoreResults() { + return MoreFutures.VOID_SUCCESS; + } + + public ExecutionInfo getExecutionInfo() { + return info; + } + + public List getAllExecutionInfo() { + return Collections.singletonList(info); + } + } + + private static class MultiPage extends ArrayBackedResultSet { + + private Queue> currentPage; + private final Queue>> nextPages = new ConcurrentLinkedQueue>>(); + + private final Deque infos = new LinkedBlockingDeque(); + + /* + * The fetching state of this result set. 
The fetchState will always be in one of + * the 3 following state: + * 1) fetchState is null or reference a null: fetching is done, there + * is nothing more to fetch and no query in progress. + * 2) fetchState.get().nextStart is not null: there is more pages to fetch. In + * that case, inProgress is *guaranteed* to be null. + * 3) fetchState.get().inProgress is not null: a page is being fetched. + * In that case, nextStart is *guaranteed* to be null. + * + * Also note that while ResultSet doesn't pretend to be thread-safe, the actual + * fetch is done asynchronously and so we do need to be volatile below. + */ + private volatile FetchingState fetchState; + + private final SessionManager session; + private final Statement statement; + + private MultiPage(ColumnDefinitions metadata, + Token.Factory tokenFactory, + ProtocolVersion protocolVersion, + Queue> rows, + ExecutionInfo info, + ByteBuffer pagingState, + SessionManager session, + Statement statement) { + + // Note: as of Cassandra 2.1.0, it turns out that the result of a CAS update is never paged, so + // we could hard-code the result of wasApplied in this class to "true". However, we can not be sure + // that this will never change, so apply the generic check by peeking at the first row. 
+ super(metadata, tokenFactory, rows.peek(), protocolVersion); + this.currentPage = rows; + this.infos.offer(info.withPagingState(pagingState, protocolVersion).withStatement(statement)); + + this.fetchState = new FetchingState(pagingState, null); + this.session = session; + this.statement = statement; + } + + public boolean isExhausted() { + prepareNextRow(); + return currentPage.isEmpty(); + } + + public Row one() { + prepareNextRow(); + return ArrayBackedRow.fromData(metadata, tokenFactory, protocolVersion, currentPage.poll()); + } + + public int getAvailableWithoutFetching() { + int available = currentPage.size(); + for (Queue> page : nextPages) + available += page.size(); + return available; + } + + public boolean isFullyFetched() { + return fetchState == null; + } + + // Ensure that after the call the next row to consume is in 'currentPage', i.e. that + // 'currentPage' is empty IFF the ResultSet if fully exhausted. + private void prepareNextRow() { + while (currentPage.isEmpty()) { + // Grab the current state now to get a consistent view in this iteration. + FetchingState fetchingState = this.fetchState; + + Queue> nextPage = nextPages.poll(); + if (nextPage != null) { + currentPage = nextPage; + continue; + } + if (fetchingState == null) + return; + + // We need to know if there is more result, so fetch the next page and + // wait on it. 
+ try { + Uninterruptibles.getUninterruptibly(fetchMoreResults()); + } catch (ExecutionException e) { + throw DefaultResultSetFuture.extractCauseFromExecutionException(e); + } + } + } + + public ListenableFuture fetchMoreResults() { + return fetchMoreResults(this.fetchState); + } + + private ListenableFuture fetchMoreResults(FetchingState fetchState) { + if (fetchState == null) + return MoreFutures.VOID_SUCCESS; + + if (fetchState.inProgress != null) + return fetchState.inProgress; + + assert fetchState.nextStart != null; + ByteBuffer state = fetchState.nextStart; + SettableFuture future = SettableFuture.create(); + this.fetchState = new FetchingState(null, future); + return queryNextPage(state, future); + } + + private ListenableFuture queryNextPage(ByteBuffer nextStart, final SettableFuture future) { + + assert !(statement instanceof BatchStatement); + + final Message.Request request = session.makeRequestMessage(statement, nextStart); + session.execute(new RequestHandler.Callback() { + + @Override + public Message.Request request() { + return request; + } + + @Override + public void register(RequestHandler handler) { + } + + @Override + public void onSet(Connection connection, Message.Response response, ExecutionInfo info, Statement statement, long latency) { + try { + switch (response.type) { + case RESULT: + Responses.Result rm = (Responses.Result)response; + info = update(info, rm, MultiPage.this.session); + if (rm.kind == Responses.Result.Kind.ROWS) { + Responses.Result.Rows rows = (Responses.Result.Rows)rm; + if (rows.metadata.pagingState != null) + info = info.withPagingState(rows.metadata.pagingState, protocolVersion).withStatement(statement); + MultiPage.this.nextPages.offer(rows.data); + MultiPage.this.fetchState = rows.metadata.pagingState == null ? 
null : new FetchingState(rows.metadata.pagingState, null); + } else if (rm.kind == Responses.Result.Kind.VOID) { + // We shouldn't really get a VOID message here but well, no harm in handling it I suppose + MultiPage.this.fetchState = null; + } else { + logger.error("Received unknown result type '{}' during paging: ignoring message", rm.kind); + // This mean we have probably have a bad node, so defunct the connection + connection.defunct(new ConnectionException(connection.address, String.format("Got unexpected %s result response", rm.kind))); + future.setException(new DriverInternalError(String.format("Got unexpected %s result response from %s", rm.kind, connection.address))); + return; + } + + MultiPage.this.infos.offer(info); + future.set(null); + break; + case ERROR: + future.setException(((Responses.Error)response).asException(connection.address)); + break; + default: + // This mean we have probably have a bad node, so defunct the connection + connection.defunct(new ConnectionException(connection.address, String.format("Got unexpected %s response", response.type))); + future.setException(new DriverInternalError(String.format("Got unexpected %s response from %s", response.type, connection.address))); + break; + } + } catch (RuntimeException e) { + // If we get a bug here, the client will not get it, so better forwarding the error + future.setException(new DriverInternalError("Unexpected error while processing response from " + connection.address, e)); + } + } + + // This is only called for internal calls, so don't bother with ExecutionInfo + @Override + public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { + onSet(connection, response, null, null, latency); + } + + @Override + public void onException(Connection connection, Exception exception, long latency, int retryCount) { + future.setException(exception); + } + + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + // This 
won't be called directly since this will be wrapped by RequestHandler. + throw new UnsupportedOperationException(); + } + + @Override + public int retryCount() { + // This is only called for internal calls (i.e, when the callback is not wrapped in RequestHandler). + // There is no retry logic in that case, so the value does not really matter. + return 0; + } + }, statement); + + return future; + } + + public ExecutionInfo getExecutionInfo() { + return infos.getLast(); + } + + public List getAllExecutionInfo() { + return new ArrayList(infos); + } + + private static class FetchingState { + public final ByteBuffer nextStart; + public final ListenableFuture inProgress; + + FetchingState(ByteBuffer nextStart, ListenableFuture inProgress) { + assert (nextStart == null) != (inProgress == null); + this.nextStart = nextStart; + this.inProgress = inProgress; + } + } + } + + // This method checks the value of the "[applied]" column manually, to avoid instantiating an ArrayBackedRow + // object that we would throw away immediately. + private static boolean checkWasApplied(List firstRow, ColumnDefinitions metadata) { + // If the column is not present or not a boolean, we assume the query + // was not a conditional statement, and therefore return true. 
+ if (firstRow == null) + return true; + int[] is = metadata.findAllIdx("[applied]"); + if (is == null) + return true; + int i = is[0]; + if (!DataType.cboolean().equals(metadata.getType(i))) + return true; + + // Otherwise return the value of the column + ByteBuffer value = firstRow.get(i); + if (value == null || value.remaining() == 0) + return false; + + return TypeCodec.BooleanCodec.instance.deserializeNoBoxing(value); + } +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedRow.java b/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedRow.java new file mode 100644 index 00000000000..3d295aecf72 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedRow.java @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.util.*; + +import com.datastax.driver.core.exceptions.DriverInternalError; + +/** + * Implementation of a Row backed by an ArrayList. 
+ */ +class ArrayBackedRow extends AbstractGettableData implements Row { + + private final ColumnDefinitions metadata; + private final Token.Factory tokenFactory; + private final List data; + + private ArrayBackedRow(ColumnDefinitions metadata, Token.Factory tokenFactory, ProtocolVersion protocolVersion, List data) { + super(protocolVersion); + this.metadata = metadata; + this.tokenFactory = tokenFactory; + this.data = data; + } + + static Row fromData(ColumnDefinitions metadata, Token.Factory tokenFactory, ProtocolVersion protocolVersion, List data) { + if (data == null) + return null; + + return new ArrayBackedRow(metadata, tokenFactory, protocolVersion, data); + } + + @Override + public ColumnDefinitions getColumnDefinitions() { + return metadata; + } + + @Override + protected DataType getType(int i) { + return metadata.getType(i); + } + + @Override + protected String getName(int i) { + return metadata.getName(i); + } + + @Override + protected ByteBuffer getValue(int i) { + return data.get(i); + } + + @Override + protected int getIndexOf(String name) { + return metadata.getFirstIdx(name); + } + + @Override + public Token getToken(int i) { + if (tokenFactory == null) + throw new DriverInternalError("Token factory not set. This should only happen at initialization time"); + + metadata.checkType(i, tokenFactory.getTokenType().getName()); + + ByteBuffer value = data.get(i); + if (value == null || value.remaining() == 0) + return null; + + return tokenFactory.deserialize(value, protocolVersion); + } + + @Override + public Token getToken(String name) { + return getToken(metadata.getFirstIdx(name)); + } + + public Token getPartitionKeyToken() { + int i = 0; + for (ColumnDefinitions.Definition column : metadata) { + if (column.getName().matches("token(.*)")) + return getToken(i); + i++; + } + throw new IllegalStateException("Found no column named 'token(...)'. 
If the column is aliased, use getToken(String)."); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Row["); + for (int i = 0; i < metadata.size(); i++) { + if (i != 0) + sb.append(", "); + ByteBuffer bb = data.get(i); + if (bb == null) + sb.append("NULL"); + else + sb.append(metadata.getType(i).codec(protocolVersion).deserialize(bb).toString()); + } + sb.append(']'); + return sb.toString(); + } +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.java new file mode 100644 index 00000000000..31565a53c6d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.java @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * A timestamp generator based on {@code System.currentTimeMillis()}, with an incrementing atomic counter + * to generate the sub-millisecond part. + *

+ * This implementation guarantees incrementing timestamps among all client threads, provided that no more than + * 1000 are requested for a given clock tick (the exact granularity of of {@link System#currentTimeMillis()} + * depends on the operating system). + *

+ * If that rate is exceeded, a warning is logged and the timestamps don't increment anymore until the next clock + * tick. If you consistently exceed that rate, consider using {@link ThreadLocalMonotonicTimestampGenerator}. + */ +public class AtomicMonotonicTimestampGenerator extends AbstractMonotonicTimestampGenerator { + private AtomicLong lastRef = new AtomicLong(0); + + @Override + public long next() { + while (true) { + long last = lastRef.get(); + long next = computeNext(last); + if (lastRef.compareAndSet(last, next)) + return next; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/AuthProvider.java b/driver-core/src/main/java/com/datastax/driver/core/AuthProvider.java new file mode 100644 index 00000000000..b3ad90ce01f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AuthProvider.java @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; + +import com.datastax.driver.core.exceptions.AuthenticationException; + +/** + * Provides {@link Authenticator} instances for use when connecting + * to Cassandra nodes. + * + * See {@link PlainTextAuthProvider} for an implementation which uses SASL + * PLAIN mechanism to authenticate using username/password strings + */ +public interface AuthProvider { + + /** + * A provider that provides no authentication capability. + *

+ * This is only useful as a placeholder when no authentication is to be used. + */ + public static final AuthProvider NONE = new AuthProvider() { + public Authenticator newAuthenticator(InetSocketAddress host) { + throw new AuthenticationException(host, + String.format("Host %s requires authentication, but no authenticator found in Cluster configuration", host)); + } + }; + + /** + * The {@code Authenticator} to use when connecting to {@code host} + * + * @param host the Cassandra host to connect to. + * @return The authentication implementation to use. + */ + public Authenticator newAuthenticator(InetSocketAddress host) throws AuthenticationException; +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Authenticator.java b/driver-core/src/main/java/com/datastax/driver/core/Authenticator.java new file mode 100644 index 00000000000..c34c6b334fb --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Authenticator.java @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * Handles SASL authentication with Cassandra servers. + *

+ * Each time a new connection is created and the server requires authentication, + * a new instance of this class will be created by the corresponding + * {@link AuthProvider} to handle that authentication. The lifecycle of that + * new {@code Authenticator} will be: + *

    + *
  1. The {@code initialResponse} method will be called. The initial return + * value will be sent to the server to initiate the handshake.
  2. + *
  3. The server will respond to each client response by either issuing a + * challenge or indicating that the authentication is complete (successfully or not). + * If a new challenge is issued, the authenticator {@code evaluateChallenge} + * method will be called to produce a response that will be sent to the + * server. This challenge/response negotiation will continue until the server + * responds that authentication is successful (or an {@code AuthenticationException} + * is raised). + *
  4. + *
  5. When the server indicates that authentication is successful, the + * {@code onAuthenticationSuccess} method will be called with the last information + * that the server may optionally have sent. + *
  6. + *
+ * The exact nature of the negotiation between client and server is specific + * to the authentication mechanism configured server side. + */ +public interface Authenticator { + + /** + * Obtain an initial response token for initializing the SASL handshake + * + * @return the initial response to send to the server, may be null + */ + public byte[] initialResponse(); + + /** + * Evaluate a challenge received from the Server. Generally, this method + * should return null when authentication is complete from the client + * perspective + * + * @param challenge the server's SASL challenge + * @return updated SASL token, may be null to indicate the client + * requires no further action + */ + public byte[] evaluateChallenge(byte[] challenge); + + /** + * Called when authentication is successful with the last information + * optionally sent by the server. + * + * @param token the information sent by the server with the authentication + * successful message. This will be {@code null} if the server sends no + * particular information on authentication success. + */ + public void onAuthenticationSuccess(byte[] token); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java new file mode 100644 index 00000000000..f8de37faccd --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java @@ -0,0 +1,252 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import com.google.common.collect.ImmutableList; + +import com.datastax.driver.core.exceptions.UnsupportedFeatureException; + +/** + * A statement that groups a number of {@link Statement} so they get executed as + * a batch. + *

+ * Note: BatchStatement is not supported with the native protocol version 1: you + * will get an {@link UnsupportedFeatureException} when submitting one if + * version 1 of the protocol is in use (i.e. if you've force version 1 through + * {@link Cluster.Builder#withProtocolVersion} or you use Cassandra 1.2). Note + * however that you can still use CQL Batch statements + * even with the protocol version 1. + *

+ * Setting a BatchStatement's serial consistency level is only supported with the + * native protocol version 3 or higher (see {@link #setSerialConsistencyLevel(ConsistencyLevel)}). + */ +public class BatchStatement extends Statement { + + /** + * The type of batch to use. + */ + public enum Type { + /** + * A logged batch: Cassandra will first write the batch to its distributed batch log + * to ensure the atomicity of the batch (atomicity meaning that if any statement in + * the batch succeeds, all will eventually succeed). + */ + LOGGED, + + /** + * A batch that doesn't use Cassandra's distributed batch log. Such batch are not + * guaranteed to be atomic. + */ + UNLOGGED, + + /** + * A counter batch. Note that such batch is the only type that can contain counter + * operations and it can only contain these. + */ + COUNTER + }; + + final Type batchType; + private final List statements = new ArrayList(); + + /** + * Creates a new {@code LOGGED} batch statement. + */ + public BatchStatement() { + this(Type.LOGGED); + } + + /** + * Creates a new batch statement of the provided type. + * + * @param batchType the type of batch. + */ + public BatchStatement(Type batchType) { + this.batchType = batchType; + } + + IdAndValues getIdAndValues(ProtocolVersion protocolVersion) { + IdAndValues idAndVals = new IdAndValues(statements.size()); + for (Statement statement : statements) { + if (statement instanceof RegularStatement) { + RegularStatement st = (RegularStatement)statement; + ByteBuffer[] vals = st.getValues(protocolVersion); + idAndVals.ids.add(st.getQueryString()); + idAndVals.values.add(vals == null ? Collections.emptyList() : Arrays.asList(vals)); + } else { + // We handle BatchStatement in add() so ... 
+ assert statement instanceof BoundStatement; + BoundStatement st = (BoundStatement)statement; + idAndVals.ids.add(st.statement.getPreparedId().id); + idAndVals.values.add(Arrays.asList(st.wrapper.values)); + } + } + return idAndVals; + } + + /** + * Adds a new statement to this batch. + *

+ * Note that {@code statement} can be any {@code Statement}. It is allowed to mix + * {@code RegularStatement} and {@code BoundStatement} in the same + * {@code BatchStatement} in particular. Adding another {@code BatchStatement} + * is also allowed for convenience and is equivalent to adding all the {@code Statement} + * contained in that other {@code BatchStatement}. + *

+ * When adding a {@code BoundStatement}, all of its values must be set, otherwise an + * {@code IllegalStateException} will be thrown when submitting the batch statement. + * See {@link BoundStatement} for more details, in particular how to handle {@code null} + * values. + *

+ * Please note that the options of the added Statement (all those defined directly by the + * {@link Statement} class: consistency level, fetch size, tracing, ...) will be ignored + * for the purpose of the execution of the Batch. Instead, the options used are the one + * of this {@code BatchStatement} object. + * + * @param statement the new statement to add. + * @return this batch statement. + * + * @throws IllegalStateException if adding the new statement means that this + * {@code BatchStatement} has more than 65536 statements (since this is the maximum number + * of statements for a BatchStatement allowed by the underlying protocol). + */ + public BatchStatement add(Statement statement) { + + // We handle BatchStatement here (rather than in getIdAndValues) as it make it slightly + // easier to avoid endless loop if the use mistakenly pass a batch that depends on this + // object (or this directly). + if (statement instanceof BatchStatement) { + for (Statement subStatements : ((BatchStatement)statement).statements) { + add(subStatements); + } + } else { + if (statements.size() >= 0xFFFF) + throw new IllegalStateException("Batch statement cannot contain more than " + 0xFFFF + " statements."); + statements.add(statement); + } + return this; + } + + /** + * Adds multiple statements to this batch. + *

+ * This is a shortcut method that calls {@link #add} on all the statements + * from {@code statements}. + * + * @param statements the statements to add. + * @return this batch statement. + */ + public BatchStatement addAll(Iterable statements) { + for (Statement statement : statements) + add(statement); + return this; + } + + /** + * The statements that have been added to this batch so far. + * + * @return an (immutable) collection of the statements that have been added + * to this batch so far. + */ + public Collection getStatements() { + return ImmutableList.copyOf(statements); + } + + /** + * Clears this batch, removing all statements added so far. + * + * @return this (now empty) {@code BatchStatement}. + */ + public BatchStatement clear() { + statements.clear(); + return this; + } + + /** + * Returns the number of elements in this batch. + * + * @return the number of elements in this batch. + */ + public int size() { + return statements.size(); + } + + /** + * Sets the serial consistency level for the query. + *

+ * This is only supported with version 3 or higher of the native protocol. If you call + * this method when version 2 is in use, you will get an {@link UnsupportedFeatureException} + * when submitting the statement. With version 2, protocol batches with conditions + * have their serial consistency level hardcoded to SERIAL; if you need to execute a batch + * with LOCAL_SERIAL, you will have to use a CQL batch. + * + * @param serialConsistency the serial consistency level to set. + * @return this {@code Statement} object. + * + * @throws IllegalArgumentException if {@code serialConsistency} is not one of + * {@code ConsistencyLevel.SERIAL} or {@code ConsistencyLevel.LOCAL_SERIAL}. + * + * @see Statement#setSerialConsistencyLevel(ConsistencyLevel) + */ + @Override + public BatchStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { + return (BatchStatement) super.setSerialConsistencyLevel(serialConsistency); + } + + @Override + public ByteBuffer getRoutingKey() { + for (Statement statement : statements) { + ByteBuffer rk = statement.getRoutingKey(); + if (rk != null) + return rk; + } + return null; + } + + @Override + public String getKeyspace() { + for (Statement statement : statements) { + String keyspace = statement.getKeyspace(); + if (keyspace != null) + return keyspace; + } + return null; + } + + void ensureAllSet() { + for (Statement statement : statements) + if (statement instanceof BoundStatement) + ((BoundStatement) statement).ensureAllSet(); + } + + static class IdAndValues { + + public final List ids; + public final List> values; + + IdAndValues(int nbstatements) { + ids = new ArrayList(nbstatements); + values = new ArrayList>(nbstatements); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java new file mode 100644 index 00000000000..6d3b6286fbf --- /dev/null +++ 
b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -0,0 +1,1390 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.google.common.reflect.TypeToken; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * A prepared statement with values bound to the bind variables. + *

+ * Once values has been provided for the variables of the {@link PreparedStatement} + * it has been created from, such BoundStatement can be executed (through + * {@link Session#execute(Statement)}). + *

+ * The values of a BoundStatement can be set by either index or name. When + * setting them by name, names follow the case insensitivity rules explained in + * {@link ColumnDefinitions} but with the difference that if multiple bind + * variables have the same name, setting that name will set all the + * variables for that name. + *

+ * All the variables of the statement must be bound. If you don't explicitly + * set a value for a variable, an {@code IllegalStateException} will be + * thrown when submitting the statement. If you want to set a variable to + * {@code null}, use {@link #setToNull(int) setToNull}. + */ +public class BoundStatement extends Statement implements SettableData, GettableData { + private static final ByteBuffer UNSET = ByteBuffer.allocate(0); + + final PreparedStatement statement; + + // Statement is already an abstract class, so we can't make it extend AbstractData directly. But + // we still want to avoid duplicating too much code so we wrap. + final DataWrapper wrapper; + + private ByteBuffer routingKey; + + /** + * Creates a new {@code BoundStatement} from the provided prepared + * statement. + * @param statement the prepared statement from which to create a {@code BoundStatement}. + */ + public BoundStatement(PreparedStatement statement) { + this.statement = statement; + this.wrapper = new DataWrapper(this, statement.getVariables().size()); + for (int i = 0; i < wrapper.values.length; i++) { + wrapper.values[i] = UNSET; + } + + if (statement.getConsistencyLevel() != null) + this.setConsistencyLevel(statement.getConsistencyLevel()); + if (statement.getSerialConsistencyLevel() != null) + this.setSerialConsistencyLevel(statement.getSerialConsistencyLevel()); + if (statement.isTracing()) + this.enableTracing(); + if (statement.getRetryPolicy() != null) + this.setRetryPolicy(statement.getRetryPolicy()); + } + + /** + * Returns the prepared statement on which this BoundStatement is based. + * + * @return the prepared statement on which this BoundStatement is based. + */ + public PreparedStatement preparedStatement() { + return statement; + } + + /** + * Returns whether the {@code i}th variable has been bound. + * + * @param i the index of the variable to check. + * @return whether the {@code i}th variable has been bound. 
+ * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + */ + public boolean isSet(int i) { + return wrapper.getValue(i) != UNSET; + } + + /** + * Returns whether the first occurrence of variable {@code name} has been + * bound. + * + * @param name the name of the variable to check. + * @return whether the first occurrence of variable {@code name} has been + * bound to a non-null value. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is if {@code !this.preparedStatement().variables().names().contains(name)}. + */ + public boolean isSet(String name) { + return wrapper.getValue(wrapper.getIndexOf(name)) != UNSET; + } + + /** + * Bound values to the variables of this statement. + * + * This is a convenience method to bind all the variables of the + * {@code BoundStatement} in one call. + * + * @param values the values to bind to the variables of the newly created + * BoundStatement. The first element of {@code values} will be bound to the + * first bind variable, etc. It is legal to provide fewer values than the + * statement has bound variables. In that case, the remaining variable need + * to be bound before execution. If more values than variables are provided + * however, an IllegalArgumentException wil be raised. + * @return this bound statement. + * + * @throws IllegalArgumentException if more {@code values} are provided + * than there is of bound variables in this statement. + * @throws InvalidTypeException if any of the provided value is not of + * correct type to be bound to the corresponding bind variable. + * @throws NullPointerException if one of {@code values} is a collection + * (List, Set or Map) containing a null value. Nulls are not supported in + * collections by CQL. + */ + public BoundStatement bind(Object... 
values) { + + if (values.length > statement.getVariables().size()) + throw new IllegalArgumentException(String.format("Prepared statement has only %d variables, %d values provided", statement.getVariables().size(), values.length)); + + for (int i = 0; i < values.length; i++) + { + Object toSet = values[i]; + + if (toSet == null) { + wrapper.values[i] = null; + continue; + } + + DataType columnType = statement.getVariables().getType(i); + switch (columnType.getName()) { + case LIST: + if (!(toSet instanceof List)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a list but %s provided", i, toSet.getClass())); + + List l = (List)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!l.isEmpty()) { + // Ugly? Yes + Class providedClass = l.get(0).getClass(); + Class expectedClass = columnType.getTypeArguments().get(0).asJavaClass(); + if (!expectedClass.isAssignableFrom(providedClass)) + throw new InvalidTypeException(String.format("Invalid type for value %d of CQL type %s, expecting list of %s but provided list of %s", i, columnType, expectedClass, providedClass)); + } + break; + case SET: + if (!(toSet instanceof Set)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a set but %s provided", i, toSet.getClass())); + + Set s = (Set)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!s.isEmpty()) { + // Ugly? 
Yes + Class providedClass = s.iterator().next().getClass(); + Class expectedClass = columnType.getTypeArguments().get(0).getName().javaType; + if (!expectedClass.isAssignableFrom(providedClass)) + throw new InvalidTypeException(String.format("Invalid type for value %d of CQL type %s, expecting set of %s but provided set of %s", i, columnType, expectedClass, providedClass)); + } + break; + case MAP: + if (!(toSet instanceof Map)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a map but %s provided", i, toSet.getClass())); + + Map m = (Map)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!m.isEmpty()) { + // Ugly? Yes + Map.Entry entry = m.entrySet().iterator().next(); + Class providedKeysClass = entry.getKey().getClass(); + Class providedValuesClass = entry.getValue().getClass(); + + Class expectedKeysClass = columnType.getTypeArguments().get(0).getName().javaType; + Class expectedValuesClass = columnType.getTypeArguments().get(1).getName().javaType; + if (!expectedKeysClass.isAssignableFrom(providedKeysClass) || !expectedValuesClass.isAssignableFrom(providedValuesClass)) + throw new InvalidTypeException(String.format("Invalid type for value %d of CQL type %s, expecting map of %s->%s but provided set of %s->%s", i, columnType, expectedKeysClass, expectedValuesClass, providedKeysClass, providedValuesClass)); + } + break; + default: + if (toSet instanceof Token) + toSet = ((Token)toSet).getValue(); + + Class providedClass = toSet.getClass(); + Class expectedClass = columnType.getName().javaType; + if (!expectedClass.isAssignableFrom(providedClass)) + throw new InvalidTypeException(String.format("Invalid type for value %d of CQL type %s, expecting %s but %s provided", i, columnType, expectedClass, providedClass)); + break; + } + wrapper.values[i] = columnType.codec(statement.getPreparedId().protocolVersion).serialize(toSet); + } + return this; + 
} + + /** + * Sets the routing key for this bound statement. + *

+ * This is useful when the routing key can neither be set on the {@code PreparedStatement} this bound statement + * was built from, nor automatically computed from bound variables. In particular, this is the case if the + * partition key is composite and only some of its components are bound. + * + * @param routingKey the raw (binary) value to use as routing key. + * @return this {@code BoundStatement} object. + * + * @see BoundStatement#getRoutingKey + */ + public BoundStatement setRoutingKey(ByteBuffer routingKey) { + this.routingKey = routingKey; + return this; + } + + /** + * The routing key for this bound query. + *

+ * This method will return a non-{@code null} value if either of the following occur: + *

    + *
  • The routing key has been set directly through {@link BoundStatement#setRoutingKey}.
  • + *
  • The routing key has been set through {@link PreparedStatement#setRoutingKey} for the + * {@code PreparedStatement} this statement has been built from.
  • + *
  • All the columns composing the partition key are bound variables of this {@code BoundStatement}. The routing + * key will then be built using the values provided for these partition key columns.
  • + *
+ * Otherwise, {@code null} is returned. + *

+ * + * Note that if the routing key has been set through {@link BoundStatement#setRoutingKey}, then that takes + * precedence. If the routing key has been set through {@link PreparedStatement#setRoutingKey} then that is used + * next. If neither of those are set then it is computed. + * + * @return the routing key for this statement or {@code null}. + */ + @Override + public ByteBuffer getRoutingKey() { + if (this.routingKey != null) { + return this.routingKey; + } + + if (statement.getRoutingKey() != null) { + return statement.getRoutingKey(); + } + + int[] rkIndexes = statement.getPreparedId().routingKeyIndexes; + if (rkIndexes != null) { + if (rkIndexes.length == 1) { + return wrapper.values[rkIndexes[0]]; + } else { + ByteBuffer[] components = new ByteBuffer[rkIndexes.length]; + for (int i = 0; i < components.length; ++i) { + ByteBuffer value = wrapper.values[rkIndexes[i]]; + if (value == null) + return null; + components[i] = value; + } + return SimpleStatement.compose(components); + } + } + return null; + } + + /** + * Returns the keyspace this query operates on. + *

+ * This method will always return a non-{@code null} value (unless the statement + * has no variables, but you should avoid prepared statement in the first in that + * case). The keyspace returned will be the one corresponding to the first + * variable prepared in this statement (which in almost all case will be the + * keyspace for the operation, though it's possible in CQL to build a batch + * statement that acts on multiple keyspace). + * + * @return the keyspace for this statement (see above), or {@code null} if the + * statement has no variables. + */ + @Override + public String getKeyspace() { + return statement.getPreparedId().metadata.size() == 0 ? null : statement.getPreparedId().metadata.getKeyspace(0); + } + + /** + * Sets the {@code i}th value to the provided boolean. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type BOOLEAN. + */ + public BoundStatement setBool(int i, boolean v) { + return wrapper.setBool(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided boolean. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any one occurrence of) {@code name} is not of type BOOLEAN. + */ + public BoundStatement setBool(String name, boolean v) { + return wrapper.setBool(name, v); + } + + /** + * Set the {@code i}th value to the provided integer. + * + * @param i the index of the variable to set. + * @param v the value to set. 
+ * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type INT. + */ + public BoundStatement setInt(int i, int v) { + return wrapper.setInt(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided integer. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any one occurrence of) {@code name} is not of type INT. + */ + public BoundStatement setInt(String name, int v) { + return wrapper.setInt(name, v); + } + + /** + * Sets the {@code i}th value to the provided long. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type BIGINT or COUNTER. + */ + public BoundStatement setLong(int i, long v) { + return wrapper.setLong(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided long. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type BIGINT or COUNTER. 
+ */ + public BoundStatement setLong(String name, long v) { + return wrapper.setLong(name, v); + } + + /** + * Set the {@code i}th value to the provided date. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type TIMESTAMP. + */ + public BoundStatement setDate(int i, Date v) { + return wrapper.setDate(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided date. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type TIMESTAMP. + */ + public BoundStatement setDate(String name, Date v) { + return wrapper.setDate(name, v); + } + + /** + * Sets the {@code i}th value to the provided float. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type FLOAT. + */ + public BoundStatement setFloat(int i, float v) { + return wrapper.setFloat(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided float. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. 
+ * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type FLOAT. + */ + public BoundStatement setFloat(String name, float v) { + return wrapper.setFloat(name, v); + } + + /** + * Sets the {@code i}th value to the provided double. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type DOUBLE. + */ + public BoundStatement setDouble(int i, double v) { + return wrapper.setDouble(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided double. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type DOUBLE. + */ + public BoundStatement setDouble(String name, double v) { + return wrapper.setDouble(name, v); + } + + /** + * Sets the {@code i}th value to the provided string. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is of neither of the + * following types: VARCHAR, TEXT or ASCII. 
+ */ + public BoundStatement setString(int i, String v) { + return wrapper.setString(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided string. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * of neither of the following types: VARCHAR, TEXT or ASCII. + */ + public BoundStatement setString(String name, String v) { + return wrapper.setString(name, v); + } + + /** + * Sets the {@code i}th value to the provided byte buffer. + * + * This method validate that the type of the column set is BLOB. If you + * want to insert manually serialized data into columns of another type, + * use {@link #setBytesUnsafe} instead. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type BLOB. + */ + public BoundStatement setBytes(int i, ByteBuffer v) { + return wrapper.setBytes(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided byte buffer. + * + * This method validate that the type of the column set is BLOB. If you + * want to insert manually serialized data into columns of another type, + * use {@link #setBytesUnsafe} instead. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. 
+ * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is not of type BLOB. + */ + public BoundStatement setBytes(String name, ByteBuffer v) { + return wrapper.setBytes(name, v); + } + + /** + * Sets the {@code i}th value to the provided byte buffer. + * + * Contrary to {@link #setBytes}, this method does not check the + * type of the column set. If you insert data that is not compatible with + * the type of the column, you will get an {@code InvalidQueryException} at + * execute time. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + */ + public BoundStatement setBytesUnsafe(int i, ByteBuffer v) { + return wrapper.setBytesUnsafe(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided byte buffer. + * + * Contrary to {@link #setBytes}, this method does not check the + * type of the column set. If you insert data that is not compatible with + * the type of the column, you will get an {@code InvalidQueryException} at + * execute time. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is if {@code !this.preparedStatement().variables().names().contains(name)}. + */ + public BoundStatement setBytesUnsafe(String name, ByteBuffer v) { + return wrapper.setBytesUnsafe(name, v); + } + + /** + * Sets the {@code i}th value to the provided big integer. + * + * @param i the index of the variable to set. + * @param v the value to set. 
+ * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type VARINT. + */ + public BoundStatement setVarint(int i, BigInteger v) { + return wrapper.setVarint(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided big integer. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type VARINT. + */ + public BoundStatement setVarint(String name, BigInteger v) { + return wrapper.setVarint(name, v); + } + + /** + * Sets the {@code i}th value to the provided big decimal. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type DECIMAL. + */ + public BoundStatement setDecimal(int i, BigDecimal v) { + return wrapper.setDecimal(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided big decimal. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. 
+ * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type DECIMAL. + */ + public BoundStatement setDecimal(String name, BigDecimal v) { + return wrapper.setDecimal(name, v); + } + + /** + * Sets the {@code i}th value to the provided UUID. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type UUID or + * TIMEUUID, or if column {@code i} is of type TIMEUUID but {@code v} is + * not a type 1 UUID. + */ + public BoundStatement setUUID(int i, UUID v) { + return wrapper.setUUID(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided UUID. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type UUID or TIMEUUID, or if column {@code name} is of type + * TIMEUUID but {@code v} is not a type 1 UUID. + */ + public BoundStatement setUUID(String name, UUID v) { + return wrapper.setUUID(name, v); + } + + /** + * Sets the {@code i}th value to the provided inet address. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of type INET. 
+ */ + public BoundStatement setInet(int i, InetAddress v) { + return wrapper.setInet(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided inet address. + * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type INET. + */ + public BoundStatement setInet(String name, InetAddress v) { + return wrapper.setInet(name, v); + } + + /** + * Sets the {@code i}th value to the provided {@link Token}. + *

+ * {@link #setPartitionKeyToken(Token)} should generally be preferred if you + * have a single token variable. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of the type of the token's value. + */ + public BoundStatement setToken(int i, Token v) { + return wrapper.setToken(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided token. + *

+ * {@link #setPartitionKeyToken(Token)} should generally be preferred if you + * have a single token variable. + *

+ * If you have multiple token variables, use positional binding ({@link #setToken(int, Token)}, + * or named bind markers: + *

+     * {@code
+     * PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) > :min AND token(k) <= :max");
+     * BoundStatement b = pst.bind().setToken("min", minToken).setToken("max", maxToken);
+     * }
+     * 
+ * + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of the type of the token's value. + */ + public BoundStatement setToken(String name, Token v) { + return wrapper.setToken(name, v); + } + + /** + * Sets the value for (all occurrences of) variable "{@code partition key token}" + * to the provided token (this is the name generated by Cassandra for markers + * corresponding to a {@code token(...)} call). + *

+ * This method is a shorthand for statements with a single token variable: + *

+     * {@code
+     * Token token = ...
+     * PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) = ?");
+     * BoundStatement b = pst.bind().setPartitionKeyToken(token);
+     * }
+     * 
+ * If you have multiple token variables, use positional binding ({@link #setToken(int, Token)}, + * or named bind markers: + *
+     * {@code
+     * PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) > :min AND token(k) <= :max");
+     * BoundStatement b = pst.bind().setToken("min", minToken).setToken("max", maxToken);
+     * }
+     * 
+ * + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of the type of the token's value. + */ + public BoundStatement setPartitionKeyToken(Token v) { + return setToken("partition key token", v); + } + + /** + * Sets the {@code i}th value to the provided list. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param the type of the elements of the list to set. + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not a list type or + * if the elements of {@code v} are not of the type of the elements of + * column {@code i}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public BoundStatement setList(int i, List v) { + return wrapper.setList(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided list. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param the type of the elements of the list to set. + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a list type or if the elements of {@code v} are not of the type of + * the elements of column {@code name}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public BoundStatement setList(String name, List v) { + return wrapper.setList(name, v); + } + + /** + * Sets the {@code i}th value to the provided map. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param the type of the keys for the map to set. + * @param the type of the values for the map to set. + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not a map type or + * if the elements (keys or values) of {@code v} are not of the type of the + * elements of column {@code i}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public BoundStatement setMap(int i, Map v) { + return wrapper.setMap(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided map. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param the type of the keys for the map to set. + * @param the type of the values for the map to set. + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a map type or if the elements (keys or values) of {@code v} are not of + * the type of the elements of column {@code name}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public BoundStatement setMap(String name, Map v) { + return wrapper.setMap(name, v); + } + + /** + * Sets the {@code i}th value to the provided set. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param the type of the elements of the set to set. + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not a set type or + * if the elements of {@code v} are not of the type of the elements of + * column {@code i}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public BoundStatement setSet(int i, Set v) { + return wrapper.setSet(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided set. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param the type of the elements of the set to set. + * @param name the name of the variable to set; if multiple variables + * {@code name} are prepared, all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a map type or if the elements of {@code v} are not of the type of + * the elements of column {@code name}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public BoundStatement setSet(String name, Set v) { + return wrapper.setSet(name, v); + } + + /** + * Sets the {@code i}th value to the provided UDT value. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this BoundStatement . + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this BoundStatement. + * @throws InvalidTypeException if value {@code i} is not a UDT value or if its definition + * does not correspond to the one of {@code v}. + */ + public BoundStatement setUDTValue(int i, UDTValue v) { + return wrapper.setUDTValue(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided UDT value. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this BoundStatement. 
+ * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a UDT value or if the definition of column {@code name} does not correspond to + * the one of {@code v}. + */ + public BoundStatement setUDTValue(String name, UDTValue v) { + return wrapper.setUDTValue(name, v); + } + + /** + * Sets the {@code i}th value to the provided tuple value. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this BoundStatement. + * @throws InvalidTypeException if value {@code i} is not a tuple value or if its types + * do not correspond to the ones of {@code v}. + */ + public BoundStatement setTupleValue(int i, TupleValue v) { + return wrapper.setTupleValue(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided tuple value. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this BoundStatement. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a tuple value or if the types of column {@code name} do not correspond to + * the ones of {@code v}. + */ + public BoundStatement setTupleValue(String name, TupleValue v) { + return wrapper.setTupleValue(name, v); + } + + /** + * Sets the {@code i}th value to {@code null}. + *

+ * This is mainly intended for CQL types which map to native Java types. + * + * @param i the index of the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public BoundStatement setToNull(int i) { + return wrapper.setToNull(i); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to {@code null}. + *

+ * This is mainly intended for CQL types which map to native Java types. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + */ + public BoundStatement setToNull(String name) { + return wrapper.setToNull(name); + } + + /** + * {@inheritDoc} + */ + public boolean isNull(int i) { + return wrapper.isNull(i); + } + + /** + * {@inheritDoc} + */ + public boolean isNull(String name) { + return wrapper.isNull(name); + } + + /** + * {@inheritDoc} + */ + public boolean getBool(int i) { + return wrapper.getBool(i); + } + + /** + * {@inheritDoc} + */ + public boolean getBool(String name) { + return wrapper.getBool(name); + } + + /** + * {@inheritDoc} + */ + public int getInt(int i) { + return wrapper.getInt(i); + } + + /** + * {@inheritDoc} + */ + public int getInt(String name) { + return wrapper.getInt(name); + } + + /** + * {@inheritDoc} + */ + public long getLong(int i) { + return wrapper.getLong(i); + } + + /** + * {@inheritDoc} + */ + public long getLong(String name) { + return wrapper.getLong(name); + } + + /** + * {@inheritDoc} + */ + public Date getDate(int i) { + return wrapper.getDate(i); + } + + /** + * {@inheritDoc} + */ + public Date getDate(String name) { + return wrapper.getDate(name); + } + + /** + * {@inheritDoc} + */ + public float getFloat(int i) { + return wrapper.getFloat(i); + } + + /** + * {@inheritDoc} + */ + public float getFloat(String name) { + return wrapper.getFloat(name); + } + + /** + * {@inheritDoc} + */ + public double getDouble(int i) { + return wrapper.getDouble(i); + } + + /** + * {@inheritDoc} + */ + public double getDouble(String name) { + return wrapper.getDouble(name); + } + + /** + * {@inheritDoc} + */ + public ByteBuffer getBytesUnsafe(int i) { + return wrapper.getBytesUnsafe(i); + } + + /** + * {@inheritDoc} + */ + public ByteBuffer 
getBytesUnsafe(String name) { + return wrapper.getBytesUnsafe(name); + } + + /** + * {@inheritDoc} + */ + public ByteBuffer getBytes(int i) { + return wrapper.getBytes(i); + } + + /** + * {@inheritDoc} + */ + public ByteBuffer getBytes(String name) { + return wrapper.getBytes(name); + } + + /** + * {@inheritDoc} + */ + public String getString(int i) { + return wrapper.getString(i); + } + + /** + * {@inheritDoc} + */ + public String getString(String name) { + return wrapper.getString(name); + } + + /** + * {@inheritDoc} + */ + public BigInteger getVarint(int i) { + return wrapper.getVarint(i); + } + + /** + * {@inheritDoc} + */ + public BigInteger getVarint(String name) { + return wrapper.getVarint(name); + } + + /** + * {@inheritDoc} + */ + public BigDecimal getDecimal(int i) { + return wrapper.getDecimal(i); + } + + /** + * {@inheritDoc} + */ + public BigDecimal getDecimal(String name) { + return wrapper.getDecimal(name); + } + + /** + * {@inheritDoc} + */ + public UUID getUUID(int i) { + return wrapper.getUUID(i); + } + + /** + * {@inheritDoc} + */ + public UUID getUUID(String name) { + return wrapper.getUUID(name); + } + + /** + * {@inheritDoc} + */ + public InetAddress getInet(int i) { + return wrapper.getInet(i); + } + + /** + * {@inheritDoc} + */ + public InetAddress getInet(String name) { + return wrapper.getInet(name); + } + + /** + * {@inheritDoc} + */ + public List getList(int i, Class elementsClass) { + return wrapper.getList(i, elementsClass); + } + + /** + * {@inheritDoc} + */ + public List getList(int i, TypeToken elementsType) { + return wrapper.getList(i, elementsType); + } + + /** + * {@inheritDoc} + */ + public List getList(String name, Class elementsClass) { + return wrapper.getList(name, elementsClass); + } + + /** + * {@inheritDoc} + */ + public List getList(String name, TypeToken elementsType) { + return wrapper.getList(name, elementsType); + } + + /** + * {@inheritDoc} + */ + public Set getSet(int i, Class elementsClass) { + return 
wrapper.getSet(i, elementsClass); + } + + /** + * {@inheritDoc} + */ + public Set getSet(int i, TypeToken elementsType) { + return wrapper.getSet(i, elementsType); + } + + /** + * {@inheritDoc} + */ + public Set getSet(String name, Class elementsClass) { + return wrapper.getSet(name, elementsClass); + } + + /** + * {@inheritDoc} + */ + public Set getSet(String name, TypeToken elementsType) { + return wrapper.getSet(name, elementsType); + } + + /** + * {@inheritDoc} + */ + public Map getMap(int i, Class keysClass, Class valuesClass) { + return wrapper.getMap(i, keysClass, valuesClass); + } + + /** + * {@inheritDoc} + */ + public Map getMap(int i, TypeToken keysType, TypeToken valuesType) { + return wrapper.getMap(i, keysType, valuesType); + } + + /** + * {@inheritDoc} + */ + public Map getMap(String name, Class keysClass, Class valuesClass) { + return wrapper.getMap(name, keysClass, valuesClass); + } + + /** + * {@inheritDoc} + */ + public Map getMap(String name, TypeToken keysType, TypeToken valuesType) { + return wrapper.getMap(name, keysType, valuesType); + } + + /** + * {@inheritDoc} + */ + public UDTValue getUDTValue(int i) { + return wrapper.getUDTValue(i); + } + + /** + * {@inheritDoc} + */ + public UDTValue getUDTValue(String name) { + return wrapper.getUDTValue(name); + } + + /** + * {@inheritDoc} + */ + public TupleValue getTupleValue(int i) { + return wrapper.getTupleValue(i); + } + + /** + * {@inheritDoc} + */ + public TupleValue getTupleValue(String name) { + return wrapper.getTupleValue(name); + } + + /** + * {@inheritDoc} + */ + public Object getObject(int i) { + return wrapper.getObject(i); + } + + /** + * {@inheritDoc} + */ + public Object getObject(String name) { + return wrapper.getObject(name); + } + + static class DataWrapper extends AbstractData { + + DataWrapper(BoundStatement wrapped, int size) { + super(wrapped.statement.getPreparedId().protocolVersion, wrapped, size); + } + + protected int[] getAllIndexesOf(String name) { + return 
wrapped.statement.getVariables().getAllIdx(name); + } + + protected DataType getType(int i) { + return wrapped.statement.getVariables().getType(i); + } + + protected String getName(int i) { + return wrapped.statement.getVariables().getName(i); + } + } + + void ensureAllSet() { + int index = 0; + for (ByteBuffer value : wrapper.values) { + if (value == BoundStatement.UNSET) + throw new IllegalStateException("Unset value at index " + index + ". " + + "If you want this value to be null, please set it to null explicitly."); + index += 1; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/BusyConnectionException.java b/driver-core/src/main/java/com/datastax/driver/core/BusyConnectionException.java new file mode 100644 index 00000000000..380f51c572e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/BusyConnectionException.java @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +class BusyConnectionException extends Exception +{ + private static final long serialVersionUID = 0; + + public BusyConnectionException() { + super(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/CBUtil.java b/driver-core/src/main/java/com/datastax/driver/core/CBUtil.java new file mode 100644 index 00000000000..c775aaf4de7 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/CBUtil.java @@ -0,0 +1,347 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.nio.charset.CharacterCodingException; +import java.util.*; + +import io.netty.buffer.ByteBuf; +import io.netty.util.CharsetUtil; + +import com.datastax.driver.core.exceptions.DriverInternalError; + +/** + * ByteBuf utility methods. 
+ */ +abstract class CBUtil { // TODO rename + + private CBUtil() {} + + private static String readString(ByteBuf cb, int length) { + try { + String str = cb.toString(cb.readerIndex(), length, CharsetUtil.UTF_8); + cb.readerIndex(cb.readerIndex() + length); + return str; + } catch (IllegalStateException e) { + // That's the way netty encapsulate a CCE + if (e.getCause() instanceof CharacterCodingException) + throw new DriverInternalError("Cannot decode string as UTF8"); + else + throw e; + } + } + + public static String readString(ByteBuf cb) { + try { + int length = cb.readUnsignedShort(); + return readString(cb, length); + } catch (IndexOutOfBoundsException e) { + throw new DriverInternalError("Not enough bytes to read an UTF8 serialized string preceded by it's 2 bytes length"); + } + } + + public static void writeString(String str, ByteBuf cb) { + byte[] bytes = str.getBytes(CharsetUtil.UTF_8); + cb.writeShort(bytes.length); + cb.writeBytes(bytes); + } + + public static int sizeOfString(String str) { + return 2 + encodedUTF8Length(str); + } + + private static int encodedUTF8Length(String st) { + int strlen = st.length(); + int utflen = 0; + for (int i = 0; i < strlen; i++) { + int c = st.charAt(i); + if ((c >= 0x0001) && (c <= 0x007F)) + utflen++; + else if (c > 0x07FF) + utflen += 3; + else + utflen += 2; + } + return utflen; + } + + public static String readLongString(ByteBuf cb) { + try { + int length = cb.readInt(); + return readString(cb, length); + } catch (IndexOutOfBoundsException e) { + throw new DriverInternalError("Not enough bytes to read an UTF8 serialized string preceded by it's 4 bytes length"); + } + } + + public static void writeLongString(String str, ByteBuf cb) { + byte[] bytes = str.getBytes(CharsetUtil.UTF_8); + cb.writeInt(bytes.length); + cb.writeBytes(bytes); + } + + public static int sizeOfLongString(String str) { + return 4 + str.getBytes(CharsetUtil.UTF_8).length; + } + + public static byte[] readBytes(ByteBuf cb) { + try { + int 
length = cb.readUnsignedShort(); + byte[] bytes = new byte[length]; + cb.readBytes(bytes); + return bytes; + } catch (IndexOutOfBoundsException e) { + throw new DriverInternalError("Not enough bytes to read a byte array preceded by it's 2 bytes length"); + } + } + + public static void writeBytes(byte[] bytes, ByteBuf cb) { + cb.writeShort(bytes.length); + cb.writeBytes(bytes); + } + + public static int sizeOfBytes(byte[] bytes) { + return 2 + bytes.length; + } + + public static ConsistencyLevel readConsistencyLevel(ByteBuf cb) { + return ConsistencyLevel.fromCode(cb.readUnsignedShort()); + } + + public static void writeConsistencyLevel(ConsistencyLevel consistency, ByteBuf cb) { + cb.writeShort(consistency.code); + } + + public static int sizeOfConsistencyLevel(ConsistencyLevel consistency) { + return 2; + } + + public static > T readEnumValue(Class enumType, ByteBuf cb) { + String value = CBUtil.readString(cb); + try { + return Enum.valueOf(enumType, value.toUpperCase()); + } catch (IllegalArgumentException e) { + throw new DriverInternalError(String.format("Invalid value '%s' for %s", value, enumType.getSimpleName())); + } + } + + public static > void writeEnumValue(T enumValue, ByteBuf cb) { + writeString(enumValue.toString(), cb); + } + + public static > int sizeOfEnumValue(T enumValue) { + return sizeOfString(enumValue.toString()); + } + + public static UUID readUUID(ByteBuf cb) { + long msb = cb.readLong(); + long lsb = cb.readLong(); + return new UUID(msb, lsb); + } + + public static void writeUUID(UUID uuid, ByteBuf cb) { + cb.writeLong(uuid.getMostSignificantBits()); + cb.writeLong(uuid.getLeastSignificantBits()); + } + + public static int sizeOfUUID(UUID uuid) { + return 16; + } + + public static List readStringList(ByteBuf cb) { + int length = cb.readUnsignedShort(); + List l = new ArrayList(length); + for (int i = 0; i < length; i++) + l.add(readString(cb)); + return l; + } + + public static void writeStringList(List l, ByteBuf cb) { + 
cb.writeShort(l.size()); + for (String str : l) + writeString(str, cb); + } + + public static int sizeOfStringList(List l) { + int size = 2; + for (String str : l) + size += sizeOfString(str); + return size; + } + + public static Map readStringMap(ByteBuf cb) { + int length = cb.readUnsignedShort(); + Map m = new HashMap(length); + for (int i = 0; i < length; i++) { + String k = readString(cb).toUpperCase(); + String v = readString(cb); + m.put(k, v); + } + return m; + } + + public static void writeStringMap(Map m, ByteBuf cb) { + cb.writeShort(m.size()); + for (Map.Entry entry : m.entrySet()) { + writeString(entry.getKey(), cb); + writeString(entry.getValue(), cb); + } + } + + public static int sizeOfStringMap(Map m) { + int size = 2; + for (Map.Entry entry : m.entrySet()) { + size += sizeOfString(entry.getKey()); + size += sizeOfString(entry.getValue()); + } + return size; + } + + public static Map> readStringToStringListMap(ByteBuf cb) { + int length = cb.readUnsignedShort(); + Map> m = new HashMap>(length); + for (int i = 0; i < length; i++) { + String k = readString(cb).toUpperCase(); + List v = readStringList(cb); + m.put(k, v); + } + return m; + } + + public static void writeStringToStringListMap(Map> m, ByteBuf cb) { + cb.writeShort(m.size()); + for (Map.Entry> entry : m.entrySet()) { + writeString(entry.getKey(), cb); + writeStringList(entry.getValue(), cb); + } + } + + public static int sizeOfStringToStringListMap(Map> m) { + int size = 2; + for (Map.Entry> entry : m.entrySet()) { + size += sizeOfString(entry.getKey()); + size += sizeOfStringList(entry.getValue()); + } + return size; + } + + public static ByteBuffer readValue(ByteBuf cb) { + int length = cb.readInt(); + if (length < 0) + return null; + ByteBuf slice = cb.readSlice(length); + + return ByteBuffer.wrap(readRawBytes(slice)); + } + + public static void writeValue(byte[] bytes, ByteBuf cb) { + if (bytes == null) { + cb.writeInt(-1); + return; + } + + cb.writeInt(bytes.length); + 
cb.writeBytes(bytes); + } + + public static void writeValue(ByteBuffer bytes, ByteBuf cb) { + if (bytes == null) { + cb.writeInt(-1); + return; + } + + cb.writeInt(bytes.remaining()); + cb.writeBytes(bytes.duplicate()); + } + + public static int sizeOfValue(byte[] bytes) { + return 4 + (bytes == null ? 0 : bytes.length); + } + + public static int sizeOfValue(ByteBuffer bytes) { + return 4 + (bytes == null ? 0 : bytes.remaining()); + } + + public static List readValueList(ByteBuf cb) { + int size = cb.readUnsignedShort(); + if (size == 0) + return Collections.emptyList(); + + List l = new ArrayList(size); + for (int i = 0; i < size; i++) + l.add(readValue(cb)); + return l; + } + + public static void writeValueList(List values, ByteBuf cb) { + cb.writeShort(values.size()); + for (ByteBuffer value : values) + CBUtil.writeValue(value, cb); + } + + public static int sizeOfValueList(List values) { + int size = 2; + for (ByteBuffer value : values) + size += CBUtil.sizeOfValue(value); + return size; + } + + public static InetSocketAddress readInet(ByteBuf cb) { + int addrSize = cb.readByte(); + byte[] address = new byte[addrSize]; + cb.readBytes(address); + int port = cb.readInt(); + try { + return new InetSocketAddress(InetAddress.getByAddress(address), port); + } catch (UnknownHostException e) { + throw new DriverInternalError(String.format("Invalid IP address (%d.%d.%d.%d) while deserializing inet address", address[0], address[1], address[2], address[3])); + } + } + + public static void writeInet(InetSocketAddress inet, ByteBuf cb) { + byte[] address = inet.getAddress().getAddress(); + + cb.writeByte(address.length); + cb.writeBytes(address); + cb.writeInt(inet.getPort()); + } + + public static int sizeOfInet(InetSocketAddress inet) { + byte[] address = inet.getAddress().getAddress(); + return 1 + address.length + 4; + } + + /* + * Reads *all* readable bytes from {@code cb} and return them. 
+ * If {@code cb} is backed by an array, this will return the underlying array directly, without copy. + */ + public static byte[] readRawBytes(ByteBuf cb) { + if (cb.hasArray() && cb.readableBytes() == cb.array().length) { + // Move the readerIndex just so we consistently consume the input + cb.readerIndex(cb.writerIndex()); + return cb.array(); + } + + // Otherwise, just read the bytes in a new array + byte[] bytes = new byte[cb.readableBytes()]; + cb.readBytes(bytes); + return bytes; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/CassandraTypeParser.java b/driver-core/src/main/java/com/datastax/driver/core/CassandraTypeParser.java new file mode 100644 index 00000000000..358f6dc4de6 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/CassandraTypeParser.java @@ -0,0 +1,401 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.*; + +import com.google.common.collect.ImmutableMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.utils.Bytes; + +/* + * Helps transforming Cassandra types (as read in the schema tables) to + * DataType. + * + * This is modified (and simplified) from Cassandra's TypeParser class to suit + * our needs. 
In particular it's not very efficient, but it doesn't really matter + * since it's rarely used and never in a critical path. + * + * Note that those methods all throw DriverInternalError when there is a parsing + * problem because in theory we'll only parse class names coming from Cassandra and + * so there shouldn't be anything wrong with them. + */ +class CassandraTypeParser { + private static final Logger logger = LoggerFactory.getLogger(CassandraTypeParser.class); + + private static final String REVERSED_TYPE = "org.apache.cassandra.db.marshal.ReversedType"; + private static final String FROZEN_TYPE = "org.apache.cassandra.db.marshal.FrozenType"; + private static final String COMPOSITE_TYPE = "org.apache.cassandra.db.marshal.CompositeType"; + private static final String COLLECTION_TYPE = "org.apache.cassandra.db.marshal.ColumnToCollectionType"; + private static final String LIST_TYPE = "org.apache.cassandra.db.marshal.ListType"; + private static final String SET_TYPE = "org.apache.cassandra.db.marshal.SetType"; + private static final String MAP_TYPE = "org.apache.cassandra.db.marshal.MapType"; + private static final String UDT_TYPE = "org.apache.cassandra.db.marshal.UserType"; + private static final String TUPLE_TYPE = "org.apache.cassandra.db.marshal.TupleType"; + + private static ImmutableMap cassTypeToDataType = + new ImmutableMap.Builder() + .put("org.apache.cassandra.db.marshal.AsciiType", DataType.ascii()) + .put("org.apache.cassandra.db.marshal.LongType", DataType.bigint()) + .put("org.apache.cassandra.db.marshal.BytesType", DataType.blob()) + .put("org.apache.cassandra.db.marshal.BooleanType", DataType.cboolean()) + .put("org.apache.cassandra.db.marshal.CounterColumnType", DataType.counter()) + .put("org.apache.cassandra.db.marshal.DecimalType", DataType.decimal()) + .put("org.apache.cassandra.db.marshal.DoubleType", DataType.cdouble()) + .put("org.apache.cassandra.db.marshal.FloatType", DataType.cfloat()) + 
.put("org.apache.cassandra.db.marshal.InetAddressType", DataType.inet()) + .put("org.apache.cassandra.db.marshal.Int32Type", DataType.cint()) + .put("org.apache.cassandra.db.marshal.UTF8Type", DataType.text()) + .put("org.apache.cassandra.db.marshal.TimestampType", DataType.timestamp()) + .put("org.apache.cassandra.db.marshal.DateType", DataType.timestamp()) + .put("org.apache.cassandra.db.marshal.UUIDType", DataType.uuid()) + .put("org.apache.cassandra.db.marshal.IntegerType", DataType.varint()) + .put("org.apache.cassandra.db.marshal.TimeUUIDType", DataType.timeuuid()) + .build(); + + static DataType parseOne(String className) { + boolean frozen = false; + if (isReversed(className)) { + // Just skip the ReversedType part, we don't care + className = getNestedClassName(className); + } else if (isFrozen(className)) { + frozen = true; + className = getNestedClassName(className); + } + + Parser parser = new Parser(className, 0); + String next = parser.parseNextName(); + + if (next.startsWith(LIST_TYPE)) + return DataType.list(parseOne(parser.getTypeParameters().get(0)), frozen); + + if (next.startsWith(SET_TYPE)) + return DataType.set(parseOne(parser.getTypeParameters().get(0)), frozen); + + if (next.startsWith(MAP_TYPE)) { + List params = parser.getTypeParameters(); + return DataType.map(parseOne(params.get(0)), parseOne(params.get(1)), frozen); + } + + if (frozen) + logger.warn("Got o.a.c.db.marshal.FrozenType for something else than a collection, " + + "this driver version might be too old for your version of Cassandra"); + + if (isUserType(next)) { + ++parser.idx; // skipping '(' + + String keyspace = parser.readOne(); + parser.skipBlankAndComma(); + String typeName = TypeCodec.StringCodec.utf8Instance.deserialize(Bytes.fromHexString("0x" + parser.readOne())); + parser.skipBlankAndComma(); + Map rawFields = parser.getNameAndTypeParameters(); + List fields = new ArrayList(rawFields.size()); + for (Map.Entry entry : rawFields.entrySet()) + fields.add(new 
UserType.Field(entry.getKey(), parseOne(entry.getValue()))); + return new UserType(keyspace, typeName, fields); + } + + if (isTupleType(next)) { + List rawTypes = parser.getTypeParameters(); + List types = new ArrayList(rawTypes.size()); + for (String rawType : rawTypes) { + types.add(parseOne(rawType)); + } + return new TupleType(types); + } + + DataType type = cassTypeToDataType.get(next); + return type == null ? DataType.custom(className) : type; + } + + public static boolean isReversed(String className) { + return className.startsWith(REVERSED_TYPE); + } + + public static boolean isFrozen(String className) { + return className.startsWith(FROZEN_TYPE); + } + + private static String getNestedClassName(String className) { + Parser p = new Parser(className, 0); + p.parseNextName(); + List l = p.getTypeParameters(); + if (l.size() != 1) + throw new IllegalStateException(); + className = l.get(0); + return className; + } + + public static boolean isUserType(String className) { + return className.startsWith(UDT_TYPE); + } + + public static boolean isTupleType(String className) { + return className.startsWith(TUPLE_TYPE); + } + + private static boolean isComposite(String className) { + return className.startsWith(COMPOSITE_TYPE); + } + + private static boolean isCollection(String className) { + return className.startsWith(COLLECTION_TYPE); + } + + static ParseResult parseWithComposite(String className) { + Parser parser = new Parser(className, 0); + + String next = parser.parseNextName(); + if (!isComposite(next)) + return new ParseResult(parseOne(className), isReversed(next)); + + List subClassNames = parser.getTypeParameters(); + int count = subClassNames.size(); + String last = subClassNames.get(count - 1); + Map collections = new HashMap(); + if (isCollection(last)) { + count--; + Parser collectionParser = new Parser(last, 0); + collectionParser.parseNextName(); // skips columnToCollectionType + Map params = collectionParser.getCollectionsParameters(); + for 
(Map.Entry entry : params.entrySet()) + collections.put(entry.getKey(), parseOne(entry.getValue())); + } + + List types = new ArrayList(count); + List reversed = new ArrayList(count); + for (int i = 0; i < count; i++) { + types.add(parseOne(subClassNames.get(i))); + reversed.add(isReversed(subClassNames.get(i))); + } + + return new ParseResult(true, types, reversed, collections); + } + + static class ParseResult { + public final boolean isComposite; + public final List types; + public final List reversed; + public final Map collections; + + private ParseResult(DataType type, boolean reversed) { + this(false, + Collections.singletonList(type), + Collections.singletonList(reversed), + Collections.emptyMap()); + } + + private ParseResult(boolean isComposite, List types, List reversed, Map collections) { + this.isComposite = isComposite; + this.types = types; + this.reversed = reversed; + this.collections = collections; + } + } + + private static class Parser { + + private final String str; + private int idx; + + private Parser(String str, int idx) { + this.str = str; + this.idx = idx; + } + + public String parseNextName() { + skipBlank(); + return readNextIdentifier(); + } + + public String readOne() { + String name = parseNextName(); + String args = readRawArguments(); + return name + args; + } + + // Assumes we have just read a class name and read it's potential arguments + // blindly. I.e. it assume that either parsing is done or that we're on a '(' + // and this reads everything up until the corresponding closing ')'. It + // returns everything read, including the enclosing parenthesis. 
+ private String readRawArguments() { + skipBlank(); + + if (isEOS() || str.charAt(idx) == ')' || str.charAt(idx) == ',') + return ""; + + if (str.charAt(idx) != '(') + throw new IllegalStateException(String.format("Expecting char %d of %s to be '(' but '%c' found", idx, str, str.charAt(idx))); + + int i = idx; + int open = 1; + while (open > 0) { + ++idx; + + if (isEOS()) + throw new IllegalStateException("Non closed parenthesis"); + + if (str.charAt(idx) == '(') { + open++; + } else if (str.charAt(idx) == ')') { + open--; + } + } + // we've stopped at the last closing ')' so move past that + ++idx; + return str.substring(i, idx); + } + + public List getTypeParameters() { + List list = new ArrayList(); + + if (isEOS()) + return list; + + if (str.charAt(idx) != '(') + throw new IllegalStateException(); + + ++idx; // skipping '(' + + while (skipBlankAndComma()) { + if (str.charAt(idx) == ')') { + ++idx; + return list; + } + + try { + list.add(readOne()); + } catch (DriverInternalError e) { + DriverInternalError ex = new DriverInternalError(String.format("Exception while parsing '%s' around char %d", str, idx)); + ex.initCause(e); + throw ex; + } + } + throw new DriverInternalError(String.format("Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); + } + + public Map getCollectionsParameters() { + if (isEOS()) + return Collections.emptyMap(); + + if (str.charAt(idx) != '(') + throw new IllegalStateException(); + + ++idx; // skipping '(' + + return getNameAndTypeParameters(); + } + + // Must be at the start of the first parameter to read + public Map getNameAndTypeParameters() { + // The order of the hashmap matters for UDT + Map map = new LinkedHashMap(); + + while (skipBlankAndComma()) { + if (str.charAt(idx) == ')') { + ++idx; + return map; + } + + String bbHex = readNextIdentifier(); + String name = null; + try { + name = TypeCodec.StringCodec.utf8Instance.deserialize(Bytes.fromHexString("0x" + bbHex)); + } catch (NumberFormatException e) { 
+ throwSyntaxError(e.getMessage()); + } + + skipBlank(); + if (str.charAt(idx) != ':') + throwSyntaxError("expecting ':' token"); + + ++idx; + skipBlank(); + try { + map.put(name, readOne()); + } catch (DriverInternalError e) { + DriverInternalError ex = new DriverInternalError(String.format("Exception while parsing '%s' around char %d", str, idx)); + ex.initCause(e); + throw ex; + } + } + throw new DriverInternalError(String.format("Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); + } + + private void throwSyntaxError(String msg) { + throw new DriverInternalError(String.format("Syntax error parsing '%s' at char %d: %s", str, idx, msg)); + } + + private boolean isEOS() { + return isEOS(str, idx); + } + + private static boolean isEOS(String str, int i) { + return i >= str.length(); + } + + private void skipBlank() { + idx = skipBlank(str, idx); + } + + private static int skipBlank(String str, int i) { + while (!isEOS(str, i) && ParseUtils.isBlank(str.charAt(i))) + ++i; + + return i; + } + + // skip all blank and at best one comma, return true if there not EOS + private boolean skipBlankAndComma() { + boolean commaFound = false; + while (!isEOS()) { + int c = str.charAt(idx); + if (c == ',') { + if (commaFound) + return true; + else + commaFound = true; + } else if (!ParseUtils.isBlank(c)) { + return true; + } + ++idx; + } + return false; + } + + // left idx positioned on the character stopping the read + public String readNextIdentifier() { + int i = idx; + while (!isEOS() && ParseUtils.isIdentifierChar(str.charAt(idx))) + ++idx; + + return str.substring(i, idx); + } + + public char readNextChar() { + skipBlank(); + return str.charAt(idx++); + } + + @Override + public String toString() { + return str.substring(0, idx) + "[" + (idx == str.length() ? 
"" : str.charAt(idx)) + "]" + str.substring(idx+1); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Clock.java b/driver-core/src/main/java/com/datastax/driver/core/Clock.java new file mode 100644 index 00000000000..75d67e5e7fe --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Clock.java @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * This interface allows us not to have a direct call to {@code System.currentTimeMillis()} for testing purposes + */ +interface Clock { + /** + * Returns the current time in milliseconds + * + * @return the difference, measured in milliseconds, between the current time and midnight, January 1, 1970 UTC. + * @see System#currentTimeMillis() + */ + long currentTime(); +} + +/** + * Default implementation of a clock that delegate its calls to the system clock. + */ +class SystemClock implements Clock { + @Override + public long currentTime() { + return System.currentTimeMillis(); + } +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/CloseFuture.java b/driver-core/src/main/java/com/datastax/driver/core/CloseFuture.java new file mode 100644 index 00000000000..cbb8778255e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/CloseFuture.java @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.List; + +import com.google.common.util.concurrent.AbstractFuture; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; + +/** + * A future on the shutdown of a Cluster or Session instance. + *

+ * This is a standard future except for the fact that this class has an + * additional {@link #force} method that can be used to expedite the shutdown + * process (see below). + *

+ * Note that this class implements Guava's {@code + * ListenableFuture} and can so be used with Guava's future utilities. + */ +public abstract class CloseFuture extends AbstractFuture { + + CloseFuture() {} + + static CloseFuture immediateFuture() { + CloseFuture future = new CloseFuture() { + @Override + public CloseFuture force() { + return this; + } + }; + future.set(null); + return future; + } + + /** + * Try to force the completion of the shutdown this is a future of. + *

+ * This method will do its best to expedite the shutdown process. In + * particular, all connections will be closed right away, even if there is + * ongoing queries at the time this method is called. + *

+ * Note that this method does not block. The completion of this method does + * not imply the shutdown process is done, you still need to wait on this + * future to ensure that, but calling this method will ensure said + * future will return in a timely way. + * + * @return this {@code CloseFuture}. + */ + public abstract CloseFuture force(); + + // Internal utility for cases where we want to build a future that wait on other ones + static class Forwarding extends CloseFuture { + + private final List futures; + + Forwarding(List futures) { + this.futures = futures; + + Futures.addCallback(Futures.allAsList(futures), new FutureCallback>() { + public void onFailure(Throwable t) { + Forwarding.this.setException(t); + } + + public void onSuccess(List v) { + Forwarding.this.onFuturesDone(); + } + }); + } + + @Override + public CloseFuture force() { + for (CloseFuture future : futures) + future.force(); + return this; + } + + protected void onFuturesDone() { + set(null); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java new file mode 100644 index 00000000000..667e4d326c3 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -0,0 +1,2389 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.io.Closeable; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.*; +import java.util.Map.Entry; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Objects; +import com.google.common.base.Predicates; +import com.google.common.collect.*; +import com.google.common.util.concurrent.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.policies.*; + +import static com.datastax.driver.core.SchemaElement.KEYSPACE; + +/** + * Information and known state of a Cassandra cluster. + *

+ * This is the main entry point of the driver. A simple example of access to a + * Cassandra cluster would be: + *

+ *   Cluster cluster = Cluster.builder().addContactPoint("192.168.0.1").build();
+ *   Session session = cluster.connect("db1");
+ *
+ *   for (Row row : session.execute("SELECT * FROM table1"))
+ *       // do something ...
+ * 
+ *

+ * A cluster object maintains a permanent connection to one of the cluster nodes + * which it uses solely to maintain information on the state and current + * topology of the cluster. Using the connection, the driver will discover all + * the nodes currently in the cluster as well as new nodes joining the cluster + * subsequently. + */ +public class Cluster implements Closeable { + + private static final Logger logger = LoggerFactory.getLogger(Cluster.class); + + @VisibleForTesting + static final int NEW_NODE_DELAY_SECONDS = SystemProperties.getInt("com.datastax.driver.NEW_NODE_DELAY_SECONDS", 1); + private static final int NON_BLOCKING_EXECUTOR_SIZE = SystemProperties.getInt("com.datastax.driver.NON_BLOCKING_EXECUTOR_SIZE", + Runtime.getRuntime().availableProcessors()); + + private static final ResourceBundle driverProperties = ResourceBundle.getBundle("com.datastax.driver.core.Driver"); + + // Some per-JVM number that allows to generate unique cluster names when + // multiple Cluster instance are created in the same JVM. + private static final AtomicInteger CLUSTER_ID = new AtomicInteger(0); + + private static final int DEFAULT_THREAD_KEEP_ALIVE = 30; + + private static final int NOTIF_LOCK_TIMEOUT_SECONDS = SystemProperties.getInt("com.datastax.driver.NOTIF_LOCK_TIMEOUT_SECONDS", 60); + + final Manager manager; + + /** + * Constructs a new Cluster instance. + *

+ * This constructor is mainly exposed so Cluster can be sub-classed as a means to make testing/mocking + * easier or to "intercept" its method call. Most users shouldn't extend this class however and + * should prefer either using the {@link #builder} or calling {@link #buildFrom} with a custom + * Initializer. + * + * @param name the name to use for the cluster (this is not the Cassandra cluster name, see {@link #getClusterName}). + * @param contactPoints the list of contact points to use for the new cluster. + * @param configuration the configuration for the new cluster. + */ + protected Cluster(String name, List contactPoints, Configuration configuration) { + this(name, contactPoints, configuration, Collections.emptySet()); + } + + /** + * Constructs a new Cluster instance. + *

+ * This constructor is mainly exposed so Cluster can be sub-classed as a means to make testing/mocking + * easier or to "intercept" its method call. Most users shouldn't extend this class however and + * should prefer using the {@link #builder}. + * + * @param initializer the initializer to use. + * @see #buildFrom + */ + protected Cluster(Initializer initializer) { + this(initializer.getClusterName(), + checkNotEmpty(initializer.getContactPoints()), + initializer.getConfiguration(), + initializer.getInitialListeners()); + } + + private static List checkNotEmpty(List contactPoints) { + if (contactPoints.isEmpty()) + throw new IllegalArgumentException("Cannot build a cluster without contact points"); + return contactPoints; + } + + private Cluster(String name, List contactPoints, Configuration configuration, Collection listeners) { + this.manager = new Manager(name, contactPoints, configuration, listeners); + } + + /** + * Initialize this Cluster instance. + * + * This method creates an initial connection to one of the contact points + * used to construct the {@code Cluster} instance. That connection is then + * used to populate the cluster {@link Metadata}. + *

+ * Calling this method is optional in the sense that any call to one of the + * {@code connect} methods of this object will automatically trigger a call + * to this method beforehand. It is thus only useful to call this method if + * for some reason you want to populate the metadata (or test that at least + * one contact point can be reached) without creating a first {@code + * Session}. + *

+ * Please note that this method only creates one control connection for + * gathering cluster metadata. In particular, it doesn't create any connection pools. + * Those are created when a new {@code Session} is created through + * {@code connect}. + *

+ * This method has no effect if the cluster is already initialized. + * + * @return this {@code Cluster} object. + * + * @throws NoHostAvailableException if no host amongst the contact points + * can be reached. + * @throws AuthenticationException if an authentication error occurs + * while contacting the initial contact points. + * @throws IllegalStateException if the Cluster was closed prior to calling + * this method. This can occur either directly (through {@link #close()} or + * {@link #closeAsync()}), or as a result of an error while initializing the + * Cluster. + */ + public Cluster init() { + this.manager.init(); + return this; + } + + /** + * Build a new cluster based on the provided initializer. + *

+ * Note that for building a cluster pragmatically, Cluster.Builder + * provides a slightly less verbose shortcut with {@link Builder#build}. + *

+ * Also note that that all the contact points provided by {@code + * initializer} must share the same port. + * + * @param initializer the Cluster.Initializer to use + * @return the newly created Cluster instance + * + * @throws IllegalArgumentException if the list of contact points provided + * by {@code initializer} is empty or if not all those contact points have the same port. + */ + public static Cluster buildFrom(Initializer initializer) { + return new Cluster(initializer); + } + + /** + * Creates a new {@link Cluster.Builder} instance. + *

+ * This is a convenience method for {@code new Cluster.Builder()}. + * + * @return the new cluster builder. + */ + public static Cluster.Builder builder() { + return new Cluster.Builder(); + } + + /** + * Returns the current version of the driver. + *

+ * This is intended for products that wrap or extend the driver, as a way to check + * compatibility if end-users override the driver version in their application. + * + * @return the version. + */ + public static String getDriverVersion() { + return driverProperties.getString("driver.version"); + } + + /** + * Creates a new session on this cluster but does not initialize it. + *

+ * Because this method does not perform any initialization, it cannot fail. + * The initialization of the session (the connection of the Session to the + * Cassandra nodes) will occur if either the {@link Session#init} method is + * called explicitly, or whenever the returned session object is used. + *

+ * Once a session returned by this method gets initialized (see above), it + * will be set to no keyspace. If you want to set such session to a + * keyspace, you will have to explicitly execute a 'USE mykeyspace' query. + *

+ * Note that if you do not particularly need to defer initialization, it is + * simpler to use one of the {@code connect()} method of this class. + * + * @return a new, non-initialized session on this cluster. + */ + public Session newSession() { + checkNotClosed(manager); + return manager.newSession(); + } + + /** + * Creates a new session on this cluster and initialize it. + *

+ * Note that this method will initialize the newly created session, trying + * to connect to the Cassandra nodes before returning. If you only want to + * create a Session object without initializing it right away, see + * {@link #newSession}. + * + * @return a new session on this cluster sets to no keyspace. + * + * @throws NoHostAvailableException if the Cluster has not been initialized + * yet ({@link #init} has not be called and this is the first connect call) + * and no host amongst the contact points can be reached. + * @throws AuthenticationException if an authentication error occurs while + * contacting the initial contact points. + * @throws IllegalStateException if the Cluster was closed prior to calling + * this method. This can occur either directly (through {@link #close()} or + * {@link #closeAsync()}), or as a result of an error while initializing the + * Cluster. + */ + public Session connect() { + checkNotClosed(manager); + init(); + Session session = manager.newSession(); + session.init(); + return session; + } + + /** + * Creates a new session on this cluster, initialize it and sets the + * keyspace to the provided one. + *

+ * Note that this method will initialize the newly created session, trying + * to connect to the Cassandra nodes before returning. If you only want to + * create a Session object without initializing it right away, see + * {@link #newSession}. + * + * @param keyspace The name of the keyspace to use for the created + * {@code Session}. + * @return a new session on this cluster sets to keyspace + * {@code keyspaceName}. + * + * @throws NoHostAvailableException if the Cluster has not been initialized + * yet ({@link #init} has not be called and this is the first connect call) + * and no host amongst the contact points can be reached, or if no host can + * be contacted to set the {@code keyspace}. + * @throws AuthenticationException if an authentication error occurs while + * contacting the initial contact points. + * @throws IllegalStateException if the Cluster was closed prior to calling + * this method. This can occur either directly (through {@link #close()} or + * {@link #closeAsync()}), or as a result of an error while initializing the + * Cluster. + */ + public Session connect(String keyspace) { + long timeout = getConfiguration().getSocketOptions().getConnectTimeoutMillis(); + Session session = connect(); + try { + try { + ResultSetFuture future = session.executeAsync("USE " + keyspace); + // Note: using the connection timeout isn't perfectly correct, we should probably change that someday + Uninterruptibles.getUninterruptibly(future, timeout, TimeUnit.MILLISECONDS); + return session; + } catch (TimeoutException e) { + throw new DriverInternalError(String.format("No responses after %d milliseconds while setting current keyspace. This should not happen, unless you have setup a very low connection timeout.", timeout)); + } catch (ExecutionException e) { + throw DefaultResultSetFuture.extractCauseFromExecutionException(e); + } + } catch (RuntimeException e) { + session.close(); + throw e; + } + } + + /** + * The name of this cluster object. + *

+ * Note that this is not the Cassandra cluster name, but rather a name + * assigned to this Cluster object. Currently, that name is only used + * for one purpose: to distinguish exposed JMX metrics when multiple + * Cluster instances live in the same JVM (which should be rare in the first + * place). That name can be set at Cluster building time (through + * {@link Builder#withClusterName} for instance) but will default to a + * name like {@code cluster1} where each Cluster instance in the same JVM + * will have a different number. + * + * @return the name for this cluster instance. + */ + public String getClusterName() { + return manager.clusterName; + } + + /** + * Returns read-only metadata on the connected cluster. + *

+ * This includes the known nodes with their status as seen by the driver, + * as well as the schema definitions. Since this return metadata on the + * connected cluster, this method may trigger the creation of a connection + * if none has been established yet (neither {@code init()} nor {@code connect()} + * has been called yet). + * + * @return the cluster metadata. + * + * @throws NoHostAvailableException if the Cluster has not been initialized yet + * and no host amongst the contact points can be reached. + * @throws AuthenticationException if an authentication error occurs + * while contacting the initial contact points. + * @throws IllegalStateException if the Cluster was closed prior to calling + * this method. This can occur either directly (through {@link #close()} or + * {@link #closeAsync()}), or as a result of an error while initializing the + * Cluster. + */ + public Metadata getMetadata() { + manager.init(); + return manager.metadata; + } + + /** + * The cluster configuration. + * + * @return the cluster configuration. + */ + public Configuration getConfiguration() { + return manager.configuration; + } + + /** + * The cluster metrics. + * + * @return the cluster metrics, or {@code null} if metrics collection has + * been disabled (that is if {@link Configuration#getMetricsOptions} + * returns {@code null}). + */ + public Metrics getMetrics() { + checkNotClosed(manager); + return manager.metrics; + } + + /** + * Registers the provided listener to be notified on hosts + * up/down/added/removed events. + *

+ * Registering the same listener multiple times is a no-op. + *

+ * Note that while {@link LoadBalancingPolicy} implements + * {@code Host.StateListener}, the configured load balancing does not + * need to (and should not) be registered through this method to + * receive host related events. + * + * @param listener the new {@link Host.StateListener} to register. + * @return this {@code Cluster} object; + */ + public Cluster register(Host.StateListener listener) { + checkNotClosed(manager); + manager.listeners.add(listener); + return this; + } + + /** + * Unregisters the provided listener from being notified on hosts events. + *

+ * This method is a no-op if {@code listener} hadn't previously been + * registered against this Cluster. + * + * @param listener the {@link Host.StateListener} to unregister. + * @return this {@code Cluster} object; + */ + public Cluster unregister(Host.StateListener listener) { + checkNotClosed(manager); + manager.listeners.remove(listener); + return this; + } + + /** + * Registers the provided tracker to be updated with hosts read + * latencies. + *

+ * Registering the same listener multiple times is a no-op. + *

+ * Be wary that the registered tracker {@code update} method will be called + * very frequently (at the end of every query to a Cassandra host) and + * should thus not be costly. + *

+ * The main use case for a {@code LatencyTracker} is so + * {@link LoadBalancingPolicy} can implement latency awareness + * Typically, {@link LatencyAwarePolicy} registers it's own internal + * {@code LatencyTracker} (automatically, you don't have to call this + * method directly). + * + * @param tracker the new {@link LatencyTracker} to register. + * @return this {@code Cluster} object; + */ + public Cluster register(LatencyTracker tracker) { + checkNotClosed(manager); + manager.trackers.add(tracker); + return this; + } + + /** + * Unregisters the provided latency tracking from being updated + * with host read latencies. + *

+ * This method is a no-op if {@code tracker} hadn't previously been + * registered against this Cluster. + * + * @param tracker the {@link LatencyTracker} to unregister. + * @return this {@code Cluster} object; + */ + public Cluster unregister(LatencyTracker tracker) { + checkNotClosed(manager); + manager.trackers.remove(tracker); + return this; + } + + /** + * Initiates a shutdown of this cluster instance. + *

+ * This method is asynchronous and returns a future on the completion + * of the shutdown process. As soon as the cluster is shutdown, no + * new request will be accepted, but already submitted queries are + * allowed to complete. This method closes all connections from all + * sessions and reclaims all resources used by this Cluster + * instance. + *

+ * If for some reason you wish to expedite this process, the + * {@link CloseFuture#force} can be called on the result future. + *

+ * This method has no particular effect if the cluster was already closed + * (in which case the returned future will return immediately). + * + * @return a future on the completion of the shutdown process. + */ + public CloseFuture closeAsync() { + return manager.close(); + } + + /** + * Initiates a shutdown of this cluster instance and blocks until + * that shutdown completes. + *

+ * This method is a shortcut for {@code closeAsync().get()}. + */ + public void close() { + try { + closeAsync().get(); + } catch (ExecutionException e) { + throw DefaultResultSetFuture.extractCauseFromExecutionException(e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + /** + * Whether this Cluster instance has been closed. + *

+ * Note that this method returns true as soon as one of the close methods + * ({@link #closeAsync} or {@link #close}) has been called, it does not guarantee + * that the closing is done. If you want to guarantee that the closing is done, + * you can call {@code close()} and wait until it returns (or call the get method + * on {@code closeAsync()} with a very short timeout and check this doesn't timeout). + * + * @return {@code true} if this Cluster instance has been closed, {@code false} + * otherwise. + */ + public boolean isClosed() { + return manager.closeFuture.get() != null; + } + + private static void checkNotClosed(Manager manager) { + if (manager.isClosed()) + throw new IllegalStateException("Can't use this cluster instance because it was previously closed"); + } + + /** + * Initializer for {@link Cluster} instances. + *

+ * If you want to create a new {@code Cluster} instance programmatically, + * then it is advised to use {@link Cluster.Builder} which can be obtained from the + * {@link Cluster#builder} method. + *

+ * But it is also possible to implement a custom {@code Initializer} that + * retrieves initialization from a web-service or from a configuration file. + */ + public interface Initializer { + + /** + * An optional name for the created cluster. + *

+ * Such name is optional (a default name will be created otherwise) and is currently + * only used for JMX reporting of metrics. See {@link Cluster#getClusterName} for more + * information. + * + * @return the name for the created cluster or {@code null} to use an automatically + * generated name. + */ + public String getClusterName(); + + /** + * Returns the initial Cassandra hosts to connect to. + * + * @return the initial Cassandra contact points. See {@link Builder#addContactPoint} + * for more details on contact points. + */ + public List getContactPoints(); + + /** + * The configuration to use for the new cluster. + *

+ * Note that some configuration can be modified after the cluster + * initialization but some others cannot. In particular, the ones that + * cannot be changed afterwards includes: + *

    + *
  • the port use to connect to Cassandra nodes (see {@link ProtocolOptions}).
  • + *
  • the policies used (see {@link Policies}).
  • + *
  • the authentication info provided (see {@link Configuration}).
  • + *
  • whether metrics are enabled (see {@link Configuration}).
  • + *
+ * + * @return the configuration to use for the new cluster. + */ + public Configuration getConfiguration(); + + /** + * Optional listeners to register against the newly created cluster. + *

+ * Note that contrary to listeners registered post Cluster creation, + * the listeners returned by this method will see {@link Host.StateListener#onAdd} + * events for the initial contact points. + * + * @return a possibly empty collection of {@code Host.StateListener} to register + * against the newly created cluster. + */ + public Collection getInitialListeners(); + } + + /** + * Helper class to build {@link Cluster} instances. + */ + public static class Builder implements Initializer { + + private String clusterName; + private final List addresses = new ArrayList(); + private final List rawAddresses = new ArrayList(); + private int port = ProtocolOptions.DEFAULT_PORT; + private int maxSchemaAgreementWaitSeconds = ProtocolOptions.DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS; + private ProtocolVersion protocolVersion; + private AuthProvider authProvider = AuthProvider.NONE; + + private LoadBalancingPolicy loadBalancingPolicy; + private ReconnectionPolicy reconnectionPolicy; + private RetryPolicy retryPolicy; + private AddressTranslater addressTranslater; + private TimestampGenerator timestampGenerator; + private SpeculativeExecutionPolicy speculativeExecutionPolicy; + + private ProtocolOptions.Compression compression = ProtocolOptions.Compression.NONE; + private SSLOptions sslOptions = null; + private boolean metricsEnabled = true; + private boolean jmxEnabled = true; + + private PoolingOptions poolingOptions; + private SocketOptions socketOptions; + private QueryOptions queryOptions; + + private NettyOptions nettyOptions = NettyOptions.DEFAULT_INSTANCE; + + private Collection listeners; + + + @Override + public String getClusterName() { + return clusterName; + } + + @Override + public List getContactPoints() { + if (rawAddresses.isEmpty()) + return addresses; + + List allAddresses = new ArrayList(addresses); + for (InetAddress address : rawAddresses) + allAddresses.add(new InetSocketAddress(address, port)); + return allAddresses; + } + + /** + * An optional name 
for the created cluster. + *

+ * Note: this is not related to the Cassandra cluster name (though you + * are free to provide the same name). See {@link Cluster#getClusterName} for + * details. + *

+ * If you use this method and create more than one Cluster instance in the + * same JVM (which should be avoided unless you need to connect to multiple + * Cassandra clusters), you should make sure each Cluster instance get a + * unique name or you may have a problem with JMX reporting. + * + * @param name the cluster name to use for the created Cluster instance. + * @return this Builder. + */ + public Builder withClusterName(String name) { + this.clusterName = name; + return this; + } + + /** + * The port to use to connect to the Cassandra host. + *

+ * If not set through this method, the default port (9042) will be used + * instead. + * + * @param port the port to set. + * @return this Builder. + */ + public Builder withPort(int port) { + this.port = port; + return this; + } + + /** + * Sets the maximum time to wait for schema agreement before returning from a DDL query. + *

+ * If not set through this method, the default value (10 seconds) will be used. + * + * @param maxSchemaAgreementWaitSeconds the new value to set. + * @return this Builder. + * + * @throws IllegalArgumentException if the provided value is zero or less. + */ + public Builder withMaxSchemaAgreementWaitSeconds(int maxSchemaAgreementWaitSeconds) { + if (maxSchemaAgreementWaitSeconds <= 0) + throw new IllegalArgumentException("Max schema agreement wait must be greater than zero"); + + this.maxSchemaAgreementWaitSeconds = maxSchemaAgreementWaitSeconds; + return this; + } + + /** + * The native protocol version to use. + *

+ * The driver supports versions 1 to 3 of the native protocol. Higher versions + * of the protocol have more features and should be preferred, but this also depends + * on the Cassandra version: + * + * + * + * + * + * + * + *
Native protocol version to Cassandra version correspondence
Protocol versionMinimum Cassandra version
11.2
22.0
32.1
+ *

+ * By default, the driver will "auto-detect" which protocol version it can use + * when connecting to the first node. More precisely, it will try first with + * {@link ProtocolVersion#NEWEST_SUPPORTED}, and if not supported fallback to + * the highest version supported by the first node it connects to. Please note + * that once the version is "auto-detected", it won't change: if the first node + * the driver connects to is a Cassandra 1.2 node and auto-detection is used + * (the default), then the native protocol version 1 will be use for the lifetime + * of the Cluster instance. + *

+ * This method allows to force the use of a particular protocol version. Forcing + * version 1 is always fine since all Cassandra version (at least all those + * supporting the native protocol in the first place) so far support it. However, + * please note that a number of features of the driver won't be available if that + * version of the protocol is in use, including result set paging, + * {@link BatchStatement}, executing a non-prepared query with binary values + * ({@link Session#execute(String, Object...)}), ... (those methods will throw + * an UnsupportedFeatureException). Using the protocol version 1 should thus + * only be considered when using Cassandra 1.2, until nodes have been upgraded + * to Cassandra 2.0. + *

+ * If version 2 of the protocol is used, then Cassandra 1.2 nodes will be ignored + * (the driver won't connect to them). + *

+ * The default behavior (auto-detection) is fine in almost all case, but you may + * want to force a particular version if you have a Cassandra cluster with mixed + * 1.2/2.0 nodes (i.e. during a Cassandra upgrade). + * + * @param version the native protocol version to use. {@code null} is also supported + * to trigger auto-detection (see above) but this is the default (so you don't have + * to call this method for that behavior). + * @return this Builder. + */ + public Builder withProtocolVersion(ProtocolVersion version) { + this.protocolVersion = version; + return this; + } + + /** + * The native protocol version to use, as a number. + * + * @param version the native protocol version as a number. + * @return this Builder. + * @throws IllegalArgumentException if the number does not correspond to any known + * native protocol version. + * + * @deprecated This method is provided for backward compatibility. Use + * {@link #withProtocolVersion(ProtocolVersion)} instead. + */ + @Deprecated + public Builder withProtocolVersion(int version) { + this.protocolVersion = ProtocolVersion.fromInt(version); + return this; + } + + /** + * Adds a contact point. + *

+ * Contact points are addresses of Cassandra nodes that the driver uses + * to discover the cluster topology. Only one contact point is required + * (the driver will retrieve the address of the other nodes + * automatically), but it is usually a good idea to provide more than + * one contact point, because if that single contact point is unavailable, + * the driver cannot initialize itself correctly. + *

+ * Note that by default (that is, unless you use the {@link #withLoadBalancingPolicy}) + * method of this builder), the first succesfully contacted host will be use + * to define the local data-center for the client. If follows that if you are + * running Cassandra in a multiple data-center setting, it is a good idea to + * only provided contact points that are in the same datacenter than the client, + * or to provide manually the load balancing policy that suits your need. + * + * @param address the address of the node to connect to + * @return this Builder. + * + * @throws IllegalArgumentException if no IP address for {@code address} + * could be found + * @throws SecurityException if a security manager is present and + * permission to resolve the host name is denied. + */ + public Builder addContactPoint(String address) { + // We explicitely check for nulls because InetAdress.getByName() will happily + // accept it and use localhost (while a null here almost likely mean a user error, + // not "connect to localhost") + if (address == null) + throw new NullPointerException(); + + try { + this.rawAddresses.add(InetAddress.getByName(address)); + return this; + } catch (UnknownHostException e) { + throw new IllegalArgumentException(e.getMessage()); + } + } + + /** + * Adds contact points. + *

+ * See {@link Builder#addContactPoint} for more details on contact + * points. + * + * @param addresses addresses of the nodes to add as contact point. + * @return this Builder. + * + * @throws IllegalArgumentException if no IP address for at least one + * of {@code addresses} could be found + * @throws SecurityException if a security manager is present and + * permission to resolve the host name is denied. + * + * @see Builder#addContactPoint + */ + public Builder addContactPoints(String... addresses) { + for (String address : addresses) + addContactPoint(address); + return this; + } + + /** + * Adds a contact point - or many if it host resolves to multiple InetAddresss (A records). + *

+ * + * If the host name points to a dns records with multiple a-records, all InetAddresses + * returned will be used. Make sure that all resulting InetAddresss returned + * points to the same cluster and datacenter. + *

+ * See {@link Builder#addContactPoint} for more details on contact + * points and thrown exceptions + * + * @param address address of the nodes to look up InetAddresses from to add as contact points. + * @return this Builder. + * + * + * @see Builder#addContactPoint + */ + public Builder addContactPoints(String address) { + // We explicitely check for nulls because InetAdress.getByName() will happily + // accept it and use localhost (while a null here almost likely mean a user error, + // not "connect to localhost") + if (address == null) + throw new NullPointerException(); + + try { + addContactPoints(InetAddress.getAllByName(address)); + } catch (UnknownHostException e) { + throw new IllegalArgumentException(e.getMessage()); + } + return this; + } + + /** + * Adds contact points. + *

+ * See {@link Builder#addContactPoint} for more details on contact + * points. + * + * @param addresses addresses of the nodes to add as contact point. + * @return this Builder. + * + * @see Builder#addContactPoint + */ + public Builder addContactPoints(InetAddress... addresses) { + Collections.addAll(this.rawAddresses, addresses); + return this; + } + + /** + * Adds contact points. + * + * See {@link Builder#addContactPoint} for more details on contact + * points. + * + * @param addresses addresses of the nodes to add as contact point + * @return this Builder + * + * @see Builder#addContactPoint + */ + public Builder addContactPoints(Collection addresses) { + this.rawAddresses.addAll(addresses); + return this; + } + + /** + * Adds contact points. + *

+ * See {@link Builder#addContactPoint} for more details on contact + * points. Contrarily to other {@code addContactPoints} methods, this method + * allow to provide a different port for each contact points. Since Cassandra + * nodes must always all listen on the same port, this is rarelly what you + * want and most use should prefer other {@code addContactPoints} methods to + * this one. However, this can be useful if the Cassandra nodes are behind + * a router and are not accessed directly. Note that if you are in this + * situtation (Cassandra nodes are behind a router, not directly accessible), + * you almost surely want to provide a specific {@code AddressTranslater} + * (through {@link #withAddressTranslater}) to translate actual Cassandra node + * addresses to the addresses the driver should use, otherwise the driver + * will not be able to auto-detect new nodes (and will generally not function + * optimally). + * + * @param addresses addresses of the nodes to add as contact point + * @return this Builder + * + * @see Builder#addContactPoint + */ + public Builder addContactPointsWithPorts(Collection addresses) { + this.addresses.addAll(addresses); + return this; + } + + /** + * Configures the load balancing policy to use for the new cluster. + *

+ * If no load balancing policy is set through this method, + * {@link Policies#defaultLoadBalancingPolicy} will be used instead. + * + * @param policy the load balancing policy to use. + * @return this Builder. + */ + public Builder withLoadBalancingPolicy(LoadBalancingPolicy policy) { + this.loadBalancingPolicy = policy; + return this; + } + + /** + * Configures the reconnection policy to use for the new cluster. + *

+ * If no reconnection policy is set through this method, + * {@link Policies#DEFAULT_RECONNECTION_POLICY} will be used instead. + * + * @param policy the reconnection policy to use. + * @return this Builder. + */ + public Builder withReconnectionPolicy(ReconnectionPolicy policy) { + this.reconnectionPolicy = policy; + return this; + } + + /** + * Configures the retry policy to use for the new cluster. + *

+ * If no retry policy is set through this method, + * {@link Policies#DEFAULT_RETRY_POLICY} will be used instead. + * + * @param policy the retry policy to use. + * @return this Builder. + */ + public Builder withRetryPolicy(RetryPolicy policy) { + this.retryPolicy = policy; + return this; + } + + /** + * Configures the address translater to use for the new cluster. + *

+ * See {@link AddressTranslater} for more detail on address translation, + * but the default translater, {@link IdentityTranslater}, should be + * correct in most cases. If unsure, stick to the default. + * + * @param translater the translater to use. + * @return this Builder. + */ + public Builder withAddressTranslater(AddressTranslater translater) { + this.addressTranslater = translater; + return this; + } + + /** + * Configures the generator that will produce the client-side timestamp sent + * with each query. + *

+ * This feature is only available with version {@link ProtocolVersion#V3 V3} or + * above of the native protocol. With earlier versions, timestamps are always + * generated server-side, and setting a generator through this method will have + * no effect. + *

+ * If no generator is set through this method, the driver will default to the + * legacy server-side behavior by using {@link ServerSideTimestampGenerator}. + * + * @param timestampGenerator the generator to use. + * @return this Builder. + */ + public Builder withTimestampGenerator(TimestampGenerator timestampGenerator) { + this.timestampGenerator = timestampGenerator; + return this; + } + + /** + * Configures the speculative execution policy to use for the new cluster. + *

+ * If no policy is set through this method, {@link Policies#defaultSpeculativeExecutionPolicy()} + * will be used instead. + * + * @param policy the policy to use. + * @return this Builder. + */ + public Builder withSpeculativeExecutionPolicy(SpeculativeExecutionPolicy policy) { + this.speculativeExecutionPolicy = policy; + return this; + } + + /** + * Uses the provided credentials when connecting to Cassandra hosts. + *

+ * This should be used if the Cassandra cluster has been configured to + * use the {@code PasswordAuthenticator}. If the default {@code + * AllowAllAuthenticator} is used instead, using this method has no + * effect. + * + * @param username the username to use to login to Cassandra hosts. + * @param password the password corresponding to {@code username}. + * @return this Builder. + */ + public Builder withCredentials(String username, String password) { + this.authProvider = new PlainTextAuthProvider(username, password); + return this; + } + + /** + * Use the specified AuthProvider when connecting to Cassandra + * hosts. + *

+ * Use this method when a custom authentication scheme is in place. + * You shouldn't call both this method and {@code withCredentials} + * on the same {@code Builder} instance as one will supersede the + * other + * + * @param authProvider the {@link AuthProvider} to use to login to + * Cassandra hosts. + * @return this Builder + */ + public Builder withAuthProvider(AuthProvider authProvider) { + this.authProvider = authProvider; + return this; + } + + /** + * Sets the compression to use for the transport. + * + * @param compression the compression to set. + * @return this Builder. + * + * @see ProtocolOptions.Compression + */ + public Builder withCompression(ProtocolOptions.Compression compression) { + this.compression = compression; + return this; + } + + /** + * Disables metrics collection for the created cluster (metrics are + * enabled by default otherwise). + * + * @return this builder. + */ + public Builder withoutMetrics() { + this.metricsEnabled = false; + return this; + } + + /** + * Enables the use of SSL for the created {@code Cluster}. + *

+ * Calling this method will use default SSL options (see {@link SSLOptions#SSLOptions()}). + * This is thus a shortcut for {@code withSSL(new SSLOptions())}. + *

+ * Note that if SSL is enabled, the driver will not connect to any + * Cassandra nodes that doesn't have SSL enabled and it is strongly + * advised to enable SSL on every Cassandra node if you plan on using + * SSL in the driver. + * + * @return this builder. + */ + public Builder withSSL() { + this.sslOptions = new SSLOptions(); + return this; + } + + /** + * Enable the use of SSL for the created {@code Cluster} using the provided options. + * + * @param sslOptions the SSL options to use. + * + * @return this builder. + */ + public Builder withSSL(SSLOptions sslOptions) { + this.sslOptions = sslOptions; + return this; + } + + /** + * Register the provided listeners in the newly created cluster. + *

+ * Note: repeated calls to this method will override the previous ones. + * + * @param listeners the listeners to register. + * @return this builder. + */ + public Builder withInitialListeners(Collection listeners) { + this.listeners = listeners; + return this; + } + + /** + * Disables JMX reporting of the metrics. + *

+ * JMX reporting is enabled by default (see {@link Metrics}) but can be + * disabled using this option. If metrics are disabled, this is a + * no-op. + * + * @return this builder. + */ + public Builder withoutJMXReporting() { + this.jmxEnabled = false; + return this; + } + + /** + * Sets the PoolingOptions to use for the newly created Cluster. + *

+ * If no pooling options are set through this method, default pooling + * options will be used. + * + * @param options the pooling options to use. + * @return this builder. + */ + public Builder withPoolingOptions(PoolingOptions options) { + this.poolingOptions = options; + return this; + } + + /** + * Sets the SocketOptions to use for the newly created Cluster. + *

+ * If no socket options are set through this method, default socket + * options will be used. + * + * @param options the socket options to use. + * @return this builder. + */ + public Builder withSocketOptions(SocketOptions options) { + this.socketOptions = options; + return this; + } + + /** + * Sets the QueryOptions to use for the newly created Cluster. + *

+ * If no query options are set through this method, default query + * options will be used. + * + * @param options the query options to use. + * @return this builder. + */ + public Builder withQueryOptions(QueryOptions options) { + this.queryOptions = options; + return this; + } + + /** + * Set the {@link NettyOptions} to use for the newly created Cluster. + *

+ * If no Netty options are set through this method, {@link NettyOptions#DEFAULT_INSTANCE} + * will be used as a default value, which means that no customization will be applied. + * + * @param nettyOptions the {@link NettyOptions} to use. + * @return this builder. + */ + public Builder withNettyOptions(NettyOptions nettyOptions) { + this.nettyOptions = nettyOptions; + return this; + } + + /** + * The configuration that will be used for the new cluster. + *

+ * You should not modify this object directly because changes made + * to the returned object may not be used by the cluster build. + * Instead, you should use the other methods of this {@code Builder}. + * + * @return the configuration to use for the new cluster. + */ + @Override + public Configuration getConfiguration() { + Policies policies = Policies.builder() + .withLoadBalancingPolicy(loadBalancingPolicy) + .withReconnectionPolicy(reconnectionPolicy) + .withRetryPolicy(retryPolicy) + .withAddressTranslater(addressTranslater) + .withTimestampGenerator(timestampGenerator) + .withSpeculativeExecutionPolicy(speculativeExecutionPolicy) + .build(); + return new Configuration(policies, + new ProtocolOptions(port, protocolVersion, maxSchemaAgreementWaitSeconds, sslOptions, authProvider).setCompression(compression), + poolingOptions == null ? new PoolingOptions() : poolingOptions, + socketOptions == null ? new SocketOptions() : socketOptions, + metricsEnabled ? new MetricsOptions(jmxEnabled) : null, + queryOptions == null ? new QueryOptions() : queryOptions, + nettyOptions); + } + + @Override + public Collection getInitialListeners() { + return listeners == null ? Collections.emptySet() : listeners; + } + + /** + * Builds the cluster with the configured set of initial contact points + * and policies. + *

+ * This is a convenience method for {@code Cluster.buildFrom(this)}. + * + * @return the newly built Cluster instance. + */ + public Cluster build() { + return Cluster.buildFrom(this); + } + } + + static long timeSince(long startNanos, TimeUnit destUnit) { + return destUnit.convert(System.nanoTime() - startNanos, TimeUnit.NANOSECONDS); + } + + private static String generateClusterName() { + return "cluster" + CLUSTER_ID.incrementAndGet(); + } + + /** + * The sessions and hosts managed by this a Cluster instance. + *

+ * Note: the reason we create a Manager object separate from Cluster is + * that Manager is not publicly visible. For instance, we wouldn't want + * user to be able to call the {@link #onUp} and {@link #onDown} methods. + */ + class Manager implements Connection.DefaultResponseHandler { + + final String clusterName; + private boolean isInit; + private volatile boolean isFullyInit; + + // Initial contacts point + final List contactPoints; + final Set sessions = new CopyOnWriteArraySet(); + + Metadata metadata; + final Configuration configuration; + Metrics metrics; + + Connection.Factory connectionFactory; + ControlConnection controlConnection; + + final ConvictionPolicy.Factory convictionPolicyFactory = new ConvictionPolicy.Simple.Factory(); + + ScheduledThreadPoolExecutor reconnectionExecutor; + ScheduledThreadPoolExecutor scheduledTasksExecutor; + + // Executor used for tasks that shouldn't be executed on an IO thread. Used for short-lived, generally non-blocking tasks + ListeningExecutorService executor; + + // Work Queue used by executor. + LinkedBlockingQueue executorQueue; + + // An executor for tasks that might block some time, like creating new connection, but are generally not too critical. + ListeningExecutorService blockingExecutor; + + // Work Queue used by blockingExecutor. + LinkedBlockingQueue blockingExecutorQueue; + + ConnectionReaper reaper; + + final AtomicReference closeFuture = new AtomicReference(); + + // All the queries that have been prepared (we keep them so we can re-prepared them when a node fail or a + // new one join the cluster). + // Note: we could move this down to the session level, but since prepared statement are global to a node, + // this would yield a slightly less clear behavior. 
+ ConcurrentMap preparedQueries; + + final Set listeners; + final Set trackers = new CopyOnWriteArraySet(); + + private Manager(String clusterName, List contactPoints, Configuration configuration, Collection listeners) { + this.clusterName = clusterName == null ? generateClusterName() : clusterName; + this.configuration = configuration; + this.contactPoints = contactPoints; + this.listeners = new CopyOnWriteArraySet(listeners); + } + + // Initialization is not too performance intensive and in practice there shouldn't be contention + // on it so synchronized is good enough. + synchronized void init() { + checkNotClosed(this); + if (isInit) + return; + isInit = true; + + logger.debug("Starting new cluster with contact points " + contactPoints); + + this.configuration.register(this); + + this.executorQueue = new LinkedBlockingQueue(); + this.executor = makeExecutor(NON_BLOCKING_EXECUTOR_SIZE, "worker", executorQueue); + this.blockingExecutorQueue = new LinkedBlockingQueue(); + this.blockingExecutor = makeExecutor(2, "blocking-task-worker", blockingExecutorQueue); + this.reconnectionExecutor = new ScheduledThreadPoolExecutor(2, threadFactory("reconnection")); + // scheduledTasksExecutor is used to process C* notifications. So having it mono-threaded ensures notifications are + // applied in the order received. + this.scheduledTasksExecutor = new ScheduledThreadPoolExecutor(1, threadFactory("scheduled-task-worker")); + + this.reaper = new ConnectionReaper(this); + this.metadata = new Metadata(this); + this.connectionFactory = new Connection.Factory(this, configuration); + this.controlConnection = new ControlConnection(this); + this.metrics = configuration.getMetricsOptions() == null ? 
null : new Metrics(this); + this.preparedQueries = new MapMaker().weakValues().makeMap(); + + this.scheduledTasksExecutor.scheduleWithFixedDelay(new CleanupIdleConnectionsTask(), 10, 10, TimeUnit.SECONDS); + + + for (InetSocketAddress address : contactPoints) { + // We don't want to signal -- call onAdd() -- because nothing is ready + // yet (loadbalancing policy, control connection, ...). All we want is + // create the Host object so we can initialize the control connection. + metadata.add(address); + } + + // At this stage, metadata.allHosts() only contains the contact points, that's what we want to pass to LBP.init(). + // But the control connection will initialize first and discover more hosts, so make a copy. + Set contactPointHosts = Sets.newHashSet(metadata.allHosts()); + + try { + try { + controlConnection.connect(); + } catch (UnsupportedProtocolVersionException e) { + logger.debug("Cannot connect with protocol {}, trying {}", e.unsupportedVersion, e.serverVersion); + + connectionFactory.protocolVersion = e.serverVersion; + try { + controlConnection.connect(); + } catch (UnsupportedProtocolVersionException e1) { + throw new DriverInternalError("Cannot connect to node with its own version, this makes no sense", e); + } + } + + // The control connection can mark hosts down if it failed to connect to them, separate them + Set downContactPointHosts = Sets.newHashSet(); + for (Host host : contactPointHosts) + if (host.state == Host.State.DOWN) + downContactPointHosts.add(host); + contactPointHosts.removeAll(downContactPointHosts); + + // Now that the control connection is ready, we have all the information we need about the nodes (datacenter, + // rack...) 
to initialize the load balancing policy + loadBalancingPolicy().init(Cluster.this, contactPointHosts); + speculativeRetryPolicy().init(Cluster.this); + for (Host host : downContactPointHosts) { + loadBalancingPolicy().onDown(host); + for (Host.StateListener listener : listeners) + listener.onDown(host); + } + + configuration.getPoolingOptions().setProtocolVersion(protocolVersion()); + + for (Host host : metadata.allHosts()) { + // If the host is down at this stage, it's a contact point that the control connection failed to reach. + // Reconnection attempts are already scheduled, and the LBP and listeners have been notified above. + if (host.state == Host.State.DOWN) continue; + + // Otherwise, we want to do the equivalent of onAdd(). But since we know for sure that no sessions or prepared + // statements exist at this point, we can skip some of the steps (plus this avoids scheduling concurrent pool + // creations if a session is created right after this method returns). + logger.info("New Cassandra host {} added", host); + + if (!connectionFactory.protocolVersion.isSupportedBy(host)) { + logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); + return; + } + + if (!contactPointHosts.contains(host)) + loadBalancingPolicy().onAdd(host); + + host.setUp(); + + for (Host.StateListener listener : listeners) + listener.onAdd(host); + } + isFullyInit = true; + } catch (NoHostAvailableException e) { + close(); + throw e; + } + } + + ProtocolVersion protocolVersion() { + return connectionFactory.protocolVersion; + } + + ThreadFactory threadFactory(String name) { + return new ThreadFactoryBuilder().setNameFormat(clusterName + "-" + name + "-%d").build(); + } + + private ListeningExecutorService makeExecutor(int threads, String name, LinkedBlockingQueue workQueue) { + ThreadPoolExecutor executor = new ThreadPoolExecutor(threads, + threads, + DEFAULT_THREAD_KEEP_ALIVE, + TimeUnit.SECONDS, + workQueue, + threadFactory(name)); + + 
executor.allowCoreThreadTimeOut(true); + return MoreExecutors.listeningDecorator(executor); + } + + Cluster getCluster() { + return Cluster.this; + } + + LoadBalancingPolicy loadBalancingPolicy() { + return configuration.getPolicies().getLoadBalancingPolicy(); + } + + SpeculativeExecutionPolicy speculativeRetryPolicy() { + return configuration.getPolicies().getSpeculativeExecutionPolicy(); + } + + ReconnectionPolicy reconnectionPolicy() { + return configuration.getPolicies().getReconnectionPolicy(); + } + + InetSocketAddress translateAddress(InetAddress address) { + InetSocketAddress sa = new InetSocketAddress(address, connectionFactory.getPort()); + InetSocketAddress translated = configuration.getPolicies().getAddressTranslater().translate(sa); + return translated == null ? sa : translated; + } + + private Session newSession() { + SessionManager session = new SessionManager(Cluster.this); + sessions.add(session); + return session; + } + + boolean removeSession(Session session) { + return sessions.remove(session); + } + + void reportLatency(Host host, Statement statement, Exception exception, long latencyNanos) { + for (LatencyTracker tracker : trackers) { + tracker.update(host, statement, exception, latencyNanos); + } + } + + boolean isClosed() { + return closeFuture.get() != null; + } + + private CloseFuture close() { + + CloseFuture future = closeFuture.get(); + if (future != null) + return future; + + if (isInit) { + logger.debug("Shutting down"); + + // If we're shutting down, there is no point in waiting on scheduled reconnections, nor on notifications + // delivery or blocking tasks so we use shutdownNow + shutdownNow(reconnectionExecutor); + shutdownNow(scheduledTasksExecutor); + shutdownNow(blockingExecutor); + + // but for the worker executor, we want to let submitted tasks finish unless the shutdown is forced. 
+ executor.shutdown(); + + // We also close the metrics + if (metrics != null) + metrics.shutdown(); + + // And the load balancing policy + LoadBalancingPolicy loadBalancingPolicy = loadBalancingPolicy(); + if (loadBalancingPolicy instanceof CloseableLoadBalancingPolicy) + ((CloseableLoadBalancingPolicy)loadBalancingPolicy).close(); + + speculativeRetryPolicy().close(); + + AddressTranslater translater = configuration.getPolicies().getAddressTranslater(); + if (translater instanceof CloseableAddressTranslater) + ((CloseableAddressTranslater)translater).close(); + + // Then we shutdown all connections + List futures = new ArrayList(sessions.size() + 1); + futures.add(controlConnection.closeAsync()); + for (Session session : sessions) + futures.add(session.closeAsync()); + + future = new ClusterCloseFuture(futures); + // The rest will happen asynchronously, when all connections are successfully closed + } else { + future = CloseFuture.immediateFuture(); + } + + return closeFuture.compareAndSet(null, future) + ? future + : closeFuture.get(); // We raced, it's ok, return the future that was actually set + } + + private void shutdownNow(ExecutorService executor) { + List pendingTasks = executor.shutdownNow(); + // If some tasks were submitted to this executor but not yet commenced, make sure the corresponding futures complete + for (Runnable pendingTask : pendingTasks) { + if (pendingTask instanceof FutureTask) + ((FutureTask)pendingTask).cancel(false); + } + } + + void logUnsupportedVersionProtocol(Host host, ProtocolVersion version) { + logger.warn("Detected added or restarted Cassandra host {} but ignoring it since it does not support the version {} of the native " + + "protocol which is currently in use. 
If you want to force the use of a particular version of the native protocol, use " + + "Cluster.Builder#usingProtocolVersion() when creating the Cluster instance.", host, version); + } + + void logClusterNameMismatch(Host host, String expectedClusterName, String actualClusterName) { + logger.warn("Detected added or restarted Cassandra host {} but ignoring it since its cluster name '{}' does not match the one " + + "currently known ({})", + host, actualClusterName, expectedClusterName); + } + + public ListenableFuture triggerOnUp(final Host host) { + return executor.submit(new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + onUp(host, null); + } + }); + } + + // Use triggerOnUp unless you're sure you want to run this on the current thread. + private void onUp(final Host host, Connection reusedConnection) throws InterruptedException, ExecutionException { + logger.debug("Host {} is UP", host); + + if (isClosed()) + return; + + if (!connectionFactory.protocolVersion.isSupportedBy(host)) { + logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); + return; + } + + try { + + boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (!locked) { + logger.warn("Could not acquire notifications lock within {} seconds, ignoring UP notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); + return; + } + try { + + // We don't want to use the public Host.isUp() as this would make us skip the rest for suspected hosts + if (host.state == Host.State.UP) + return; + + // If there is a reconnection attempt scheduled for that node, cancel it + Future scheduledAttempt = host.reconnectionAttempt.getAndSet(null); + if (scheduledAttempt != null) { + logger.debug("Cancelling reconnection attempt since node is UP"); + scheduledAttempt.cancel(false); + } + + try { + reusedConnection = prepareAllQueries(host, reusedConnection); + } catch (InterruptedException e) { 
+ Thread.currentThread().interrupt(); + // Don't propagate because we don't want to prevent other listener to run + } catch (UnsupportedProtocolVersionException e) { + logUnsupportedVersionProtocol(host, e.unsupportedVersion); + return; + } catch (ClusterNameMismatchException e) { + logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName); + return; + } + + // Session#onUp() expects the load balancing policy to have been updated first, so that + // Host distances are up to date. This mean the policy could return the node before the + // new pool have been created. This is harmless if there is no prior pool since RequestHandler + // will ignore the node, but we do want to make sure there is no prior pool so we don't + // query from a pool we will shutdown right away. + for (SessionManager s : sessions) + s.removePool(host); + loadBalancingPolicy().onUp(host); + controlConnection.onUp(host); + + logger.trace("Adding/renewing host pools for newly UP host {}", host); + + List> futures = Lists.newArrayListWithCapacity(sessions.size()); + for (SessionManager s : sessions) + futures.add(s.forceRenewPool(host, reusedConnection)); + + try { + // Only mark the node up once all session have re-added their pool (if the load-balancing + // policy says it should), so that Host.isUp() don't return true before we're reconnected + // to the node. + List poolCreationResults = Futures.allAsList(futures).get(); + + // If any of the creation failed, they will have signaled a connection failure + // which will trigger a reconnection to the node. So don't bother marking UP. 
+ if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) { + logger.debug("Connection pool cannot be created, not marking {} UP", host); + return; + } + + host.setUp(); + + for (Host.StateListener listener : listeners) + listener.onUp(host); + + } catch (ExecutionException e) { + Throwable t = e.getCause(); + // That future is not really supposed to throw unexpected exceptions + if (!(t instanceof InterruptedException) && !(t instanceof CancellationException)) + logger.error("Unexpected error while marking node UP: while this shouldn't happen, this shouldn't be critical", t); + } + + // Now, check if there isn't pools to create/remove following the addition. + // We do that now only so that it's not called before we've set the node up. + for (SessionManager s : sessions) + s.updateCreatedPools(); + + } finally { + host.notificationsLock.unlock(); + } + + } finally { + if (reusedConnection != null && !reusedConnection.hasPool()) + reusedConnection.closeAsync(); + } + } + + public ListenableFuture triggerOnDown(final Host host, boolean startReconnection) { + return triggerOnDown(host, false, startReconnection); + } + + public ListenableFuture triggerOnDown(final Host host, final boolean isHostAddition, final boolean startReconnection) { + return executor.submit(new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + onDown(host, isHostAddition, startReconnection); + } + }); + } + + // Use triggerOnDown unless you're sure you want to run this on the current thread. 
+ private void onDown(final Host host, final boolean isHostAddition, boolean startReconnection) throws InterruptedException, ExecutionException { + logger.debug("Host {} is DOWN", host); + + if (isClosed()) + return; + + boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (!locked) { + logger.warn("Could not acquire notifications lock within {} seconds, ignoring DOWN notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); + return; + } + try { + + // Note: we don't want to skip that method if !host.isUp() because we set isUp + // late in onUp, and so we can rely on isUp if there is an error during onUp. + // But if there is a reconnection attempt in progress already, then we know + // we've already gone through that method since the last successful onUp(), so + // we're good skipping it. + if (host.reconnectionAttempt.get() != null) { + logger.debug("Aborting onDown because a reconnection is running on DOWN host {}", host); + return; + } + + // Remember if we care about this node at all. We must call this before + // we've signalled the load balancing policy, since most policy will always + // IGNORE down nodes anyway. + HostDistance distance = loadBalancingPolicy().distance(host); + + boolean wasUp = host.isUp(); + host.setDown(); + + loadBalancingPolicy().onDown(host); + controlConnection.onDown(host); + for (SessionManager s : sessions) + s.onDown(host); + + // Contrarily to other actions of that method, there is no reason to notify listeners + // unless the host was UP at the beginning of this function since even if a onUp fail + // mid-method, listeners won't have been notified of the UP. 
+ if (wasUp) { + for (Host.StateListener listener : listeners) + listener.onDown(host); + } + + // Don't start a reconnection if we ignore the node anyway (JAVA-314) + if (distance == HostDistance.IGNORED || !startReconnection) + return; + + logger.debug("{} is down, scheduling connection retries", host); + startPeriodicReconnectionAttempt(host, isHostAddition); + } finally { + host.notificationsLock.unlock(); + } + } + + void startPeriodicReconnectionAttempt(final Host host, final boolean isHostAddition) { + new AbstractReconnectionHandler(reconnectionExecutor, reconnectionPolicy().newSchedule(), host.reconnectionAttempt) { + + protected Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { + return connectionFactory.open(host); + } + + protected void onReconnection(Connection connection) { + // Make sure we have up-to-date infos on that host before adding it (so we typically + // catch that an upgraded node uses a new cassandra version). 
+ if (controlConnection.refreshNodeInfo(host)) { + logger.debug("Successful reconnection to {}, setting host UP", host); + try { + if (isHostAddition) + onAdd(host, connection); + else + onUp(host, connection); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception e) { + logger.error("Unexpected error while setting node up", e); + } + } else { + logger.debug("Not enough info for {}, ignoring host", host); + connection.closeAsync(); + } + } + + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { + if (logger.isDebugEnabled()) + logger.debug("Failed reconnection to {} ({}), scheduling retry in {} milliseconds", host, e.getMessage(), nextDelayMs); + return true; + } + + protected boolean onUnknownException(Exception e, long nextDelayMs) { + logger.error(String.format("Unknown error during reconnection to %s, scheduling retry in %d milliseconds", host, nextDelayMs), e); + return true; + } + + protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { + logger.error(String.format("Authentication error during reconnection to %s, scheduling retry in %d milliseconds", host, nextDelayMs), e); + return true; + } + + }.start(); + } + + void startSingleReconnectionAttempt(final Host host) { + if (isClosed() || host.isUp()) + return; + + logger.debug("Scheduling one-time reconnection to {}", host); + + // Setting an initial delay of 0 to start immediately, and all the exception handlers return false to prevent further attempts + new AbstractReconnectionHandler(reconnectionExecutor, reconnectionPolicy().newSchedule(), host.reconnectionAttempt, 0) { + + protected Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { + return connectionFactory.open(host); + } + + protected void onReconnection(Connection connection) { + // Make sure we have up-to-date infos on that host before adding it (so 
we typically + // catch that an upgraded node uses a new cassandra version). + if (controlConnection.refreshNodeInfo(host)) { + logger.debug("Successful reconnection to {}, setting host UP", host); + try { + onUp(host, connection); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception e) { + logger.error("Unexpected error while setting node up", e); + } + } else { + logger.debug("Not enough info for {}, ignoring host", host); + connection.closeAsync(); + } + } + + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { + if (logger.isDebugEnabled()) + logger.debug("Failed one-time reconnection to {} ({})", host, e.getMessage()); + return false; + } + + protected boolean onUnknownException(Exception e, long nextDelayMs) { + logger.error(String.format("Unknown error during one-time reconnection to %s", host), e); + return false; + } + + protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { + logger.error(String.format("Authentication error during one-time reconnection to %s", host), e); + return false; + } + }.start(); + } + + public ListenableFuture triggerOnAdd(final Host host) { + return executor.submit(new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + onAdd(host, null); + } + }); + } + + // Use triggerOnAdd unless you're sure you want to run this on the current thread. 
+ private void onAdd(final Host host, Connection reusedConnection) throws InterruptedException, ExecutionException { + if (isClosed()) + return; + + logger.info("New Cassandra host {} added", host); + + if (!connectionFactory.protocolVersion.isSupportedBy(host)) { + logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); + return; + } + + try { + + boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (!locked) { + logger.warn("Could not acquire notifications lock within {} seconds, ignoring ADD notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); + return; + } + try { + + // Adds to the load balancing first and foremost, as doing so might change the decision + // it will make for distance() on that node (not likely but we leave that possibility). + // This does mean the policy may start returning that node for query plan, but as long + // as no pools have been created (below) this will be ignored by RequestHandler so it's fine. + loadBalancingPolicy().onAdd(host); + + // Next, if the host should be ignored, well, ignore it. + if (loadBalancingPolicy().distance(host) == HostDistance.IGNORED) { + // We still mark the node UP though as it should be (and notifiy the listeners). 
+ // We'll mark it down if we have a notification anyway and we've documented that especially + // for IGNORED hosts, the isUp() method was a best effort guess + host.setUp(); + for (Host.StateListener listener : listeners) + listener.onAdd(host); + return; + } + + try { + reusedConnection = prepareAllQueries(host, reusedConnection); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // Don't propagate because we don't want to prevent other listener to run + } catch (UnsupportedProtocolVersionException e) { + logUnsupportedVersionProtocol(host, e.unsupportedVersion); + return; + } catch (ClusterNameMismatchException e) { + logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName); + return; + } + + controlConnection.onAdd(host); + + List> futures = Lists.newArrayListWithCapacity(sessions.size()); + for (SessionManager s : sessions) + futures.add(s.maybeAddPool(host, reusedConnection)); + + try { + // Only mark the node up once all session have added their pool (if the load-balancing + // policy says it should), so that Host.isUp() don't return true before we're reconnected + // to the node. + List poolCreationResults = Futures.allAsList(futures).get(); + + // If any of the creation failed, they will have signaled a connection failure + // which will trigger a reconnection to the node. So don't bother marking UP. 
+ if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) { + logger.debug("Connection pool cannot be created, not marking {} UP", host); + return; + } + + host.setUp(); + + for (Host.StateListener listener : listeners) + listener.onAdd(host); + + } catch (ExecutionException e) { + Throwable t = e.getCause(); + // That future is not really supposed to throw unexpected exceptions + if (!(t instanceof InterruptedException) && !(t instanceof CancellationException)) + logger.error("Unexpected error while adding node: while this shouldn't happen, this shouldn't be critical", t); + } + + // Now, check if there isn't pools to create/remove following the addition. + // We do that now only so that it's not called before we've set the node up. + for (SessionManager s : sessions) + s.updateCreatedPools(); + + } finally { + host.notificationsLock.unlock(); + } + + } finally { + if (reusedConnection != null && !reusedConnection.hasPool()) + reusedConnection.closeAsync(); + } + } + + public ListenableFuture triggerOnRemove(final Host host) { + return executor.submit(new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + onRemove(host); + } + }); + } + + // Use triggerOnRemove unless you're sure you want to run this on the current thread. 
+ private void onRemove(Host host) throws InterruptedException, ExecutionException { + if (isClosed()) + return; + + boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (!locked) { + logger.warn("Could not acquire notifications lock within {} seconds, ignoring REMOVE notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); + return; + } + try { + + host.setDown(); + + logger.debug("Removing host {}", host); + loadBalancingPolicy().onRemove(host); + controlConnection.onRemove(host); + for (SessionManager s : sessions) + s.onRemove(host); + + for (Host.StateListener listener : listeners) + listener.onRemove(host); + } finally { + host.notificationsLock.unlock(); + } + } + + public boolean signalConnectionFailure(Host host, ConnectionException exception, boolean isHostAddition) { + // Don't signal failure until we've fully initialized the controlConnection as this might mess up with + // the protocol detection + if (!isFullyInit || isClosed()) + return true; + + boolean isDown = host.signalConnectionFailure(exception); + if (isDown) + triggerOnDown(host, isHostAddition, true); + return isDown; + } + + public void removeHost(Host host, boolean isInitialConnection) { + if (host == null) + return; + + if (metadata.remove(host)) { + if (isInitialConnection) { + logger.warn("You listed {} in your contact points, but it could not be reached at startup", host); + } else { + logger.info("Cassandra host {} removed", host); + triggerOnRemove(host); + } + } + } + + public void ensurePoolsSizing() { + if (protocolVersion().compareTo(ProtocolVersion.V3) >= 0) + return; + + for (SessionManager session : sessions) { + for (HostConnectionPool pool : session.pools.values()) + pool.ensureCoreConnections(); + } + } + + public PreparedStatement addPrepared(PreparedStatement stmt) { + PreparedStatement previous = preparedQueries.putIfAbsent(stmt.getPreparedId().id, stmt); + if (previous != null) { + logger.warn("Re-preparing already prepared 
query {}. Please note that preparing the same query more than once is " + + "generally an anti-pattern and will likely affect performance. Consider preparing the statement only once.", stmt.getQueryString()); + + // The one object in the cache will get GCed once it's not referenced by the client anymore since we use a weak reference. + // So we need to make sure that the instance we do return to the user is the one that is in the cache. + return previous; + } + return stmt; + } + + /** + * @param reusedConnection an existing connection (from a reconnection attempt) that we want to + * reuse to prepare the statements (might be null). + * @return a connection that the rest of the initialization process can use (it will be made part + * of a connection pool). Can be reusedConnection, or one that was open in the method. + */ + private Connection prepareAllQueries(Host host, Connection reusedConnection) throws InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { + if (preparedQueries.isEmpty()) + return reusedConnection; + + logger.debug("Preparing {} prepared queries on newly up node {}", preparedQueries.size(), host); + Connection connection = null; + try { + connection = (reusedConnection == null) + ? connectionFactory.open(host) + : reusedConnection; + + try { + ControlConnection.waitForSchemaAgreement(connection, this); + } catch (ExecutionException e) { + // As below, just move on + } + + // Furthermore, along with each prepared query we keep the current keyspace at the time of preparation + // as we need to make it is the same when we re-prepare on new/restarted nodes. Most query will use the + // same keyspace so keeping it each time is slightly wasteful, but this doesn't really matter and is + // simpler. Besides, we do avoid in prepareAllQueries to not set the current keyspace more than needed. + + // We need to make sure we prepared every query with the right current keyspace, i.e. 
the one originally + // used for preparing it. However, since we are likely that all prepared query belong to only a handful + // of different keyspace (possibly only one), and to avoid setting the current keyspace more than needed, + // we first sort the query per keyspace. + SetMultimap perKeyspace = HashMultimap.create(); + for (PreparedStatement ps : preparedQueries.values()) { + // It's possible for a query to not have a current keyspace. But since null doesn't work well as + // map keys, we use the empty string instead (that is not a valid keyspace name). + String keyspace = ps.getQueryKeyspace() == null ? "" : ps.getQueryKeyspace(); + perKeyspace.put(keyspace, ps.getQueryString()); + } + + for (String keyspace : perKeyspace.keySet()) { + // Empty string mean no particular keyspace to set + if (!keyspace.isEmpty()) + connection.setKeyspace(keyspace); + + List futures = new ArrayList(preparedQueries.size()); + for (String query : perKeyspace.get(keyspace)) { + futures.add(connection.write(new Requests.Prepare(query))); + } + for (Connection.Future future : futures) { + try { + future.get(); + } catch (ExecutionException e) { + // This "might" happen if we drop a CF but haven't removed it's prepared queries (which we don't do + // currently). It's not a big deal however as if it's a more serious problem it'll show up later when + // the query is tried for execution. 
+ logger.debug("Unexpected error while preparing queries on new/newly up host", e); + } + } + } + + return connection; + } catch (ConnectionException e) { + // Ignore, not a big deal + if (connection != null) + connection.closeAsync(); + return null; + } catch (AuthenticationException e) { + // That's a bad news, but ignore at this point + if (connection != null) + connection.closeAsync(); + return null; + } catch (BusyConnectionException e) { + // Ignore, not a big deal + // In theory the problem is transient so the connection could be reused later, but if the core pool size is 1 + // it's better to close this one so that we start with a fresh connection. + if (connection != null) + connection.closeAsync(); + return null; + } + } + + public void submitSchemaRefresh(final SchemaElement targetType, final String targetKeyspace, final String targetName) { + logger.trace("Submitting schema refresh"); + executor.submit(new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + controlConnection.refreshSchema(targetType, targetKeyspace, targetName); + } + }); + } + + // refresh the schema using the provided connection, and notice the future with the provided resultset once done + public void refreshSchemaAndSignal(final Connection connection, final DefaultResultSetFuture future, final ResultSet rs, final SchemaElement target, final String keyspace, final String name) { + if (logger.isDebugEnabled()) + logger.debug("Refreshing schema for {}{}", + target == null ? "everything" : keyspace, + (target == KEYSPACE) ? "" : "." 
+ name + " (" + target + ")"); + + maybeRefreshSchemaAndSignal(connection, future, rs, target, keyspace, name); + } + + public void waitForSchemaAgreementAndSignal(final Connection connection, final DefaultResultSetFuture future, final ResultSet rs) { + maybeRefreshSchemaAndSignal(connection, future, rs, null, null, null); + } + + private void maybeRefreshSchemaAndSignal(final Connection connection, final DefaultResultSetFuture future, final ResultSet rs, final SchemaElement targetType, final String targetKeyspace, final String targetName) { + final boolean refreshSchema = (targetKeyspace != null); // if false, only wait for schema agreement + + executor.submit(new Runnable() { + @Override + public void run() { + boolean schemaInAgreement = false; + try { + // Before refreshing the schema, wait for schema agreement so + // that querying a table just after having created it don't fail. + schemaInAgreement = ControlConnection.waitForSchemaAgreement(connection, Manager.this); + if (!schemaInAgreement) + logger.warn("No schema agreement from live replicas after {} s. The schema may not be up to date on some nodes.", configuration.getProtocolOptions().getMaxSchemaAgreementWaitSeconds()); + if (refreshSchema) + ControlConnection.refreshSchema(connection, targetType, targetKeyspace, targetName, Manager.this, false); + } catch (Exception e) { + if (refreshSchema) { + logger.error("Error during schema refresh ({}). The schema from Cluster.getMetadata() might appear stale. Asynchronously submitting job to fix.", e.getMessage()); + submitSchemaRefresh(targetType, targetKeyspace, targetName); + } else { + logger.warn("Error while waiting for schema agreement", e); + } + } finally { + // Always sets the result, but remember if we reached schema agreement + rs.getExecutionInfo().setSchemaInAgreement(schemaInAgreement); + future.setResult(rs); + } + } + }); + } + + // Called when some message has been received but has been initiated from the server (streamId < 0). 
+ // This is called on an I/O thread, so all blocking operation must be done on an executor. + @Override + public void handle(Message.Response response) { + + if (!(response instanceof Responses.Event)) { + logger.error("Received an unexpected message from the server: {}", response); + return; + } + + final ProtocolEvent event = ((Responses.Event)response).event; + + logger.debug("Received event {}, scheduling delivery", response); + + switch (event.type) { + case TOPOLOGY_CHANGE: + ProtocolEvent.TopologyChange tpc = (ProtocolEvent.TopologyChange)event; + InetSocketAddress tpAddr = translateAddress(tpc.node.getAddress()); + switch (tpc.change) { + case NEW_NODE: + final Host newHost = metadata.add(tpAddr); + if (newHost != null) { + // Cassandra tends to send notifications for new/up nodes a bit early (it is triggered once + // gossip is up, but that is before the client-side server is up), so we add a delay + // (otherwise the connection will likely fail and have to be retry which is wasteful). This + // probably should be fixed C* side, after which we'll be able to remove this. + scheduledTasksExecutor.schedule(new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + // Make sure we have up-to-date infos on that host before adding it (so we typically + // catch that an upgraded node uses a new cassandra version). 
+ if (controlConnection.refreshNodeInfo(newHost)) { + onAdd(newHost, null); + } else { + logger.debug("Not enough info for {}, ignoring host", newHost); + } + } + }, NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); + } + break; + case REMOVED_NODE: + removeHost(metadata.getHost(tpAddr), false); + break; + case MOVED_NODE: + executor.submit(new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() { + controlConnection.refreshNodeListAndTokenMap(); + } + }); + break; + } + break; + case STATUS_CHANGE: + ProtocolEvent.StatusChange stc = (ProtocolEvent.StatusChange)event; + InetSocketAddress stAddr = translateAddress(stc.node.getAddress()); + switch (stc.status) { + case UP: + final Host hostUp = metadata.getHost(stAddr); + if (hostUp == null) { + final Host h = metadata.add(stAddr); + // If hostUp is still null, it means we didn't knew about it the line before but + // got beaten at adding it to the metadata by another thread. In that case, it's + // fine to let the other thread win and ignore the notification here + if (h == null) + return; + + // See NEW_NODE above + scheduledTasksExecutor.schedule(new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + // Make sure we have up-to-date infos on that host before adding it (so we typically + // catch that an upgraded node uses a new cassandra version). + if (controlConnection.refreshNodeInfo(h)) { + onAdd(h, null); + } else { + logger.debug("Not enough info for {}, ignoring host", h); + } + } + }, NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); + } else { + executor.submit(new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + // Make sure we have up-to-date infos on that host before adding it (so we typically + // catch that an upgraded node uses a new cassandra version). 
+ if (controlConnection.refreshNodeInfo(hostUp)) { + onUp(hostUp, null); + } else { + logger.debug("Not enough info for {}, ignoring host", hostUp); + } + } + }); + } + break; + case DOWN: + // Note that there is a slight risk we can receive the event late and thus + // mark the host down even though we already had reconnected successfully. + // But it is unlikely, and don't have too much consequence since we'll try reconnecting + // right away, so we favor the detection to make the Host.isUp method more reliable. + Host hostDown = metadata.getHost(stAddr); + if (hostDown != null) + triggerOnDown(hostDown, true); + break; + } + break; + case SCHEMA_CHANGE: + ProtocolEvent.SchemaChange scc = (ProtocolEvent.SchemaChange)event; + switch (scc.change) { + case CREATED: + case UPDATED: + submitSchemaRefresh(scc.targetType, scc.targetKeyspace, scc.targetName); + break; + case DROPPED: + KeyspaceMetadata keyspace; + switch (scc.targetType) { + case KEYSPACE: + manager.metadata.removeKeyspace(scc.targetKeyspace); + break; + case TABLE: + keyspace = manager.metadata.getKeyspaceInternal(scc.targetKeyspace); + if (keyspace == null) + logger.warn("Received a DROPPED notification for table {}.{}, but this keyspace is unknown in our metadata", + scc.targetKeyspace, scc.targetName); + else + keyspace.removeTable(scc.targetName); + break; + case TYPE: + keyspace = manager.metadata.getKeyspaceInternal(scc.targetKeyspace); + if (keyspace == null) + logger.warn("Received a DROPPED notification for UDT {}.{}, but this keyspace is unknown in our metadata", + scc.targetKeyspace, scc.targetName); + else + keyspace.removeUserType(scc.targetName); + break; + } + break; + } + break; + } + } + + void refreshConnectedHosts() { + // Deal first with the control connection: if it's connected to a node that is not LOCAL, try + // reconnecting (thus letting the loadBalancingPolicy pick a better node) + Host ccHost = controlConnection.connectedHost(); + if (ccHost == null || 
loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) + controlConnection.reconnect(); + + for (SessionManager s : sessions) + s.updateCreatedPools(); + } + + void refreshConnectedHost(Host host) { + // Deal with the control connection if it was using this host + Host ccHost = controlConnection.connectedHost(); + if (ccHost == null || ccHost.equals(host) && loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) + controlConnection.reconnect(); + + for (SessionManager s : sessions) + s.updateCreatedPools(host); + } + + private class ClusterCloseFuture extends CloseFuture.Forwarding { + + ClusterCloseFuture(List futures) { + super(futures); + } + + @Override + public CloseFuture force() { + // The only ExecutorService we haven't forced yet is executor + shutdownNow(executor); + return super.force(); + } + + @Override + protected void onFuturesDone() { + /* + * When we reach this, all sessions should be shutdown. We've also started a shutdown + * of the thread pools used by this object. Remains 2 things before marking the shutdown + * as done: + * 1) we need to wait for the completion of the shutdown of the Cluster threads pools. + * 2) we need to shutdown the Connection.Factory, i.e. the executors used by Netty. + * But at least for 2), we must not do it on the current thread because that could be + * a netty worker, which we're going to shutdown. So creates some thread for that. + */ + (new Thread("Shutdown-checker") { + public void run() { + // Just wait indefinitely on the the completion of the thread pools. Provided the user + // call force(), we'll never really block forever. 
+ try { + reconnectionExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + scheduledTasksExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + blockingExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + + // Some of the jobs on the executors can be doing query stuff, so close the + // connectionFactory at the very last + connectionFactory.shutdown(); + + reaper.shutdown(); + + set(null); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + setException(e); + } + } + }).start(); + } + } + + private class CleanupIdleConnectionsTask implements Runnable { + @Override public void run() { + try { + long now = System.currentTimeMillis(); + for (SessionManager session : sessions) { + session.cleanupIdleConnections(now); + } + } catch (Exception e) { + logger.warn("Error while trashing idle connections", e); + } + } + } + } + + /** + * Periodically ensures that closed connections are properly terminated once they have no more pending requests. + * + * This is normally done when the connection errors out, or when the last request is processed; this class acts as + * a last-effort protection since unterminated connections can lead to deadlocks. If it terminates a connection, + * this indicates a bug; warnings are logged so that this can be reported. 
+ * + * @see Connection#tryTerminate(boolean) + */ + static class ConnectionReaper { + private static final int INTERVAL_MS = 15000; + + private final ScheduledExecutorService executor; + @VisibleForTesting + final Map connections = new ConcurrentHashMap(); + + private volatile boolean shutdown; + + private final Runnable reaperTask = new Runnable() { + @Override + public void run() { + long now = System.currentTimeMillis(); + Iterator> iterator = connections.entrySet().iterator(); + while (iterator.hasNext()) { + Entry entry = iterator.next(); + Connection connection = entry.getKey(); + Long terminateTime = entry.getValue(); + if (terminateTime <= now) { + boolean terminated = connection.tryTerminate(true); + if (terminated) + iterator.remove(); + } + } + } + }; + + ConnectionReaper(Cluster.Manager manager) { + executor = Executors.newScheduledThreadPool(1, manager.threadFactory("connection-reaper")); + executor.scheduleWithFixedDelay(reaperTask, INTERVAL_MS, INTERVAL_MS, TimeUnit.MILLISECONDS); + } + + void register(Connection connection, long terminateTime) { + if (shutdown) { + // This should not happen since the reaper is shut down after all sessions. + logger.warn("Connection registered after reaper shutdown: {}", connection); + connection.tryTerminate(true); + } else { + connections.put(connection, terminateTime); + } + } + + void shutdown() { + shutdown = true; + // Force shutdown to avoid waiting for the interval, and run the task manually one last time + executor.shutdownNow(); + reaperTask.run(); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterNameMismatchException.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterNameMismatchException.java new file mode 100644 index 00000000000..78effe3b96e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterNameMismatchException.java @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; + +/** + * Indicates that we've attempted to connect to a node which cluster name doesn't match that of the other nodes known to the driver. + */ +class ClusterNameMismatchException extends Exception { + + private static final long serialVersionUID = 0; + + public final InetSocketAddress address; + public final String expectedClusterName; + public final String actualClusterName; + + public ClusterNameMismatchException(InetSocketAddress address, String actualClusterName, String expectedClusterName) { + super(String.format("[%s] Host %s reports cluster name '%s' that doesn't match our cluster name '%s'. This host will be ignored.", + address, address, actualClusterName, expectedClusterName)); + this.address = address; + this.expectedClusterName = expectedClusterName; + this.actualClusterName = actualClusterName; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java new file mode 100644 index 00000000000..0bc83feb08c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java @@ -0,0 +1,380 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.*; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * Metadata describing the columns returned in a {@link ResultSet} or a + * {@link PreparedStatement}. + *

+ * A {@code columnDefinitions}} instance is mainly a list of + * {@code ColumnsDefinitions.Definition}. The definitions or metadata for a column + * can be accessed either by: + *

    + *
  • index (indexed from 0)
  • + *
  • name
  • + *
+ *

+ * When accessed by name, column selection is case insensitive. In case multiple + * columns only differ by the case of their name, then the column returned with + * be the first column that has been defined in CQL without forcing case sensitivity + * (that is, it has either been defined without quotes or is fully lowercase). + * If none of the columns have been defined in this manner, the first column matching + * (with case insensitivity) is returned. You can force the case of a selection + * by double quoting the name. + *

+ * For example: + *

    + *
  • If {@code cd} contains column {@code fOO}, then {@code cd.contains("foo")}, + * {@code cd.contains("fOO")} and {@code cd.contains("Foo")} will return {@code true}.
  • + *
  • If {@code cd} contains both {@code foo} and {@code FOO} then: + *
      + *
    • {@code cd.getType("foo")}, {@code cd.getType("fOO")} and {@code cd.getType("FOO")} + * will all match column {@code foo}.
    • + *
    • {@code cd.getType("\"FOO\"")} will match column {@code FOO}
    • + *
    + *
+ * Note that the preceding rules mean that if a {@code ColumnDefinitions} object + * contains multiple occurrences of the exact same name (be it the same column + * multiple times or columns from different tables with the same name), you + * will have to use selection by index to disambiguate. + */ +public class ColumnDefinitions implements Iterable { + + static final ColumnDefinitions EMPTY = new ColumnDefinitions(new Definition[0]); + + private final Definition[] byIdx; + private final Map byName; + + ColumnDefinitions(Definition[] defs) { + + this.byIdx = defs; + this.byName = new HashMap(defs.length); + + for (int i = 0; i < defs.length; i++) { + // Be optimistic, 99% of the time, previous will be null. + int[] previous = this.byName.put(defs[i].name.toLowerCase(), new int[]{ i }); + if (previous != null) { + int[] indexes = new int[previous.length + 1]; + System.arraycopy(previous, 0, indexes, 0, previous.length); + indexes[indexes.length - 1] = i; + this.byName.put(defs[i].name.toLowerCase(), indexes); + } + } + } + + /** + * Returns the number of columns described by this {@code Columns} + * instance. + * + * @return the number of columns described by this metadata. + */ + public int size() { + return byIdx.length; + } + + /** + * Returns whether this metadata contains a given name. + * + * @param name the name to check. + * @return {@code true} if this metadata contains the column named {@code name}, + * {@code false} otherwise. + */ + public boolean contains(String name) { + return findAllIdx(name) != null; + } + + /** + * The first index in this metadata of the provided name, if present. + * + * @param name the name of the column. + * @return the index of the first occurrence of {@code name} in this metadata if + * {@code contains(name)}, -1 otherwise. + */ + public int getIndexOf(String name) { + return findFirstIdx(name); + } + + /** + * Returns an iterator over the {@link Definition} contained in this metadata. 
+ * + * The order of the iterator will be the one of this metadata. + * + * @return an iterator over the {@link Definition} contained in this metadata. + */ + @Override + public Iterator iterator() { + return Arrays.asList(byIdx).iterator(); + } + + /** + * Returns a list containing all the definitions of this metadata in order. + * + * @return a list of the {@link Definition} contained in this metadata. + */ + public List asList() { + return Arrays.asList(byIdx); + } + + /** + * Returns the name of the {@code i}th column in this metadata. + * + * @param i the index in this metadata. + * @return the name of the {@code i}th column in this metadata. + * + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + */ + public String getName(int i) { + return byIdx[i].name; + } + + /** + * Returns the type of the {@code i}th column in this metadata. + * + * @param i the index in this metadata. + * @return the type of the {@code i}th column in this metadata. + * + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + */ + public DataType getType(int i) { + return byIdx[i].type; + } + + /** + * Returns the type of the first occurrence of {@code name} in this metadata. + * + * @param name the name of the column. + * @return the type of (the first occurrence of) {@code name} in this metadata. + * + * @throws IllegalArgumentException if {@code name} is not in this metadata. + */ + public DataType getType(String name) { + return getType(getFirstIdx(name)); + } + + /** + * Returns the keyspace of the {@code i}th column in this metadata. + * + * @param i the index in this metadata. + * @return the keyspace of the {@code i}th column in this metadata. + * + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + */ + public String getKeyspace(int i) { + return byIdx[i].keyspace; + } + + /** + * Returns the keyspace of the first occurrence of {@code name} in this metadata. + * + * @param name the name of the column. 
+ * @return the keyspace of (the first occurrence of) column {@code name} in this metadata. + * + * @throws IllegalArgumentException if {@code name} is not in this metadata. + */ + public String getKeyspace(String name) { + return getKeyspace(getFirstIdx(name)); + } + + /** + * Returns the table of the {@code i}th column in this metadata. + * + * @param i the index in this metadata. + * @return the table of the {@code i}th column in this metadata. + * + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + */ + public String getTable(int i) { + return byIdx[i].table; + } + + /** + * Returns the table of first occurrence of {@code name} in this metadata. + * + * @param name the name of the column. + * @return the table of (the first occurrence of) column {@code name} in this metadata. + * + * @throws IllegalArgumentException if {@code name} is not in this metadata. + */ + public String getTable(String name) { + return getTable(getFirstIdx(name)); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Columns["); + for (int i = 0; i < size(); i++) { + if (i != 0) + sb.append(", "); + Definition def = byIdx[i]; + sb.append(def.name).append('(').append(def.type).append(')'); + } + sb.append(']'); + return sb.toString(); + } + + int findFirstIdx(String name) { + + int[] indexes = findAllIdx(name); + return indexes == null ? 
-1 : indexes[0]; + } + + int[] findAllIdx(String name) { + boolean caseSensitive = false; + if (name.length() >= 2 && name.charAt(0) == '"' && name.charAt(name.length() - 1) == '"') { + name = name.substring(1, name.length() - 1); + caseSensitive = true; + } + + int[] indexes = byName.get(name.toLowerCase()); + if (!caseSensitive || indexes == null) + return indexes; + + // First, optimistic and assume all are matching + int nbMatch = 0; + for (int i = 0; i < indexes.length; i++) + if (name.equals(byIdx[indexes[i]].name)) + nbMatch++; + + if (nbMatch == indexes.length) + return indexes; + + int[] result = new int[nbMatch]; + int j = 0; + for (int i = 0; i < indexes.length; i++) { + int idx = indexes[i]; + if (name.equals(byIdx[idx].name)) + result[j++] = idx; + } + + return result; + } + + int[] getAllIdx(String name) { + int[] indexes = findAllIdx(name); + if (indexes == null) + throw new IllegalArgumentException(name + " is not a column defined in this metadata"); + + return indexes; + } + + int getFirstIdx(String name) { + return getAllIdx(name)[0]; + } + + void checkBounds(int i) { + if (i < 0 || i >= size()) + throw new ArrayIndexOutOfBoundsException(i); + } + + // Note: we avoid having a vararg method to avoid the array allocation that comes with it. 
+ void checkType(int i, DataType.Name name) { + DataType defined = getType(i); + if (name != defined.getName()) + throw new InvalidTypeException(String.format("Column %s is of type %s", getName(i), defined)); + } + + DataType.Name checkType(int i, DataType.Name name1, DataType.Name name2) { + DataType defined = getType(i); + if (name1 != defined.getName() && name2 != defined.getName()) + throw new InvalidTypeException(String.format("Column %s is of type %s", getName(i), defined)); + + return defined.getName(); + } + + DataType.Name checkType(int i, DataType.Name name1, DataType.Name name2, DataType.Name name3) { + DataType defined = getType(i); + if (name1 != defined.getName() && name2 != defined.getName() && name3 != defined.getName()) + throw new InvalidTypeException(String.format("Column %s is of type %s", getName(i), defined)); + + return defined.getName(); + } + + /** + * A column definition. + */ + public static class Definition { + + private final String keyspace; + private final String table; + private final String name; + private final DataType type; + + Definition(String keyspace, String table, String name, DataType type) { + this.keyspace = keyspace; + this.table = table; + this.name = name; + this.type = type; + } + + /** + * The name of the keyspace this column is part of. + * + * @return the name of the keyspace this column is part of. + */ + public String getKeyspace() { + return keyspace; + } + + /** + * Returns the name of the table this column is part of. + * + * @return the name of the table this column is part of. + */ + public String getTable() { + return table; + } + + /** + * Returns the name of the column. + * + * @return the name of the column. + */ + public String getName() { + return name; + } + + /** + * Returns the type of the column. + * + * @return the type of the column. 
+ */ + public DataType getType() { + return type; + } + + @Override + public final int hashCode() { + return Arrays.hashCode(new Object[]{ keyspace, table, name, type}); + } + + @Override + public final boolean equals(Object o) { + if(!(o instanceof Definition)) + return false; + + Definition other = (Definition)o; + return keyspace.equals(other.keyspace) + && table.equals(other.table) + && name.equals(other.name) + && type.equals(other.type); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java new file mode 100644 index 00000000000..0e12e1f3e73 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java @@ -0,0 +1,335 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; + +/** + * Describes a Column. 
+ */ +public class ColumnMetadata { + + static final String COLUMN_NAME = "column_name"; + static final String VALIDATOR = "validator"; + static final String COMPONENT_INDEX = "component_index"; + static final String KIND = "type"; + + static final String INDEX_TYPE = "index_type"; + static final String INDEX_OPTIONS = "index_options"; + static final String INDEX_NAME = "index_name"; + private static final String CUSTOM_INDEX_CLASS = "class_name"; + + private static final String INDEX_MAP_KEYS = "index_keys"; + private static final String INDEX_MAP_ENTRIES = "index_keys_and_values"; + private final TableMetadata table; + private final String name; + private final DataType type; + private final IndexMetadata index; + private final boolean isStatic; + + private ColumnMetadata(TableMetadata table, String name, DataType type, boolean isStatic, Map indexColumns) { + this.table = table; + this.name = name; + this.type = type; + this.isStatic = isStatic; + this.index = IndexMetadata.build(this, indexColumns); + } + + static ColumnMetadata fromRaw(TableMetadata tm, Raw raw) { + return new ColumnMetadata(tm, raw.name, raw.dataType, raw.kind == Raw.Kind.STATIC, raw.indexColumns); + } + + static ColumnMetadata forAlias(TableMetadata tm, String name, DataType type) { + return new ColumnMetadata(tm, name, type, false, Collections.emptyMap()); + } + + /** + * Returns the name of the column. + * + * @return the name of the column. + */ + public String getName() { + return name; + } + + /** + * Returns the metadata of the table this column is part of. + * + * @return the {@code TableMetadata} for the table this column is part of. + */ + public TableMetadata getTable() { + return table; + } + + /** + * Returns the type of the column. + * + * @return the type of the column. + */ + public DataType getType() { + return type; + } + + /** + * Returns the indexing metadata on this column if the column is indexed. 
+ * + * @return the metadata on the column index if the column is indexed, + * {@code null} otherwise. + */ + public IndexMetadata getIndex() { + return index; + } + + /** + * Whether this column is a static column. + * + * @return Whether this column is a static column or not. + */ + public boolean isStatic() { + return isStatic; + } + + /** + * Metadata on a column index. + */ + public static class IndexMetadata { + + private final ColumnMetadata column; + private final String name; + private final Map indexOptions; + + private IndexMetadata(ColumnMetadata column, String name) { + this(column, name, null); + } + + private IndexMetadata(ColumnMetadata column, String name, Map indexOptions) { + this.column = column; + this.name = name; + this.indexOptions = indexOptions; + } + + /** + * Returns the column this index metadata refers to. + * + * @return the column this index metadata refers to. + */ + public ColumnMetadata getIndexedColumn() { + return column; + } + + /** + * Returns the index name. + * + * @return the index name. + */ + public String getName() { + return name; + } + + /** + * Returns whether this index is a custom one. + *

+ * If it is indeed a custom index, {@link #getIndexClassName} will + * return the name of the class used in Cassandra to implement that + * index. + * + * @return {@code true} if this metadata represents a custom index. + */ + public boolean isCustomIndex() { + return getIndexClassName() != null; + } + + /** + * The name of the class used to implement the custom index, if it is one. + * + * @return the name of the class used Cassandra side to implement this + * custom index if {@code isCustomIndex() == true}, {@code null} otherwise. + */ + public String getIndexClassName() { + return getOption(CUSTOM_INDEX_CLASS); + } + + /** + * Return whether this index is a 'KEYS' index on a map, e.g., + * CREATE INDEX ON mytable (KEYS(mymap)) + * + * @return {@code true} if this is a 'KEYS' index on a map. + */ + public boolean isKeys() { + return getOption(INDEX_MAP_KEYS) != null; + } + + /** + * Return whether this index is a 'FULL' index on a frozen collection, e.g., + * CREATE INDEX ON mytable (FULL(mymap)) + * + * @return {@code true} if this is a 'FULL' index on a frozen collection. + */ + public boolean isFull() { + /* + * This check is analogous to the Cassandra counterpart + * in IndexTarget#fromColumnDefinition. + */ + return !isKeys() + && !isEntries() + && column.getType().isCollection() + && column.getType().isFrozen(); + } + + /** + * Return whether this index is a 'ENTRIES' index on a map, e.g., + * CREATE INDEX ON mytable (ENTRIES(mymap)) + * + * @return {@code true} if this is an 'ENTRIES' index on a map. + */ + public boolean isEntries() { + return getOption(INDEX_MAP_ENTRIES) != null; + } + + /** + * Return the value for the given option name. + * + * @param name Option name + * @return Option value + */ + public String getOption(String name) { + return indexOptions != null ? indexOptions.get(name) : null; + } + + /** + * Returns a CQL query representing this index. 
+ * + * This method returns a single 'CREATE INDEX' query corresponding to + * this index definition. + * + * @return the 'CREATE INDEX' query corresponding to this index. + */ + public String asCQLQuery() { + TableMetadata table = column.getTable(); + String ksName = Metadata.escapeId(table.getKeyspace().getName()); + String cfName = Metadata.escapeId(table.getName()); + String colName = Metadata.escapeId(column.getName()); + return isCustomIndex() + ? String.format("CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s' WITH OPTIONS = %s;", + name, ksName, cfName, colName, getIndexClassName(), getOptionsAsCql()) + : String.format("CREATE INDEX %s ON %s.%s (%s);", name, ksName, cfName, getIndexFunction(colName)); + } + + /** + * Builds a string representation of the custom index options. + * + * @return String representation of the custom index options, similar to what Cassandra stores in + * the 'index_options' column of the 'schema_columns' table in the 'system' keyspace. + */ + private String getOptionsAsCql() { + StringBuilder builder = new StringBuilder(); + builder.append("{"); + Iterator> it = indexOptions.entrySet().iterator(); + while (it.hasNext()) { + Entry option = it.next(); + builder.append(String.format("'%s' : '%s'", option.getKey(), option.getValue())); + if (it.hasNext()) + builder.append(", "); + } + builder.append("}"); + return builder.toString(); + } + + /** + * Wraps the column name with the appropriate index function (KEYS, FULL, ENTRIES), + * if necessary. + * + * @return Column name wrapped with the appropriate index function. 
+ */ + private String getIndexFunction(String colName) { + if (isKeys()) + return String.format("KEYS(%s)", colName); + else if (isFull()) + return String.format("FULL(%s)", colName); + else if (isEntries()) + return String.format("ENTRIES(%s)", colName); + return colName; + } + + private static IndexMetadata build(ColumnMetadata column, Map indexColumns) { + if (indexColumns.isEmpty()) + return null; + + String type = indexColumns.get(INDEX_TYPE); + if (type == null) + return null; + + if (!indexColumns.containsKey(INDEX_OPTIONS)) + return new IndexMetadata(column, indexColumns.get(INDEX_NAME)); + + Map indexOptions = SimpleJSONParser.parseStringMap(indexColumns.get(INDEX_OPTIONS)); + return new IndexMetadata(column, indexColumns.get(INDEX_NAME), indexOptions); + } + } + + @Override + public String toString() { + String str = Metadata.escapeId(name) + ' ' + type; + return isStatic ? str + " static" : str; + } + + // Temporary class that is used to make building the schema easier. Not meant to be + // exposed publicly at all. + static class Raw { + public enum Kind { PARTITION_KEY, CLUSTERING_KEY, REGULAR, COMPACT_VALUE, STATIC } + + public final String name; + public final Kind kind; + public final int componentIndex; + public final DataType dataType; + public final boolean isReversed; + + public final Map indexColumns = new HashMap(); + + Raw(String name, Kind kind, int componentIndex, DataType dataType, boolean isReversed) { + this.name = name; + this.kind = kind; + this.componentIndex = componentIndex; + this.dataType = dataType; + this.isReversed = isReversed; + } + + static Raw fromRow(Row row, VersionNumber version) { + + String name = row.getString(COLUMN_NAME); + Kind kind = version.getMajor() < 2 || row.isNull(KIND) + ? Kind.REGULAR + : Enum.valueOf(Kind.class, row.getString(KIND).toUpperCase()); + int componentIndex = row.isNull(COMPONENT_INDEX) ? 
0 : row.getInt(COMPONENT_INDEX); + String validatorStr = row.getString(VALIDATOR); + boolean reversed = CassandraTypeParser.isReversed(validatorStr); + DataType dataType = CassandraTypeParser.parseOne(validatorStr); + + Raw c = new Raw(name, kind, componentIndex, dataType, reversed); + + for (String str : Arrays.asList(INDEX_TYPE, INDEX_NAME, INDEX_OPTIONS)) + if (!row.isNull(str)) + c.indexColumns.put(str, row.getString(str)); + + return c; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Configuration.java b/driver-core/src/main/java/com/datastax/driver/core/Configuration.java new file mode 100644 index 00000000000..79ae5cf8f52 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Configuration.java @@ -0,0 +1,169 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.datastax.driver.core.policies.Policies; + +/** + * The configuration of the cluster. + * It configures the following: + *

    + *
  • Cassandra protocol level configuration (compression).
  • + *
  • Connection pooling configurations.
  • + *
  • low-level TCP configuration options (tcpNoDelay, keepAlive, ...).
  • + *
  • Metrics related options.
  • + *
  • Query related options (default consistency level, fetchSize, ...).
  • + *
  • Netty layer customization options.
  • + *
+ * This is also where you get the configured policies, though those cannot be changed + * (they are set during the built of the Cluster object). + */ +public class Configuration { + + private final Policies policies; + + private final ProtocolOptions protocolOptions; + private final PoolingOptions poolingOptions; + private final SocketOptions socketOptions; + private final MetricsOptions metricsOptions; + private final QueryOptions queryOptions; + private final NettyOptions nettyOptions; + + /* + * Creates a configuration object. + */ + public Configuration() { + this(Policies.builder().build(), + new ProtocolOptions(), + new PoolingOptions(), + new SocketOptions(), + new MetricsOptions(), + new QueryOptions(), + NettyOptions.DEFAULT_INSTANCE); + } + + /** + * Creates a configuration with the specified parameters. + * + * @param policies the policies to use + * @param protocolOptions the protocol options to use + * @param poolingOptions the pooling options to use + * @param socketOptions the socket options to use + * @param metricsOptions the metrics options, or null to disable metrics. + * @param queryOptions defaults related to queries. + * @param nettyOptions the {@link NettyOptions} instance to use + */ + public Configuration(Policies policies, + ProtocolOptions protocolOptions, + PoolingOptions poolingOptions, + SocketOptions socketOptions, + MetricsOptions metricsOptions, + QueryOptions queryOptions, + NettyOptions nettyOptions) { + this.policies = policies; + this.protocolOptions = protocolOptions; + this.poolingOptions = poolingOptions; + this.socketOptions = socketOptions; + this.metricsOptions = metricsOptions; + this.queryOptions = queryOptions; + this.nettyOptions = nettyOptions; + } + + /** + * @deprecated this constructor is provided for backward compatibility. 
+ */ + @Deprecated + public Configuration(Policies policies, + ProtocolOptions protocolOptions, + PoolingOptions poolingOptions, + SocketOptions socketOptions, + MetricsOptions metricsOptions, + QueryOptions queryOptions) { + this(policies, protocolOptions, poolingOptions, socketOptions, metricsOptions, queryOptions, + NettyOptions.DEFAULT_INSTANCE); + } + + void register(Cluster.Manager manager) { + protocolOptions.register(manager); + poolingOptions.register(manager); + queryOptions.register(manager); + } + + /** + * Returns the policies set for the cluster. + * + * @return the policies set for the cluster. + */ + public Policies getPolicies() { + return policies; + } + + /** + * Returns the low-level TCP configuration options used (tcpNoDelay, keepAlive, ...). + * + * @return the socket options. + */ + public SocketOptions getSocketOptions() { + return socketOptions; + } + + /** + * Returns the Cassandra binary protocol level configuration (compression). + * + * @return the protocol options. + */ + public ProtocolOptions getProtocolOptions() { + return protocolOptions; + } + + /** + * Returns the connection pooling configuration. + * + * @return the pooling options. + */ + public PoolingOptions getPoolingOptions() { + return poolingOptions; + } + + /** + * Returns the metrics configuration, if metrics are enabled. + *

+ * Metrics collection is enabled by default but can be disabled at cluster + * construction time through {@link Cluster.Builder#withoutMetrics}. + * + * @return the metrics options or {@code null} if metrics are not enabled. + */ + public MetricsOptions getMetricsOptions() { + return metricsOptions; + } + + /** + * Returns the queries configuration. + * + * @return the queries options. + */ + public QueryOptions getQueryOptions() { + return queryOptions; + } + + /** + * Returns the {@link NettyOptions} instance for this configuration. + * @return the {@link NettyOptions} instance for this configuration. + */ + public NettyOptions getNettyOptions() { + return nettyOptions; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java new file mode 100644 index 00000000000..7b4ec8928b6 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -0,0 +1,1320 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.lang.ref.WeakReference; +import java.net.InetSocketAddress; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import com.google.common.collect.Lists; +import com.google.common.collect.MapMaker; +import com.google.common.util.concurrent.*; +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.*; +import io.netty.channel.group.ChannelGroup; +import io.netty.channel.group.DefaultChannelGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.handler.ssl.SslHandler; +import io.netty.handler.timeout.IdleStateEvent; +import io.netty.handler.timeout.IdleStateHandler; +import io.netty.util.HashedWheelTimer; +import io.netty.util.Timeout; +import io.netty.util.TimerTask; +import io.netty.util.concurrent.GlobalEventExecutor; +import javax.net.ssl.SSLEngine; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static io.netty.handler.timeout.IdleState.ALL_IDLE; + +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.utils.MoreFutures; + +// For LoggingHandler +//import org.jboss.netty.handler.logging.LoggingHandler; +//import org.jboss.netty.logging.InternalLogLevel; + +/** + * A connection to a Cassandra Node. 
+ */ +class Connection { + + private static final Logger logger = LoggerFactory.getLogger(Connection.class); + private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; + + private static final boolean DISABLE_COALESCING = SystemProperties.getBoolean("com.datastax.driver.DISABLE_COALESCING", false); + + enum State {OPEN, TRASHED, RESURRECTING, GONE } + + final AtomicReference state = new AtomicReference(State.OPEN); + + volatile long maxIdleTime; + + public final InetSocketAddress address; + private final String name; + + private volatile Channel channel; + private final Factory factory; + + private final Dispatcher dispatcher; + + // Used by connection pooling to count how many requests are "in flight" on that connection. + public final AtomicInteger inFlight = new AtomicInteger(0); + + private final AtomicInteger writer = new AtomicInteger(0); + private volatile String keyspace; + + private volatile boolean isInitialized; + private volatile boolean isDefunct; + + private final AtomicReference closeFuture = new AtomicReference(); + + private final AtomicReference poolRef = new AtomicReference(); + + /** + /** + * Create a new connection to a Cassandra node and associate it with the given pool. + * + * @param name the connection name + * @param address the remote address + * @param factory the connection factory to use + * @param pool the pool this connection belongs to. May be null if this connection does not belong to a pool. + * Note that an existing connection can also be associated to a pool later with {@link #setPool(HostConnectionPool)}. + */ + protected Connection(String name, InetSocketAddress address, Factory factory, HostConnectionPool pool) { + this.address = address; + this.factory = factory; + this.dispatcher = new Dispatcher(); + this.name = name; + this.poolRef.set(pool); + } + + /** + * Create a new connection to a Cassandra node. 
+ */ + Connection(String name, InetSocketAddress address, Factory factory) { + this(name, address, factory, null); + } + + public ListenableFuture initAsync() { + if (factory.isShutdown) + return Futures.immediateFailedFuture(new ConnectionException(address, "Connection factory is shut down")); + + ProtocolVersion protocolVersion = factory.protocolVersion == null ? ProtocolVersion.NEWEST_SUPPORTED : factory.protocolVersion; + final SettableFuture channelReadyFuture = SettableFuture.create(); + + try { + Bootstrap bootstrap = factory.newBootstrap(); + ProtocolOptions protocolOptions = factory.configuration.getProtocolOptions(); + bootstrap.handler( + new Initializer(this, protocolVersion, protocolOptions.getCompression().compressor(), protocolOptions.getSSLOptions(), + factory.configuration.getPoolingOptions().getHeartbeatIntervalSeconds(), + factory.configuration.getNettyOptions())); + + ChannelFuture future = bootstrap.connect(address); + + writer.incrementAndGet(); + future.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + writer.decrementAndGet(); + channel = future.channel(); + if (isClosed()) { + channel.close().addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + channelReadyFuture.setException(new TransportException(Connection.this.address, "Connection closed during initialization.")); + } + }); + } else { + Connection.this.factory.allChannels.add(channel); + if (!future.isSuccess()) { + if (logger.isDebugEnabled()) + logger.debug(String.format("%s Error connecting to %s%s", Connection.this, Connection.this.address, extractMessage(future.cause()))); + channelReadyFuture.setException(new TransportException(Connection.this.address, "Cannot connect", future.cause())); + } else { + logger.debug("{} Connection opened successfully", Connection.this); + channel.closeFuture().addListener(new 
ChannelCloseListener()); + channelReadyFuture.set(null); + } + } + } + }); + } catch (RuntimeException e) { + closeAsync().force(); + throw e; + } + + Executor initExecutor = factory.manager.configuration.getPoolingOptions().getInitializationExecutor(); + + ListenableFuture initializeTransportFuture = Futures.transform(channelReadyFuture, + onChannelReady(protocolVersion, initExecutor), initExecutor); + + // Fallback on initializeTransportFuture so we can properly propagate specific exceptions. + ListenableFuture initFuture = Futures.withFallback(initializeTransportFuture, new FutureFallback() { + @Override + public ListenableFuture create(Throwable t) throws Exception { + SettableFuture future = SettableFuture.create(); + // Make sure the connection gets properly closed. + if (t instanceof ClusterNameMismatchException || t instanceof UnsupportedProtocolVersionException) { + // These exceptions cause the node to be ignored, so just propagate + closeAsync().force(); + future.setException(t); + } else { + // Defunct to ensure that the error will be signaled (marking the host down) + Exception e = (t instanceof ConnectionException || t instanceof DriverException || t instanceof InterruptedException) + ? (Exception)t + : new ConnectionException(Connection.this.address, + String.format("Unexpected error during transport initialization (%s)", t), + t); + future.setException(defunct(e)); + } + return future; + } + }, initExecutor); + + // If initFuture fails, close the connection. This is needed as withFallback doesn't account for cancel. 
+ Futures.addCallback(initFuture, new FutureCallback() { + @Override + public void onSuccess(Void result) { + isInitialized = true; + } + + @Override + public void onFailure(Throwable t) { + if (!isClosed()) { + closeAsync().force(); + } + } + }, initExecutor); + + return initFuture; + } + + private static String extractMessage(Throwable t) { + if (t == null) + return ""; + String msg = t.getMessage() == null || t.getMessage().isEmpty() + ? t.toString() + : t.getMessage(); + return " (" + msg + ')'; + } + + private AsyncFunction onChannelReady(final ProtocolVersion protocolVersion, final Executor initExecutor) { + return new AsyncFunction() { + @Override + public ListenableFuture apply(Void input) throws Exception { + ProtocolOptions.Compression compression = factory.configuration.getProtocolOptions().getCompression(); + Future startupResponseFuture = write(new Requests.Startup(compression)); + return Futures.transform(startupResponseFuture, + onStartupResponse(protocolVersion, initExecutor), initExecutor); + } + }; + } + + private AsyncFunction onStartupResponse(final ProtocolVersion protocolVersion, final Executor initExecutor) { + return new AsyncFunction() { + @Override + public ListenableFuture apply(Message.Response response) throws Exception { + switch (response.type) { + case READY: + return checkClusterName(protocolVersion, initExecutor); + case ERROR: + Responses.Error error = (Responses.Error)response; + // Testing for a specific string is a tad fragile but well, we don't have much choice + if (error.code == ExceptionCode.PROTOCOL_ERROR && error.message.contains("Invalid or unsupported protocol version")) + throw unsupportedProtocolVersionException(protocolVersion, error.serverProtocolVersion); + throw new TransportException(address, String.format("Error initializing connection: %s", error.message)); + case AUTHENTICATE: + Authenticator authenticator = factory.authProvider.newAuthenticator(address); + switch (protocolVersion) { + case V1: + if 
(authenticator instanceof ProtocolV1Authenticator) + return authenticateV1(authenticator, protocolVersion, initExecutor); + else + // DSE 3.x always uses SASL authentication backported from protocol v2 + return authenticateV2(authenticator, protocolVersion, initExecutor); + case V2: + case V3: + return authenticateV2(authenticator, protocolVersion, initExecutor); + default: + throw defunct(protocolVersion.unsupported()); + } + default: + throw new TransportException(address, String.format("Unexpected %s response message from server to a STARTUP message", response.type)); + } + } + }; + } + + // Due to C* gossip bugs, system.peers may report nodes that are gone from the cluster. + // If these nodes have been recommissionned to another cluster and are up, nothing prevents the driver from connecting + // to them. So we check that the cluster the node thinks it belongs to is our cluster (JAVA-397). + private ListenableFuture checkClusterName(ProtocolVersion protocolVersion, final Executor executor) { + final String expected = factory.manager.metadata.clusterName; + + // At initialization, the cluster is not known yet + if (expected == null) + return MoreFutures.VOID_SUCCESS; + + DefaultResultSetFuture clusterNameFuture = new DefaultResultSetFuture(null, protocolVersion, new Requests.Query("select cluster_name from system.local")); + try { + write(clusterNameFuture); + return Futures.transform(clusterNameFuture, + new AsyncFunction() { + @Override + public ListenableFuture apply(ResultSet rs) throws Exception { + Row row = rs.one(); + String actual = row.getString("cluster_name"); + if (!expected.equals(actual)) + throw new ClusterNameMismatchException(address, actual, expected); + return MoreFutures.VOID_SUCCESS; + } + }, executor); + } catch (Exception e) { + return Futures.immediateFailedFuture(e); + } + } + + private ListenableFuture authenticateV1(Authenticator authenticator, final ProtocolVersion protocolVersion, final Executor executor) { + Requests.Credentials 
creds = new Requests.Credentials(((ProtocolV1Authenticator)authenticator).getCredentials()); + try { + Future authResponseFuture = write(creds); + return Futures.transform(authResponseFuture, + new AsyncFunction() { + @Override + public ListenableFuture apply(Message.Response authResponse) throws Exception { + switch (authResponse.type) { + case READY: + return checkClusterName(protocolVersion, executor); + case ERROR: + throw new AuthenticationException(address, ((Responses.Error)authResponse).message); + default: + throw new TransportException(address, String.format("Unexpected %s response message from server to a CREDENTIALS message", authResponse.type)); + } + } + }, executor); + } catch (Exception e) { + return Futures.immediateFailedFuture(e); + } + } + + private ListenableFuture authenticateV2(final Authenticator authenticator, final ProtocolVersion protocolVersion, final Executor executor) { + byte[] initialResponse = authenticator.initialResponse(); + if (null == initialResponse) + initialResponse = EMPTY_BYTE_ARRAY; + + try { + Future authResponseFuture = write(new Requests.AuthResponse(initialResponse)); + return Futures.transform(authResponseFuture, onV2AuthResponse(authenticator, protocolVersion, executor), executor); + } catch (Exception e) { + return Futures.immediateFailedFuture(e); + } + } + + private AsyncFunction onV2AuthResponse(final Authenticator authenticator, final ProtocolVersion protocolVersion, final Executor executor) { + return new AsyncFunction() { + @Override + public ListenableFuture apply(Message.Response authResponse) throws Exception { + switch (authResponse.type) { + case AUTH_SUCCESS: + logger.trace("{} Authentication complete", this); + authenticator.onAuthenticationSuccess(((Responses.AuthSuccess)authResponse).token); + return checkClusterName(protocolVersion, executor); + case AUTH_CHALLENGE: + byte[] responseToServer = authenticator.evaluateChallenge(((Responses.AuthChallenge)authResponse).token); + if (responseToServer == 
null) { + // If we generate a null response, then authentication has completed, proceed without + // sending a further response back to the server. + logger.trace("{} Authentication complete (No response to server)", this); + return checkClusterName(protocolVersion, executor); + } else { + // Otherwise, send the challenge response back to the server + logger.trace("{} Sending Auth response to challenge", this); + Future nextResponseFuture = write(new Requests.AuthResponse(responseToServer)); + return Futures.transform(nextResponseFuture, onV2AuthResponse(authenticator, protocolVersion, executor), executor); + } + case ERROR: + // This is not very nice, but we're trying to identify if we + // attempted v2 auth against a server which only supports v1 + // The AIOOBE indicates that the server didn't recognise the + // initial AuthResponse message + String message = ((Responses.Error)authResponse).message; + if (message.startsWith("java.lang.ArrayIndexOutOfBoundsException: 15")) + message = String.format("Cannot use authenticator %s with protocol version 1, " + + "only plain text authentication is supported with this protocol version", authenticator); + throw new AuthenticationException(address, message); + default: + throw new TransportException(address, String.format("Unexpected %s response message from server to authentication message", authResponse.type)); + } + } + }; + } + + private UnsupportedProtocolVersionException unsupportedProtocolVersionException(ProtocolVersion triedVersion, ProtocolVersion serverProtocolVersion) { + logger.debug("Got unsupported protocol version error from {} for version {} server supports version {}", address, triedVersion, serverProtocolVersion); + return new UnsupportedProtocolVersionException(address, triedVersion, serverProtocolVersion); + } + + public boolean isDefunct() { + return isDefunct; + } + + public int maxAvailableStreams() { + return dispatcher.streamIdHandler.maxAvailableStreams(); + } + + E defunct(E e) { + if 
(logger.isDebugEnabled()) + logger.debug("Defuncting connection to " + address, e); + isDefunct = true; + + ConnectionException ce = e instanceof ConnectionException + ? (ConnectionException)e + : new ConnectionException(address, "Connection problem", e); + + Host host = factory.manager.metadata.getHost(address); + if (host != null) { + // This will trigger onDown, including when the defunct Connection is part of a reconnection attempt, which is redundant. + // This is not too much of a problem since calling onDown on a node that is already down has no effect. + boolean isDown = factory.manager.signalConnectionFailure(host, ce, host.wasJustAdded()); + notifyOwnerWhenDefunct(isDown); + } + + // Force the connection to close to make sure the future completes. Otherwise force() might never get called and + // threads will wait on the future forever. + // (this also errors out pending handlers) + closeAsync().force(); + + return e; + } + + protected void notifyOwnerWhenDefunct(boolean hostIsDown) { + // If an error happens during initialization, the owner will detect it and take appropriate action + if (!isInitialized) + return; + + HostConnectionPool pool = this.poolRef.get(); + if (pool == null) + return; + + if (hostIsDown) { + pool.closeAsync().force(); + } else { + pool.replaceDefunctConnection(this); + } + } + + public String keyspace() { + return keyspace; + } + + public void setKeyspace(String keyspace) throws ConnectionException { + if (keyspace == null) + return; + + if (this.keyspace != null && this.keyspace.equals(keyspace)) + return; + + Future future = null; + try { + logger.trace("{} Setting keyspace {}", this, keyspace); + long timeout = factory.getConnectTimeoutMillis(); + // Note: we quote the keyspace below, because the name is the one coming from Cassandra, so it's in the right case already + future = write(new Requests.Query("USE \"" + keyspace + '"')); + Message.Response response = Uninterruptibles.getUninterruptibly(future, timeout, 
TimeUnit.MILLISECONDS); + switch (response.type) { + case RESULT: + this.keyspace = keyspace; + break; + default: + // The code set the keyspace only when a successful 'use' + // has been perform, so there shouldn't be any error here. + // It can happen however that the node we're connecting to + // is not up on the schema yet. In that case, defuncting + // the connection is not a bad choice. + String message = String.format("Problem while setting keyspace, got %s as response", response); + logger.warn("{} {}", this, message); + defunct(new ConnectionException(address, message)); + break; + } + } catch (ConnectionException e) { + throw defunct(e); + } catch (TimeoutException e) { + // We've given up waiting on the future, but it's still running. Cancel to make sure that the request timeout logic + // (readTimeout) will not kick in, because that would release the connection. This will work since connectTimeout is + // generally lower than readTimeout (and if not, we'll get an ExecutionException and defunct below). + future.cancel(true); + logger.warn(String.format("Timeout while setting keyspace on connection to %s. This should not happen but is not critical (it will retried)", address)); + // Rethrow so that the caller will not try to use the connection, but do not defunct as we don't want to mark down + throw new ConnectionException(address, "Timeout while setting keyspace on connection"); + } catch (BusyConnectionException e) { + logger.warn(String.format("Tried to set the keyspace on busy connection to %s. This should not happen but is not critical (it will retried)", address)); + throw new ConnectionException(address, "Tried to set the keyspace on busy connection"); + } catch (ExecutionException e) { + throw defunct(new ConnectionException(address, "Error while setting keyspace", e)); + } + } + + /** + * Write a request on this connection. 
+ * + * @param request the request to send + * @return a future on the server response + * + * @throws ConnectionException if the connection is closed + * @throws TransportException if an I/O error while sending the request + */ + public Future write(Message.Request request) throws ConnectionException, BusyConnectionException { + Future future = new Future(request); + write(future); + return future; + } + + public ResponseHandler write(ResponseCallback callback) throws ConnectionException, BusyConnectionException { + return write(callback, true); + } + + public ResponseHandler write(ResponseCallback callback, boolean startTimeout) throws ConnectionException, BusyConnectionException { + + Message.Request request = callback.request(); + + ResponseHandler handler = new ResponseHandler(this, callback); + dispatcher.add(handler); + request.setStreamId(handler.streamId); + + /* + * We check for close/defunct *after* having set the handler because closing/defuncting + * will set their flag and then error out handler if need. So, by doing the check after + * having set the handler, we guarantee that even if we race with defunct/close, we may + * never leave a handler that won't get an answer or be errored out. 
+ */ + if (isDefunct) { + dispatcher.removeHandler(handler, true); + throw new ConnectionException(address, "Write attempt on defunct connection"); + } + + if (isClosed()) { + dispatcher.removeHandler(handler, true); + throw new ConnectionException(address, "Connection has been closed"); + } + + logger.trace("{} writing request {}", this, request); + writer.incrementAndGet(); + + if (DISABLE_COALESCING) { + channel.writeAndFlush(request).addListener(writeHandler(request, handler)); + } else { + flush(new FlushItem(channel, request, writeHandler(request, handler))); + } + if (startTimeout) + handler.startTimeout(); + + return handler; + } + + private ChannelFutureListener writeHandler(final Message.Request request, final ResponseHandler handler) { + return new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture writeFuture) { + + writer.decrementAndGet(); + + if (!writeFuture.isSuccess()) { + logger.debug("{} Error writing request {}", Connection.this, request); + // Remove this handler from the dispatcher so it don't get notified of the error + // twice (we will fail that method already) + dispatcher.removeHandler(handler, true); + + final ConnectionException ce; + if (writeFuture.cause() instanceof java.nio.channels.ClosedChannelException) { + ce = new TransportException(address, "Error writing: Closed channel"); + } else { + ce = new TransportException(address, "Error writing", writeFuture.cause()); + } + final long latency = System.nanoTime() - handler.startTime; + // This handler is executed while holding the writeLock of the channel. + // defunct might close the pool, which will close all of its connections; closing a connection also + // requires its writeLock. + // Therefore if multiple connections in the same pool get a write error, they could deadlock; + // we run defunct on a separate thread to avoid that. 
+ factory.manager.executor.execute(new Runnable() { + @Override + public void run() { + handler.callback.onException(Connection.this, defunct(ce), latency, handler.retryCount); + } + }); + } else { + logger.trace("{} request sent successfully", Connection.this); + } + } + }; + } + + boolean hasPool() { + return this.poolRef.get() != null; + } + + /** @return whether the connection was already associated with a pool */ + boolean setPool(HostConnectionPool pool) { + return poolRef.compareAndSet(null, pool); + } + + /** + * If the connection is part of a pool, return it to the pool. + * The connection should generally not be reused after that. + */ + void release() { + HostConnectionPool pool = poolRef.get(); + if (pool != null) + pool.returnConnection(this); + } + + public boolean isClosed() { + return closeFuture.get() != null; + } + + /** + * Closes the connection: no new writes will be accepted after this method has returned. + * + * However, a closed connection might still have ongoing queries awaiting for their result. + * When all these ongoing queries have completed, the underlying channel will be closed; we + * refer to this final state as "terminated". + * + * @return a future that will complete once the connection has terminated. + * + * @see #tryTerminate(boolean) + */ + public CloseFuture closeAsync() { + + ConnectionCloseFuture future = new ConnectionCloseFuture(); + if (!closeFuture.compareAndSet(null, future)) { + // close had already been called, return the existing future + return closeFuture.get(); + } + + logger.debug("{} closing connection", this); + + boolean terminated = tryTerminate(false); + if (!terminated) { + // The time by which all pending requests should have normally completed (use twice the read timeout for a generous + // estimate -- note that this does not cover the eventuality that read timeout is updated dynamically, but we can live + // with that). 
+ long terminateTime = System.currentTimeMillis() + 2 * factory.getReadTimeoutMillis(); + factory.reaper.register(this, terminateTime); + } + return future; + } + + /** + * Tries to terminate a closed connection, i.e. release system resources. + * + * This is called both by "normal" code and by {@link Cluster.ConnectionReaper}. + * + * @param force whether to proceed if there are still outstanding requests. + * @return whether the connection has actually terminated. + * + * @see #closeAsync() + */ + boolean tryTerminate(boolean force) { + assert isClosed(); + ConnectionCloseFuture future = closeFuture.get(); + + if (future.isDone()) { + logger.debug("{} has already terminated", this); + return true; + } else { + if (force || dispatcher.pending.isEmpty()) { + if (force) + logger.warn("Forcing termination of {}. This should not happen and is likely a bug, please report.", this); + future.force(); + return true; + } else { + logger.debug("Not terminating {}: there are still pending requests", this); + return false; + } + } + } + + @Override + public String toString() { + return String.format("Connection[%s, inFlight=%d, closed=%b]", name, inFlight.get(), isClosed()); + } + + public static class Factory { + + public final HashedWheelTimer timer; + + private final EventLoopGroup eventLoopGroup; + private final Class channelClass; + + private final ChannelGroup allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE); + + private final ConcurrentMap idGenerators = new ConcurrentHashMap(); + public final DefaultResponseHandler defaultHandler; + final Cluster.Manager manager; + final Cluster.ConnectionReaper reaper; + public final Configuration configuration; + + public final AuthProvider authProvider; + private volatile boolean isShutdown; + + volatile ProtocolVersion protocolVersion; + private final NettyOptions nettyOptions; + + Factory(Cluster.Manager manager, Configuration configuration) { + this.defaultHandler = manager; + this.manager = manager; + 
this.reaper = manager.reaper; + this.configuration = configuration; + this.authProvider = configuration.getProtocolOptions().getAuthProvider(); + this.protocolVersion = configuration.getProtocolOptions().initialProtocolVersion; + this.nettyOptions = configuration.getNettyOptions(); + this.eventLoopGroup = nettyOptions.eventLoopGroup(manager.threadFactory("nio-worker")); + this.channelClass = nettyOptions.channelClass(); + this.timer = new HashedWheelTimer(manager.threadFactory("timeouter")); + } + + public int getPort() { + return configuration.getProtocolOptions().getPort(); + } + + /** + * Opens a new connection to the node this factory points to. + * + * @return the newly created (and initialized) connection. + * + * @throws ConnectionException if connection attempt fails. + */ + public Connection open(Host host) throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { + InetSocketAddress address = host.getSocketAddress(); + + if (isShutdown) + throw new ConnectionException(address, "Connection factory is shut down"); + + String name = address.toString() + '-' + getIdGenerator(host).getAndIncrement(); + Connection connection = new Connection(name, address, this); + // This method opens the connection synchronously, so wait until it's initialized + try { + connection.initAsync().get(); + return connection; + } catch (ExecutionException e) { + throw launderAsyncInitException(e); + } + } + + /** + * Same as open, but associate the created connection to the provided connection pool. 
+ */ + public Connection open(HostConnectionPool pool) throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { + try { + Connection connection = newConnection(pool); + connection.initAsync().get(); + return connection; + } catch (ExecutionException e) { + throw launderAsyncInitException(e); + } + } + + /** + * Creates a new connection and associates it to the provided connection pool, but does not start it. + */ + public Connection newConnection(HostConnectionPool pool) { + InetSocketAddress address = pool.host.getSocketAddress(); + String name = address.toString() + '-' + getIdGenerator(pool.host).getAndIncrement(); + + return new Connection(name, address, this, pool); + } + + static RuntimeException launderAsyncInitException(ExecutionException e) throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { + Throwable t = e.getCause(); + if (t instanceof ConnectionException) + throw (ConnectionException)t; + if (t instanceof InterruptedException) + throw (InterruptedException)t; + if (t instanceof UnsupportedProtocolVersionException) + throw (UnsupportedProtocolVersionException)t; + if (t instanceof ClusterNameMismatchException) + throw (ClusterNameMismatchException)t; + if (t instanceof DriverException) + throw (DriverException)t; + + return new RuntimeException("Unexpected exception during connection initialization", t); + } + + private AtomicInteger getIdGenerator(Host host) { + AtomicInteger g = idGenerators.get(host); + if (g == null) { + g = new AtomicInteger(1); + AtomicInteger old = idGenerators.putIfAbsent(host, g); + if (old != null) + g = old; + } + return g; + } + + public long getConnectTimeoutMillis() { + return configuration.getSocketOptions().getConnectTimeoutMillis(); + } + + public long getReadTimeoutMillis() { + return configuration.getSocketOptions().getReadTimeoutMillis(); + } + + private Bootstrap newBootstrap() { + 
Bootstrap b = new Bootstrap(); + b.group(eventLoopGroup) + .channel(channelClass); + + SocketOptions options = configuration.getSocketOptions(); + + b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeoutMillis()); + Boolean keepAlive = options.getKeepAlive(); + if (keepAlive != null) + b.option(ChannelOption.SO_KEEPALIVE, keepAlive); + Boolean reuseAddress = options.getReuseAddress(); + if (reuseAddress != null) + b.option(ChannelOption.SO_REUSEADDR, reuseAddress); + Integer soLinger = options.getSoLinger(); + if (soLinger != null) + b.option(ChannelOption.SO_LINGER, soLinger); + Boolean tcpNoDelay = options.getTcpNoDelay(); + if (tcpNoDelay != null) + b.option(ChannelOption.TCP_NODELAY, tcpNoDelay); + Integer receiveBufferSize = options.getReceiveBufferSize(); + if (receiveBufferSize != null) + b.option(ChannelOption.SO_RCVBUF, receiveBufferSize); + Integer sendBufferSize = options.getSendBufferSize(); + if (sendBufferSize != null) + b.option(ChannelOption.SO_SNDBUF, sendBufferSize); + + nettyOptions.afterBootstrapInitialized(b); + return b; + } + + public void shutdown() { + // Make sure we skip creating connection from now on. + isShutdown = true; + + // All channels should be closed already, we call this just to be sure. And we know + // we're not on an I/O thread or anything, so just call await. 
+ allChannels.close().awaitUninterruptibly(); + + nettyOptions.onClusterClose(eventLoopGroup); + timer.stop(); + } + } + + private static final class Flusher implements Runnable { + final WeakReference eventLoopRef; + final Queue queued = new ConcurrentLinkedQueue(); + final AtomicBoolean running = new AtomicBoolean(false); + final HashSet channels = new HashSet(); + int runsWithNoWork = 0; + + private Flusher(EventLoop eventLoop) { + this.eventLoopRef = new WeakReference(eventLoop); + } + + void start() { + if (!running.get() && running.compareAndSet(false, true)) { + EventLoop eventLoop = eventLoopRef.get(); + if (eventLoop != null) + eventLoop.execute(this); + } + } + + @Override + public void run() { + + boolean doneWork = false; + FlushItem flush; + while (null != (flush = queued.poll())) { + channels.add(flush.channel); + flush.channel.write(flush.request).addListener(flush.listener); + doneWork = true; + } + + // Always flush what we have (don't artificially delay to try to coalesce more messages) + for (Channel channel : channels) + channel.flush(); + channels.clear(); + + if (doneWork) { + runsWithNoWork = 0; + } else { + // either reschedule or cancel + if (++runsWithNoWork > 5) { + running.set(false); + if (queued.isEmpty() || !running.compareAndSet(false, true)) + return; + } + } + + EventLoop eventLoop = eventLoopRef.get(); + if(eventLoop != null) { + eventLoop.schedule(this, 10000, TimeUnit.NANOSECONDS); + } + } + } + + private static final ConcurrentMap flusherLookup = new MapMaker() + .concurrencyLevel(16) + .weakKeys() + .makeMap(); + + private static class FlushItem { + final Channel channel; + final Object request; + final ChannelFutureListener listener; + + private FlushItem(Channel channel, Object request, ChannelFutureListener listener) { + this.channel = channel; + this.request = request; + this.listener = listener; + } + } + + private void flush(FlushItem item) { + EventLoop loop = item.channel.eventLoop(); + Flusher flusher = 
flusherLookup.get(loop); + if (flusher == null) { + Flusher alt = flusherLookup.putIfAbsent(loop, flusher = new Flusher(loop)); + if (alt != null) + flusher = alt; + } + + flusher.queued.add(item); + flusher.start(); + } + + private class Dispatcher extends SimpleChannelInboundHandler { + + public final StreamIdGenerator streamIdHandler; + private final ConcurrentMap pending = new ConcurrentHashMap(); + + Dispatcher() { + ProtocolVersion protocolVersion = factory.protocolVersion; + if (protocolVersion == null) { + // This happens for the first control connection because the protocol version has not been + // negociated yet. + assert !Connection.this.hasPool(); + protocolVersion = ProtocolVersion.V2; + } + streamIdHandler = StreamIdGenerator.newInstance(protocolVersion); + } + + public void add(ResponseHandler handler) { + ResponseHandler old = pending.put(handler.streamId, handler); + assert old == null; + } + + public void removeHandler(ResponseHandler handler, boolean releaseStreamId) { + + // If we don't release the ID, mark first so that we can rely later on the fact that if + // we receive a response for an ID with no handler, it's that this ID has been marked. + if (!releaseStreamId) + streamIdHandler.mark(handler.streamId); + + // If a RequestHandler is cancelled right when the response arrives, this method (called with releaseStreamId=false) will race with messageReceived. + // messageReceived could have already released the streamId, which could have already been reused by another request. We must not remove the handler + // if it's not ours, because that would cause the other request to hang forever. + boolean removed = pending.remove(handler.streamId, handler); + if (!removed) { + // We raced, so if we marked the streamId above, that was wrong. 
+ if (!releaseStreamId) + streamIdHandler.unmark(handler.streamId); + return; + } + handler.cancelTimeout(); + + if (releaseStreamId) + streamIdHandler.release(handler.streamId); + + if (isClosed()) + tryTerminate(false); + } + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Message.Response response) throws Exception { + int streamId = response.getStreamId(); + + if(logger.isTraceEnabled()) + logger.trace("{} received: {}", Connection.this, asDebugString(response)); + + if (streamId < 0) { + factory.defaultHandler.handle(response); + return; + } + + ResponseHandler handler = pending.remove(streamId); + streamIdHandler.release(streamId); + if (handler == null) { + /** + * During normal operation, we should not receive responses for which we don't have a handler. There is + * two cases however where this can happen: + * 1) The connection has been defuncted due to some internal error and we've raced between removing the + * handler and actually closing the connection; since the original error has been logged, we're fine + * ignoring this completely. + * 2) This request has timed out. In that case, we've already switched to another host (or errored out + * to the user). So log it for debugging purpose, but it's fine ignoring otherwise. + */ + streamIdHandler.unmark(streamId); + if (logger.isDebugEnabled()) + logger.debug("{} Response received on stream {} but no handler set anymore (either the request has " + + "timed out or it was closed due to another error). 
Received message is {}", Connection.this, streamId, asDebugString(response)); + return; + } + handler.cancelTimeout(); + handler.callback.onSet(Connection.this, response, System.nanoTime() - handler.startTime, handler.retryCount); + + // If we happen to be closed and we're the last outstanding request, we need to terminate the connection + // (note: this is racy as the signaling can be called more than once, but that's not a problem) + if (isClosed()) + tryTerminate(false); + } + + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { + if (evt instanceof IdleStateEvent && ((IdleStateEvent)evt).state() == ALL_IDLE) { + logger.debug("{} was inactive for {} seconds, sending heartbeat", Connection.this, factory.configuration.getPoolingOptions().getHeartbeatIntervalSeconds()); + write(HEARTBEAT_CALLBACK); + } + } + + // Make sure we don't print huge responses in debug/error logs. + private String asDebugString(Object obj) { + if (obj == null) + return "null"; + + String msg = obj.toString(); + if (msg.length() < 500) + return msg; + + return msg.substring(0, 500) + "... 
[message of size " + msg.length() + " truncated]"; + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + if (logger.isDebugEnabled()) + logger.debug(String.format("%s connection error", Connection.this), cause); + + // Ignore exception while writing, this will be handled by write() directly + if (writer.get() > 0) + return; + + defunct(new TransportException(address, String.format("Unexpected exception triggered (%s)", cause), cause)); + } + + public void errorOutAllHandler(ConnectionException ce) { + Iterator iter = pending.values().iterator(); + while (iter.hasNext()) + { + ResponseHandler handler = iter.next(); + handler.cancelTimeout(); + handler.callback.onException(Connection.this, ce, System.nanoTime() - handler.startTime, handler.retryCount); + iter.remove(); + } + } + } + + private class ChannelCloseListener implements ChannelFutureListener { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + // If we've closed the channel client side then we don't really want to defunct the connection, but + // if there is remaining thread waiting on us, we still want to wake them up + if (!isInitialized || isClosed()) { + dispatcher.errorOutAllHandler(new TransportException(address, "Channel has been closed")); + // we still want to force so that the future completes + Connection.this.closeAsync().force(); + } else + defunct(new TransportException(address, "Channel has been closed")); + } + } + + private static final ResponseCallback HEARTBEAT_CALLBACK = new ResponseCallback() { + + @Override + public Message.Request request() { + return new Requests.Options(); + } + + @Override + public int retryCount() { + return 0; // no retries here + } + + @Override + public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { + switch (response.type) { + case SUPPORTED: + logger.debug("{} heartbeat query succeeded", connection); + break; + default: 
+ fail(connection, new ConnectionException(connection.address, "Unexpected heartbeat response: " + response)); + } + } + + @Override + public void onException(Connection connection, Exception exception, long latency, int retryCount) { + // Nothing to do: the connection is already defunct if we arrive here + } + + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + fail(connection, new ConnectionException(connection.address, "Heartbeat query timed out")); + return true; + } + + private void fail(Connection connection, Exception e) { + connection.defunct(e); + } + }; + + private class ConnectionCloseFuture extends CloseFuture { + + @Override + public ConnectionCloseFuture force() { + // Note: we must not call releaseExternalResources on the bootstrap, because this shutdown the executors, which are shared + + // This method can be thrown during initialization, at which point channel is not yet set. This is ok. + if (channel == null) { + set(null); + return this; + } + + // We're going to close this channel. If anyone is waiting on that connection, we should defunct it otherwise it'll wait + // forever. In general this won't happen since we get there only when all ongoing query are done, but this can happen + // if the shutdown is forced. This is a no-op if there is no handler set anymore. 
+ dispatcher.errorOutAllHandler(new TransportException(address, "Connection has been closed")); + + ChannelFuture future = channel.close(); + future.addListener(new ChannelFutureListener() { + public void operationComplete(ChannelFuture future) { + factory.allChannels.remove(channel); + if (future.cause() != null) { + logger.warn("Error closing channel", future.cause()); + ConnectionCloseFuture.this.setException(future.cause()); + } else + ConnectionCloseFuture.this.set(null); + } + }); + return this; + } + } + + static class Future extends AbstractFuture implements RequestHandler.Callback { + + private final Message.Request request; + private volatile InetSocketAddress address; + + public Future(Message.Request request) { + this.request = request; + } + + @Override + public void register(RequestHandler handler) { + // noop, we don't care about the handler here so far + } + + @Override + public Message.Request request() { + return request; + } + + @Override + public int retryCount() { + // This is ignored, as there is no retry logic in this class + return 0; + } + + @Override + public void onSet(Connection connection, Message.Response response, ExecutionInfo info, Statement statement, long latency) { + onSet(connection, response, latency, 0); + } + + @Override + public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { + this.address = connection.address; + super.set(response); + } + + @Override + public void onException(Connection connection, Exception exception, long latency, int retryCount) { + // If all nodes are down, we will get a null connection here. This is fine, if we have + // an exception, consumers shouldn't assume the address is not null. 
+ if (connection != null) + this.address = connection.address; + super.setException(exception); + } + + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + assert connection != null; // We always timeout on a specific connection, so this shouldn't be null + this.address = connection.address; + return super.setException(new OperationTimedOutException(connection.address)); + } + + public InetSocketAddress getAddress() { + return address; + } + } + + interface ResponseCallback { + public Message.Request request(); + public int retryCount(); + public void onSet(Connection connection, Message.Response response, long latency, int retryCount); + public void onException(Connection connection, Exception exception, long latency, int retryCount); + public boolean onTimeout(Connection connection, long latency, int retryCount); + } + + static class ResponseHandler { + + public final Connection connection; + public final int streamId; + public final ResponseCallback callback; + public final int retryCount; + + private final long startTime; + private volatile Timeout timeout; + + private final AtomicBoolean isCancelled = new AtomicBoolean(); + + public ResponseHandler(Connection connection, ResponseCallback callback) throws BusyConnectionException { + this.connection = connection; + this.streamId = connection.dispatcher.streamIdHandler.next(); + this.callback = callback; + this.retryCount = callback.retryCount(); + + this.startTime = System.nanoTime(); + } + + void startTimeout() { + long timeoutMs = connection.factory.getReadTimeoutMillis(); + this.timeout = timeoutMs <= 0 ? 
null : connection.factory.timer.newTimeout(onTimeoutTask(), timeoutMs, TimeUnit.MILLISECONDS); + } + + void cancelTimeout() { + if (timeout != null) + timeout.cancel(); + } + + public void cancelHandler() { + if (!isCancelled.compareAndSet(false, true)) + return; + + // We haven't really received a response: we want to remove the handle because we gave up on that + // request and there is no point in holding the handler, but we don't release the streamId. If we + // were, a new request could reuse that ID but get the answer to the request we just gave up on instead + // of its own answer, and we would have no way to detect that. + connection.dispatcher.removeHandler(this, false); + connection.release(); + } + + private TimerTask onTimeoutTask() { + return new TimerTask() { + @Override + public void run(Timeout timeout) { + if (callback.onTimeout(connection, System.nanoTime() - startTime, retryCount)) + cancelHandler(); + } + }; + } + } + + public interface DefaultResponseHandler { + public void handle(Message.Response response); + } + + private static class Initializer extends ChannelInitializer { + // Stateless handlers + private static final Message.ProtocolDecoder messageDecoder = new Message.ProtocolDecoder(); + private static final Message.ProtocolEncoder messageEncoderV1 = new Message.ProtocolEncoder(ProtocolVersion.V1); + private static final Message.ProtocolEncoder messageEncoderV2 = new Message.ProtocolEncoder(ProtocolVersion.V2); + private static final Message.ProtocolEncoder messageEncoderV3 = new Message.ProtocolEncoder(ProtocolVersion.V3); + private static final Frame.Encoder frameEncoder = new Frame.Encoder(); + + private final ProtocolVersion protocolVersion; + private final Connection connection; + private final FrameCompressor compressor; + private final SSLOptions sslOptions; + private final NettyOptions nettyOptions; + private final ChannelHandler idleStateHandler; + + public Initializer(Connection connection, ProtocolVersion protocolVersion, 
FrameCompressor compressor, SSLOptions sslOptions, int heartBeatIntervalSeconds, NettyOptions nettyOptions) { + this.connection = connection; + this.protocolVersion = protocolVersion; + this.compressor = compressor; + this.sslOptions = sslOptions; + this.nettyOptions = nettyOptions; + this.idleStateHandler = new IdleStateHandler(0, 0, heartBeatIntervalSeconds); + } + + @Override + protected void initChannel(SocketChannel channel) throws Exception { + ChannelPipeline pipeline = channel.pipeline(); + + if (sslOptions != null) { + SSLEngine engine = sslOptions.context.createSSLEngine(); + engine.setUseClientMode(true); + engine.setEnabledCipherSuites(sslOptions.cipherSuites); + SslHandler handler = new SslHandler(engine); + pipeline.addLast("ssl", handler); + } + +// pipeline.addLast("debug", new LoggingHandler(LogLevel.INFO)); + + pipeline.addLast("frameDecoder", new Frame.Decoder()); + pipeline.addLast("frameEncoder", frameEncoder); + + if (compressor != null) { + pipeline.addLast("frameDecompressor", new Frame.Decompressor(compressor)); + pipeline.addLast("frameCompressor", new Frame.Compressor(compressor)); + } + + pipeline.addLast("messageDecoder", messageDecoder); + pipeline.addLast("messageEncoder", messageEncoderFor(protocolVersion)); + + pipeline.addLast("idleStateHandler", idleStateHandler); + + pipeline.addLast("dispatcher", connection.dispatcher); + + nettyOptions.afterChannelInitialized(channel); + } + + private Message.ProtocolEncoder messageEncoderFor(ProtocolVersion version) { + switch (version) { + case V1: + return messageEncoderV1; + case V2: + return messageEncoderV2; + case V3: + return messageEncoderV3; + default: + throw new DriverInternalError("Unsupported protocol version " + protocolVersion); + } + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java b/driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java new file mode 100644 index 00000000000..4be8d9775f6 --- /dev/null +++ 
b/driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; + +class ConnectionException extends Exception { + + private static final long serialVersionUID = 0; + + public final InetSocketAddress address; + + public ConnectionException(InetSocketAddress address, String msg, Throwable cause) + { + super(msg, cause); + this.address = address; + } + + public ConnectionException(InetSocketAddress address, String msg) + { + super(msg); + this.address = address; + } + + @Override + public String getMessage() { + return String.format("[%s] %s", address, super.getMessage()); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java new file mode 100644 index 00000000000..0784e359575 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.datastax.driver.core.exceptions.DriverInternalError; + +public enum ConsistencyLevel { + + ANY (0), + ONE (1), + TWO (2), + THREE (3), + QUORUM (4), + ALL (5), + LOCAL_QUORUM (6), + EACH_QUORUM (7), + SERIAL (8), + LOCAL_SERIAL (9), + LOCAL_ONE (10); + + // Used by the native protocol + final int code; + private static final ConsistencyLevel[] codeIdx; + static { + int maxCode = -1; + for (ConsistencyLevel cl : ConsistencyLevel.values()) + maxCode = Math.max(maxCode, cl.code); + codeIdx = new ConsistencyLevel[maxCode + 1]; + for (ConsistencyLevel cl : ConsistencyLevel.values()) { + if (codeIdx[cl.code] != null) + throw new IllegalStateException("Duplicate code"); + codeIdx[cl.code] = cl; + } + } + + private ConsistencyLevel(int code) { + this.code = code; + } + + static ConsistencyLevel fromCode(int code) { + if (code < 0 || code >= codeIdx.length) + throw new DriverInternalError(String.format("Unknown code %d for a consistency level", code)); + return codeIdx[code]; + } + + /** + * Whether or not the the consistency level applies to the local data-center only. + * + * @return whether this consistency level is {@code LOCAL_ONE} or {@code LOCAL_QUORUM}. 
+ */ + public boolean isDCLocal() { + return this == LOCAL_ONE || this == LOCAL_QUORUM; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java new file mode 100644 index 00000000000..a37df029693 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -0,0 +1,729 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import com.google.common.base.Objects; +import com.google.common.util.concurrent.ListenableFuture; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.NoHostAvailableException; + +import static com.datastax.driver.core.SchemaElement.KEYSPACE; +import static com.datastax.driver.core.SchemaElement.TABLE; +import static com.datastax.driver.core.SchemaElement.TYPE; + +class ControlConnection implements Host.StateListener { + + private static final Logger logger = LoggerFactory.getLogger(ControlConnection.class); + + private static final InetAddress bindAllAddress; + static + { + try { + bindAllAddress = InetAddress.getByAddress(new byte[4]); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } + + private static final String SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces"; + private static final String SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies"; + private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; + private static final String SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes"; + + private static final String SELECT_PEERS = "SELECT * FROM system.peers"; + private static final String SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'"; + + private static final String SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers"; + private static final String SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'"; + + private 
final AtomicReference connectionRef = new AtomicReference(); + + private final Cluster.Manager cluster; + + private final AtomicReference> reconnectionAttempt = new AtomicReference>(); + + private volatile boolean isShutdown; + + public ControlConnection(Cluster.Manager manager) { + this.cluster = manager; + } + + // Only for the initial connection. Does not schedule retries if it fails + public void connect() throws UnsupportedProtocolVersionException { + if (isShutdown) + return; + + // NB: at this stage, allHosts() only contains the initial contact points + setNewConnection(reconnectInternal(cluster.metadata.allHosts().iterator(), true)); + } + + public CloseFuture closeAsync() { + // We don't have to be fancy here. We just set a flag so that we stop trying to reconnect (and thus change the + // connection used) and shutdown the current one. + isShutdown = true; + + // Cancel any reconnection attempt in progress + ListenableFuture r = reconnectionAttempt.get(); + if (r != null) + r.cancel(false); + + Connection connection = connectionRef.get(); + return connection == null ? CloseFuture.immediateFuture() : connection.closeAsync(); + } + + Host connectedHost() { + Connection current = connectionRef.get(); + return cluster.metadata.getHost(current.address); + } + + void reconnect() { + if (isShutdown) + return; + + try { + setNewConnection(reconnectInternal(queryPlan(), false)); + } catch (NoHostAvailableException e) { + logger.error("[Control connection] Cannot connect to any host, scheduling retry"); + backgroundReconnect(-1); + } catch (UnsupportedProtocolVersionException e) { + // reconnectInternal only propagate those if we've not decided on the protocol version yet, + // which should only happen on the initial connection and thus in connect() but never here. 
+ throw new AssertionError(); + } catch (Exception e) { + logger.error("[Control connection] Unknown error during reconnection, scheduling retry", e); + backgroundReconnect(-1); + } + } + + /** + * @param initialDelayMs if >=0, bypass the schedule and use this for the first call + */ + private void backgroundReconnect(long initialDelayMs) { + if (isShutdown) + return; + + new AbstractReconnectionHandler(cluster.reconnectionExecutor, cluster.reconnectionPolicy().newSchedule(), reconnectionAttempt, initialDelayMs) { + @Override + protected Connection tryReconnect() throws ConnectionException { + try { + return reconnectInternal(queryPlan(), false); + } catch (NoHostAvailableException e) { + throw new ConnectionException(null, e.getMessage()); + } catch (UnsupportedProtocolVersionException e) { + // reconnectInternal only propagate those if we've not decided on the protocol version yet, + // which should only happen on the initial connection and thus in connect() but never here. + throw new AssertionError(); + } + } + + @Override + protected void onReconnection(Connection connection) { + setNewConnection(connection); + } + + @Override + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { + logger.error("[Control connection] Cannot connect to any host, scheduling retry in {} milliseconds", nextDelayMs); + return true; + } + + @Override + protected boolean onUnknownException(Exception e, long nextDelayMs) { + logger.error(String.format("[Control connection] Unknown error during reconnection, scheduling retry in %d milliseconds", nextDelayMs), e); + return true; + } + }.start(); + } + + private Iterator queryPlan() { + return cluster.loadBalancingPolicy().newQueryPlan(null, Statement.DEFAULT); + } + + private void signalError() { + Connection connection = connectionRef.get(); + if (connection != null && connection.isDefunct() && cluster.metadata.getHost(connection.address) != null) { + // If the connection was marked as defunct and the host 
hadn't left, this already reported the + // host down, which will trigger a reconnect. + return; + } + // If the connection is not defunct, or the host has left, just reconnect manually + backgroundReconnect(0); + } + + private void setNewConnection(Connection newConnection) { + logger.debug("[Control connection] Successfully connected to {}", newConnection.address); + Connection old = connectionRef.getAndSet(newConnection); + if (old != null && !old.isClosed()) + old.closeAsync(); + } + + private Connection reconnectInternal(Iterator iter, boolean isInitialConnection) throws UnsupportedProtocolVersionException { + + Map errors = null; + + Host host = null; + try { + while (iter.hasNext()) { + host = iter.next(); + try { + return tryConnect(host, isInitialConnection); + } catch (ConnectionException e) { + errors = logError(host, e, errors, iter); + if (isInitialConnection) { + // Mark the host down right away so that we don't try it again during the initialization process. + // We don't call cluster.triggerOnDown because it does a bunch of other things we don't want to do here (notify LBP, etc.) + host.setDown(); + cluster.startPeriodicReconnectionAttempt(host, true); + } + } catch (ExecutionException e) { + errors = logError(host, e.getCause(), errors, iter); + } catch (UnsupportedProtocolVersionException e) { + // If it's the very first node we've connected to, rethrow the exception and + // Cluster.init() will handle it. Otherwise, just mark this node in error. 
+ if (cluster.protocolVersion() == null) + throw e; + logger.debug("Ignoring host {}: {}", host, e.getMessage()); + errors = logError(host, e, errors, iter); + } catch (ClusterNameMismatchException e) { + logger.debug("Ignoring host {}: {}", host, e.getMessage()); + errors = logError(host, e, errors, iter); + } + } + } catch (InterruptedException e) { + // Sets interrupted status + Thread.currentThread().interrupt(); + + // Indicates that all remaining hosts are skipped due to the interruption + if (host != null) + errors = logError(host, new DriverException("Connection thread interrupted"), errors, iter); + while (iter.hasNext()) + errors = logError(iter.next(), new DriverException("Connection thread interrupted"), errors, iter); + } + throw new NoHostAvailableException(errors == null ? Collections.emptyMap() : errors); + } + + private static Map logError(Host host, Throwable exception, Map errors, Iterator iter) { + if (errors == null) + errors = new HashMap(); + + errors.put(host.getSocketAddress(), exception); + + if (logger.isDebugEnabled()) { + if (iter.hasNext()) { + logger.debug(String.format("[Control connection] error on %s connection, trying next host", host), exception); + } else { + logger.debug(String.format("[Control connection] error on %s connection, no more host to try", host), exception); + } + } + return errors; + } + + private Connection tryConnect(Host host, boolean isInitialConnection) throws ConnectionException, ExecutionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException + { + Connection connection = cluster.connectionFactory.open(host); + + // If no protocol version was specified, set the default as soon as a connection succeeds (it's needed to parse UDTs in refreshSchema) + if (cluster.connectionFactory.protocolVersion == null) + cluster.connectionFactory.protocolVersion = ProtocolVersion.NEWEST_SUPPORTED; + + try { + logger.trace("[Control connection] Registering for events"); + List evs = 
Arrays.asList( + ProtocolEvent.Type.TOPOLOGY_CHANGE, + ProtocolEvent.Type.STATUS_CHANGE, + ProtocolEvent.Type.SCHEMA_CHANGE + ); + connection.write(new Requests.Register(evs)); + + // We need to refresh the node list first so we know about the cassandra version of + // the node we're connecting to. + refreshNodeListAndTokenMap(connection, cluster, isInitialConnection, true); + + // Note that refreshing the schema will trigger refreshNodeListAndTokenMap since table == null + // We want that because the token map was not properly initialized by the first call above, since it requires the list of keyspaces + // to be loaded. + logger.debug("[Control connection] Refreshing schema"); + refreshSchema(connection, null, null, null, cluster, isInitialConnection); + return connection; + } catch (BusyConnectionException e) { + connection.closeAsync().force(); + throw new DriverInternalError("Newly created connection should not be busy"); + } catch (InterruptedException e) { + connection.closeAsync().force(); + throw e; + } catch (ConnectionException e) { + connection.closeAsync().force(); + throw e; + } catch (ExecutionException e) { + connection.closeAsync().force(); + throw e; + } catch (RuntimeException e) { + connection.closeAsync().force(); + throw e; + } + } + + public void refreshSchema(SchemaElement targetType, String targetKeyspace, String targetName) throws InterruptedException { + logger.debug("[Control connection] Refreshing schema for {}{}", + targetType == null ? "everything" : targetKeyspace, + (targetType == KEYSPACE) ? "" : "." 
+ targetName + " (" + targetType + ")"); + try { + Connection c = connectionRef.get(); + // At startup, when we add the initial nodes, this will be null, which is ok + if (c == null) + return; + refreshSchema(c, targetType, targetKeyspace, targetName, cluster, false); + } catch (ConnectionException e) { + logger.debug("[Control connection] Connection error while refreshing schema ({})", e.getMessage()); + signalError(); + } catch (ExecutionException e) { + // If we're being shutdown during schema refresh, this can happen. That's fine so don't scare the user. + if (!isShutdown) + logger.error("[Control connection] Unexpected error while refreshing schema", e); + signalError(); + } catch (BusyConnectionException e) { + logger.debug("[Control connection] Connection is busy, reconnecting"); + signalError(); + } + } + + static void refreshSchema(Connection connection, SchemaElement targetType, String targetKeyspace, String targetName, Cluster.Manager cluster, boolean isInitialConnection) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { + Host host = cluster.metadata.getHost(connection.address); + // Neither host, nor it's version should be null. But instead of dying if there is a race or something, we can kind of try to infer + // a Cassandra version from the protocol version (this is not full proof, we can have the protocol 1 against C* 2.0+, but it's worth + // a shot, and since we log in this case, it should be relatively easy to debug when if this ever fail). + VersionNumber cassandraVersion; + if (host == null || host.getCassandraVersion() == null) { + cassandraVersion = cluster.protocolVersion().minCassandraVersion(); + logger.warn("Cannot find Cassandra version for host {} to parse the schema, using {} based on protocol version in use. 
" + + "If parsing the schema fails, this could be the cause", connection.address, cassandraVersion); + } else { + cassandraVersion = host.getCassandraVersion(); + } + + // Make sure we're up to date on schema + String whereClause = ""; + if (targetType != null) { + whereClause = " WHERE keyspace_name = '" + targetKeyspace + '\''; + if (targetType == TABLE) + whereClause += " AND columnfamily_name = '" + targetName + '\''; + else if (targetType == TYPE) + whereClause += " AND type_name = '" + targetName + '\''; + } + + boolean isSchemaOrKeyspace = (targetType == null || targetType == KEYSPACE); + DefaultResultSetFuture ksFuture = isSchemaOrKeyspace + ? new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_KEYSPACES + whereClause)) + : null; + DefaultResultSetFuture udtFuture = (isSchemaOrKeyspace && supportsUdts(cassandraVersion) || targetType == TYPE) + ? new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_USERTYPES + whereClause)) + : null; + DefaultResultSetFuture cfFuture = (isSchemaOrKeyspace || targetType == TABLE) + ? new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_COLUMN_FAMILIES + whereClause)) + : null; + DefaultResultSetFuture colsFuture = (isSchemaOrKeyspace || targetType == TABLE) + ? new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_COLUMNS + whereClause)) + : null; + + if (ksFuture != null) + connection.write(ksFuture); + if (udtFuture != null) + connection.write(udtFuture); + if (cfFuture != null) + connection.write(cfFuture); + if (colsFuture != null) + connection.write(colsFuture); + + try { + cluster.metadata.rebuildSchema(targetType, targetKeyspace, targetName, + ksFuture == null ? null : ksFuture.get(), + udtFuture == null ? null : udtFuture.get(), + cfFuture == null ? null : cfFuture.get(), + colsFuture == null ? 
null : colsFuture.get(), + cassandraVersion); + } catch (RuntimeException e) { + // Failure to parse the schema is definitively wrong so log a full-on error, but this won't generally prevent queries to + // work and this can happen when new Cassandra versions modify stuff in the schema and the driver hasn't yet be modified. + // So log, but let things go otherwise. + logger.error("Error parsing schema from Cassandra system tables: the schema in Cluster#getMetadata() will appear incomplete or stale", e); + } + + // If we rebuild all from scratch or have an updated keyspace, rebuild the token map since some replication on some keyspace + // may have changed + if (isSchemaOrKeyspace) + refreshNodeListAndTokenMap(connection, cluster, false, false); + } + + private static boolean supportsUdts(VersionNumber cassandraVersion) { + return cassandraVersion.getMajor() > 2 || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 1); + } + + public void refreshNodeListAndTokenMap() { + Connection c = connectionRef.get(); + // At startup, when we add the initial nodes, this will be null, which is ok + if (c == null) + return; + + logger.debug("[Control connection] Refreshing node list and token map"); + try { + refreshNodeListAndTokenMap(c, cluster, false, true); + } catch (ConnectionException e) { + logger.debug("[Control connection] Connection error while refreshing node list and token map ({})", e.getMessage()); + signalError(); + } catch (ExecutionException e) { + // If we're being shutdown during refresh, this can happen. That's fine so don't scare the user. 
+ if (!isShutdown) + logger.error("[Control connection] Unexpected error while refreshing node list and token map", e); + signalError(); + } catch (BusyConnectionException e) { + logger.debug("[Control connection] Connection is busy, reconnecting"); + signalError(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.debug("[Control connection] Interrupted while refreshing node list and token map, skipping it."); + } + } + + private static InetSocketAddress addressToUseForPeerHost(Row peersRow, InetSocketAddress connectedHost, Cluster.Manager cluster, boolean logMissingRpcAddresses) { + InetAddress peer = peersRow.getInet("peer"); + InetAddress addr = peersRow.getInet("rpc_address"); + + if (peer.equals(connectedHost.getAddress()) || (addr != null && addr.equals(connectedHost.getAddress()))) { + // Some DSE versions were inserting a line for the local node in peers (with mostly null values). This has been fixed, but if we + // detect that's the case, ignore it as it's not really a big deal. + logger.debug("System.peers on node {} has a line for itself. This is not normal but is a known problem of some DSE version. Ignoring the entry.", connectedHost); + return null; + } else if (addr == null) { + if (logMissingRpcAddresses) + logger.warn("No rpc_address found for host {} in {}'s peers system table. {} will be ignored.", peer, connectedHost, peer); + return null; + } else if (addr.equals(bindAllAddress)) { + logger.warn("Found host with 0.0.0.0 as rpc_address, using listen_address ({}) to contact it instead. 
If this is incorrect you should avoid the use of 0.0.0.0 server side.", peer); + addr = peer; + } + return cluster.translateAddress(addr); + } + + private Row fetchNodeInfo(Host host, Connection c) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { + boolean isConnectedHost = c.address.equals(host.getSocketAddress()); + if (isConnectedHost || host.listenAddress != null) { + DefaultResultSetFuture future = isConnectedHost + ? new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_LOCAL)) + : new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS + " WHERE peer='" + host.listenAddress.getHostAddress() + '\'')); + c.write(future); + return future.get().one(); + } + + // We have to fetch the whole peers table and find the host we're looking for + DefaultResultSetFuture future = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS)); + c.write(future); + for (Row row : future.get()) { + InetSocketAddress addr = addressToUseForPeerHost(row, c.address, cluster, true); + if (addr != null && addr.equals(host.getSocketAddress())) + return row; + } + return null; + } + + /** + * @return whether we have enough information to bring the node back up + */ + public boolean refreshNodeInfo(Host host) { + + Connection c = connectionRef.get(); + // At startup, when we add the initial nodes, this will be null, which is ok + if (c == null) + return true; + + logger.debug("[Control connection] Refreshing node info on {}", host); + try { + Row row = fetchNodeInfo(host, c); + if (row == null) { + if (c.isDefunct()) { + logger.debug("Control connection is down, could not refresh node info"); + // Keep going with what we currently know about the node, otherwise we will ignore all nodes + // until the control connection is back up (which leads to a catch-22 if there is only one) + return true; + } else { + logger.warn("No row found for host 
{} in {}'s peers system table. {} will be ignored.", host.getAddress(), c.address, host.getAddress()); + return false; + } + // Ignore hosts with a null rpc_address, as this is most likely a phantom row in system.peers (JAVA-428). + // Don't test this for the control host since we're already connected to it anyway, and we read the info from system.local + // which doesn't have an rpc_address column (JAVA-546). + } else if (!c.address.equals(host.getSocketAddress()) && row.getInet("rpc_address") == null) { + logger.warn("No rpc_address found for host {} in {}'s peers system table. {} will be ignored.", host.getAddress(), c.address, host.getAddress()); + return false; + } + + updateInfo(host, row, cluster, false); + return true; + + } catch (ConnectionException e) { + logger.debug("[Control connection] Connection error while refreshing node info ({})", e.getMessage()); + signalError(); + } catch (ExecutionException e) { + // If we're being shutdown during refresh, this can happen. That's fine so don't scare the user. + if (!isShutdown) + logger.debug("[Control connection] Unexpected error while refreshing node info", e); + signalError(); + } catch (BusyConnectionException e) { + logger.debug("[Control connection] Connection is busy, reconnecting"); + signalError(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.debug("[Control connection] Interrupted while refreshing node info, skipping it."); + } catch (Exception e) { + logger.debug("[Control connection] Unexpected error while refreshing node info", e); + signalError(); + } + // If we got an exception, always return true. Otherwise a faulty control connection would cause + // reconnected hosts to be ignored permanently. 
+ return true; + } + + // row can come either from the 'local' table or the 'peers' one + private static void updateInfo(Host host, Row row, Cluster.Manager cluster, boolean isInitialConnection) { + if (!row.isNull("data_center") || !row.isNull("rack")) + updateLocationInfo(host, row.getString("data_center"), row.getString("rack"), isInitialConnection, cluster); + + String version = row.getString("release_version"); + // We don't know if it's a 'local' or a 'peers' row, and only 'peers' rows have the 'peer' field. + InetAddress listenAddress = row.getColumnDefinitions().contains("peer") + ? row.getInet("peer") + : null; + + host.setVersionAndListenAdress(version, listenAddress); + } + + private static void updateLocationInfo(Host host, String datacenter, String rack, boolean isInitialConnection, Cluster.Manager cluster) { + if (Objects.equal(host.getDatacenter(), datacenter) && Objects.equal(host.getRack(), rack)) + return; + + // If the dc/rack information changes for an existing node, we need to update the load balancing policy. + // For that, we remove and re-add the node against the policy. Not the most elegant, and assumes + // that the policy will update correctly, but in practice this should work. 
+ if (!isInitialConnection) + cluster.loadBalancingPolicy().onDown(host); + host.setLocationInfo(datacenter, rack); + if (!isInitialConnection) + cluster.loadBalancingPolicy().onAdd(host); + } + + private static void refreshNodeListAndTokenMap(Connection connection, Cluster.Manager cluster, boolean isInitialConnection, boolean logMissingRpcAddresses) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { + logger.debug("[Control connection] Refreshing node list and token map"); + + // Make sure we're up to date on nodes and tokens + + DefaultResultSetFuture localFuture = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_LOCAL)); + DefaultResultSetFuture peersFuture = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS)); + connection.write(localFuture); + connection.write(peersFuture); + + String partitioner = null; + Map> tokenMap = new HashMap>(); + + // Update cluster name, DC and rack for the one node we are connected to + Row localRow = localFuture.get().one(); + if (localRow != null) { + String clusterName = localRow.getString("cluster_name"); + if (clusterName != null) + cluster.metadata.clusterName = clusterName; + + partitioner = localRow.getString("partitioner"); + if (partitioner != null) + cluster.metadata.partitioner = partitioner; + + Host host = cluster.metadata.getHost(connection.address); + // In theory host can't be null. However there is no point in risking a NPE in case we + // have a race between a node removal and this. 
+ if (host == null) { + logger.debug("Host in local system table ({}) unknown to us (ok if said host just got removed)", connection.address); + } else { + updateInfo(host, localRow, cluster, isInitialConnection); + Set tokens = localRow.getSet("tokens", String.class); + if (partitioner != null && !tokens.isEmpty()) + tokenMap.put(host, tokens); + } + } + + List foundHosts = new ArrayList(); + List dcs = new ArrayList(); + List racks = new ArrayList(); + List cassandraVersions = new ArrayList(); + List listenAddresses = new ArrayList(); + List> allTokens = new ArrayList>(); + + for (Row row : peersFuture.get()) { + InetSocketAddress addr = addressToUseForPeerHost(row, connection.address, cluster, logMissingRpcAddresses); + if (addr == null) + continue; + + foundHosts.add(addr); + dcs.add(row.getString("data_center")); + racks.add(row.getString("rack")); + cassandraVersions.add(row.getString("release_version")); + listenAddresses.add(row.getInet("peer")); + allTokens.add(row.getSet("tokens", String.class)); + } + + for (int i = 0; i < foundHosts.size(); i++) { + Host host = cluster.metadata.getHost(foundHosts.get(i)); + boolean isNew = false; + if (host == null) { + // We don't know that node, create the Host object but wait until we've set the known + // info before signaling the addition. 
+ host = cluster.metadata.add(foundHosts.get(i)); + isNew = true; + } + if (dcs.get(i) != null || racks.get(i) != null) + updateLocationInfo(host, dcs.get(i), racks.get(i), isInitialConnection, cluster); + if (cassandraVersions.get(i) != null) + host.setVersionAndListenAdress(cassandraVersions.get(i), listenAddresses.get(i)); + + if (partitioner != null && !allTokens.get(i).isEmpty()) + tokenMap.put(host, allTokens.get(i)); + + if (isNew && !isInitialConnection) + cluster.triggerOnAdd(host); + } + + // Removes all those that seems to have been removed (since we lost the control connection) + Set foundHostsSet = new HashSet(foundHosts); + for (Host host : cluster.metadata.allHosts()) + if (!host.getSocketAddress().equals(connection.address) && !foundHostsSet.contains(host.getSocketAddress())) + cluster.removeHost(host, isInitialConnection); + + cluster.metadata.rebuildTokenMap(partitioner, tokenMap); + } + + static boolean waitForSchemaAgreement(Connection connection, Cluster.Manager cluster) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { + + long start = System.nanoTime(); + long elapsed = 0; + int maxSchemaAgreementWaitSeconds = cluster.configuration.getProtocolOptions().getMaxSchemaAgreementWaitSeconds(); + while (elapsed < maxSchemaAgreementWaitSeconds * 1000) { + + if (checkSchemaAgreement(connection, cluster)) + return true; + + // let's not flood the node too much + Thread.sleep(200); + + elapsed = Cluster.timeSince(start, TimeUnit.MILLISECONDS); + } + + return false; + } + + private static boolean checkSchemaAgreement(Connection connection, Cluster.Manager cluster) throws ConnectionException, BusyConnectionException, InterruptedException, ExecutionException { + DefaultResultSetFuture peersFuture = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_SCHEMA_PEERS)); + DefaultResultSetFuture localFuture = new DefaultResultSetFuture(null, cluster.protocolVersion(), new 
Requests.Query(SELECT_SCHEMA_LOCAL)); + connection.write(peersFuture); + connection.write(localFuture); + + Set versions = new HashSet(); + + Row localRow = localFuture.get().one(); + if (localRow != null && !localRow.isNull("schema_version")) + versions.add(localRow.getUUID("schema_version")); + + for (Row row : peersFuture.get()) { + + InetSocketAddress addr = addressToUseForPeerHost(row, connection.address, cluster, true); + if (addr == null || row.isNull("schema_version")) + continue; + + Host peer = cluster.metadata.getHost(addr); + if (peer != null && peer.isUp()) + versions.add(row.getUUID("schema_version")); + } + logger.debug("Checking for schema agreement: versions are {}", versions); + return versions.size() <= 1; + } + + boolean checkSchemaAgreement() { + Connection c = connectionRef.get(); + try { + return c != null && checkSchemaAgreement(c, cluster); + } catch (Exception e) { + logger.warn("Error while checking schema agreement", e); + return false; + } + } + + boolean isOpen() { + Connection c = connectionRef.get(); + return c != null && !c.isClosed(); + } + + @Override + public void onUp(Host host) { + } + + @Override + public void onDown(Host host) { + // If that's the host we're connected to, and we haven't yet schedule a reconnection, preemptively start one + Connection current = connectionRef.get(); + if (logger.isDebugEnabled()) + logger.debug("[Control connection] {} is down, currently connected to {}", host, current == null ? "nobody" : current.address); + + if (current != null && current.address.equals(host.getSocketAddress())) { + // This starts an AbstractReconnectionHandler, which will take care of checking if another reconnection is + // already in progress + backgroundReconnect(0); + } + } + + @Override + public void onSuspected(Host host) { + } + + @Override + public void onAdd(Host host) { + // Refresh infos and token map if we didn't knew about that host, i.e. 
if we either don't have basic infos on it, + // or it's not part of our computed token map + Metadata.TokenMap tkmap = cluster.metadata.tokenMap; + if (host.getCassandraVersion() == null || tkmap == null || !tkmap.hosts.contains(host)) + refreshNodeListAndTokenMap(); + } + + @Override + public void onRemove(Host host) { + Connection current = connectionRef.get(); + if (logger.isDebugEnabled()) + logger.debug("[Control connection] {} has been removed, currently connected to {}", host, current == null ? "nobody" : current.address); + + // Schedule a reconnection if that was our control host + if (current != null && current.address.equals(host.getSocketAddress())) { + backgroundReconnect(0); + } + + refreshNodeListAndTokenMap(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java new file mode 100644 index 00000000000..df935d4f15a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * The policy with which to decide whether a host should be considered down. + * + * TODO: this class is fully abstract (rather than an interface) because I'm + * not sure it's worth exposing (and if we do expose it, we need to expose + * ConnectionException). 
Maybe just exposing say a threshold of error before + * convicting a node is enough. + */ +abstract class ConvictionPolicy { + + /** + * Called when a connection error occurs on a connection to the host this policy applies to. + * + * @param exception the connection error that occurred. + * + * @return {@code true} if the host should be considered down. + */ + public abstract boolean addFailure(ConnectionException exception); + + /** + * Called when the host has been detected up. + */ + public abstract void reset(); + + /** + * Simple factory interface to allow creating {@link ConvictionPolicy} instances. + */ + public interface Factory { + + /** + * Creates a new ConvictionPolicy instance for {@code host}. + * + * @param host the host this policy applies to + * @return the newly created {@link ConvictionPolicy} instance. + */ + public ConvictionPolicy create(Host host); + } + + public static class Simple extends ConvictionPolicy { + private Simple(Host host) { + } + + @Override + public boolean addFailure(ConnectionException exception) { + return true; + } + + public boolean addFailureFromExternalDetector() { + return true; + } + + @Override + public void reset() {} + + public static class Factory implements ConvictionPolicy.Factory { + + @Override + public ConvictionPolicy create(Host host) { + return new Simple(host); + } + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java new file mode 100644 index 00000000000..73aa95d3fe9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -0,0 +1,939 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.google.common.base.Objects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.reflect.TypeToken; +import io.netty.buffer.ByteBuf; + +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * Data types supported by cassandra. + */ +public abstract class DataType { + + /** + * The CQL type name. 
+ */ + public enum Name { + + ASCII (1, String.class), + BIGINT (2, Long.class), + BLOB (3, ByteBuffer.class), + BOOLEAN (4, Boolean.class), + COUNTER (5, Long.class), + DECIMAL (6, BigDecimal.class), + DOUBLE (7, Double.class), + FLOAT (8, Float.class), + INET (16, InetAddress.class), + INT (9, Integer.class), + TEXT (10, String.class), + TIMESTAMP (11, Date.class), + UUID (12, UUID.class), + VARCHAR (13, String.class), + VARINT (14, BigInteger.class), + TIMEUUID (15, UUID.class), + LIST (32, List.class), + SET (34, Set.class), + MAP (33, Map.class), + UDT (48, UDTValue.class), + TUPLE (49, TupleValue.class), + CUSTOM (0, ByteBuffer.class); + + final int protocolId; + final Class javaType; + + private static final Name[] nameToIds; + static { + int maxCode = -1; + for (Name name : Name.values()) + maxCode = Math.max(maxCode, name.protocolId); + nameToIds = new Name[maxCode + 1]; + for (Name name : Name.values()) { + if (nameToIds[name.protocolId] != null) + throw new IllegalStateException("Duplicate Id"); + nameToIds[name.protocolId] = name; + } + } + + private Name(int protocolId, Class javaType) { + this.protocolId = protocolId; + this.javaType = javaType; + } + + static Name fromProtocolId(int id) { + Name name = nameToIds[id]; + if (name == null) + throw new DriverInternalError("Unknown data type protocol id: " + id); + return name; + } + + /** + * Returns whether this data type name represent the name of a collection type + * that is a list, set or map. + * + * @return whether this data type name represent the name of a collection type. + */ + public boolean isCollection() { + switch (this) { + case LIST: + case SET: + case MAP: + return true; + default: + return false; + } + } + + /** + * Returns the Java Class corresponding to this CQL type name. + * + * The correspondence between CQL types and java ones is as follow: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
DataType to Java class correspondence
DataType (CQL)Java Class
ASCII String
BIGINT Long
BLOB ByteBuffer
BOOLEAN Boolean
COUNTER Long
CUSTOM ByteBuffer
DECIMAL BigDecimal
DOUBLE Double
FLOAT Float
INET InetAddress
INT Integer
LIST List
MAP Map
SET Set
TEXT String
TIMESTAMP Date
UUID UUID
UDT UDTValue
TUPLE TupleValue
VARCHAR String
VARINT BigInteger
TIMEUUID UUID
+ * + * @return the java Class corresponding to this CQL type name. + */ + public Class asJavaClass() { + return javaType; + } + + @Override + public String toString() { + return super.toString().toLowerCase(); + } + } + + protected final DataType.Name name; + + private static final Map primitiveTypeMap = new EnumMap(Name.class); + static { + for (Name name : Name.values()) { + if (!name.isCollection() && name != Name.CUSTOM && name != Name.UDT && name != Name.TUPLE) + primitiveTypeMap.put(name, new DataType.Native(name)); + } + } + private static final Set primitiveTypeSet = ImmutableSet.copyOf(primitiveTypeMap.values()); + + protected DataType(DataType.Name name) { + this.name = name; + } + + static DataType decode(ByteBuf buffer) { + Name name = Name.fromProtocolId(buffer.readUnsignedShort()); + switch (name) { + case CUSTOM: + String className = CBUtil.readString(buffer); + return CassandraTypeParser.isUserType(className) || CassandraTypeParser.isTupleType(className) + ? CassandraTypeParser.parseOne(className) + : custom(className); + case LIST: + return list(decode(buffer)); + case SET: + return set(decode(buffer)); + case MAP: + DataType keys = decode(buffer); + DataType values = decode(buffer); + return map(keys, values); + case UDT: + String keyspace = CBUtil.readString(buffer); + String type = CBUtil.readString(buffer); + int nFields = buffer.readShort() & 0xffff; + List fields = new ArrayList(nFields); + for (int i = 0; i < nFields; i++) { + String fieldName = CBUtil.readString(buffer); + DataType fieldType = decode(buffer); + fields.add(new UserType.Field(fieldName, fieldType)); + } + return new UserType(keyspace, type, fields); + case TUPLE: + nFields = buffer.readShort() & 0xffff; + List types = new ArrayList(nFields); + for (int i = 0; i < nFields; i++) { + types.add(decode(buffer)); + } + return new TupleType(types); + default: + return primitiveTypeMap.get(name); + } + } + + abstract TypeCodec codec(ProtocolVersion protocolVersion); + + /** + * 
Returns the ASCII type. + * + * @return The ASCII type. + */ + public static DataType ascii() { + return primitiveTypeMap.get(Name.ASCII); + } + + /** + * Returns the BIGINT type. + * + * @return The BIGINT type. + */ + public static DataType bigint() { + return primitiveTypeMap.get(Name.BIGINT); + } + + /** + * Returns the BLOB type. + * + * @return The BLOB type. + */ + public static DataType blob() { + return primitiveTypeMap.get(Name.BLOB); + } + + /** + * Returns the BOOLEAN type. + * + * @return The BOOLEAN type. + */ + public static DataType cboolean() { + return primitiveTypeMap.get(Name.BOOLEAN); + } + + /** + * Returns the COUNTER type. + * + * @return The COUNTER type. + */ + public static DataType counter() { + return primitiveTypeMap.get(Name.COUNTER); + } + + /** + * Returns the DECIMAL type. + * + * @return The DECIMAL type. + */ + public static DataType decimal() { + return primitiveTypeMap.get(Name.DECIMAL); + } + + /** + * Returns the DOUBLE type. + * + * @return The DOUBLE type. + */ + public static DataType cdouble() { + return primitiveTypeMap.get(Name.DOUBLE); + } + + /** + * Returns the FLOAT type. + * + * @return The FLOAT type. + */ + public static DataType cfloat() { + return primitiveTypeMap.get(Name.FLOAT); + } + + /** + * Returns the INET type. + * + * @return The INET type. + */ + public static DataType inet() { + return primitiveTypeMap.get(Name.INET); + } + + /** + * Returns the INT type. + * + * @return The INT type. + */ + public static DataType cint() { + return primitiveTypeMap.get(Name.INT); + } + + /** + * Returns the TEXT type. + * + * @return The TEXT type. + */ + public static DataType text() { + return primitiveTypeMap.get(Name.TEXT); + } + + /** + * Returns the TIMESTAMP type. + * + * @return The TIMESTAMP type. + */ + public static DataType timestamp() { + return primitiveTypeMap.get(Name.TIMESTAMP); + } + + /** + * Returns the UUID type. + * + * @return The UUID type. 
+ */ + public static DataType uuid() { + return primitiveTypeMap.get(Name.UUID); + } + + /** + * Returns the VARCHAR type. + * + * @return The VARCHAR type. + */ + public static DataType varchar() { + return primitiveTypeMap.get(Name.VARCHAR); + } + + /** + * Returns the VARINT type. + * + * @return The VARINT type. + */ + public static DataType varint() { + return primitiveTypeMap.get(Name.VARINT); + } + + /** + * Returns the TIMEUUID type. + * + * @return The TIMEUUID type. + */ + public static DataType timeuuid() { + return primitiveTypeMap.get(Name.TIMEUUID); + } + + /** + * Returns the type of lists of {@code elementType} elements. + * + * @param elementType the type of the list elements. + * @param frozen whether the list is frozen. + * @return the type of lists of {@code elementType} elements. + */ + public static DataType list(DataType elementType, boolean frozen) { + return new DataType.Collection(Name.LIST, ImmutableList.of(elementType), frozen); + } + + /** + * Returns the type of "not frozen" lists of {@code elementType} elements. + *
    /**
     * Returns the type of "not frozen" lists of {@code elementType} elements.
     * <p>
     * This is a shorthand for {@code list(elementType, false);}.
     *
     * @param elementType the type of the list elements.
     * @return the type of "not frozen" lists of {@code elementType} elements.
     */
    public static DataType list(DataType elementType) {
        return list(elementType, false);
    }

    /**
     * Returns the type of frozen lists of {@code elementType} elements.
     * <p>
     * This is a shorthand for {@code list(elementType, true);}.
     *
     * @param elementType the type of the list elements.
     * @return the type of frozen lists of {@code elementType} elements.
     */
    public static DataType frozenList(DataType elementType) {
        return list(elementType, true);
    }

    /**
     * Returns the type of sets of {@code elementType} elements.
     *
     * @param elementType the type of the set elements.
     * @param frozen whether the set is frozen.
     * @return the type of sets of {@code elementType} elements.
     */
    public static DataType set(DataType elementType, boolean frozen) {
        return new DataType.Collection(Name.SET, ImmutableList.of(elementType), frozen);
    }

    /**
     * Returns the type of "not frozen" sets of {@code elementType} elements.
     * <p>
     * This is a shorthand for {@code set(elementType, false);}.
     *
     * @param elementType the type of the set elements.
     * @return the type of "not frozen" sets of {@code elementType} elements.
     */
    public static DataType set(DataType elementType) {
        return set(elementType, false);
    }

    /**
     * Returns the type of frozen sets of {@code elementType} elements.
     * <p>
     * This is a shorthand for {@code set(elementType, true);}.
     *
     * @param elementType the type of the set elements.
     * @return the type of frozen sets of {@code elementType} elements.
     */
    public static DataType frozenSet(DataType elementType) {
        return set(elementType, true);
    }

    /**
     * Returns the type of maps of {@code keyType} to {@code valueType} elements.
     *
     * @param keyType the type of the map keys.
     * @param valueType the type of the map values.
     * @param frozen whether the map is frozen.
     * @return the type of maps of {@code keyType} to {@code valueType} elements.
     */
    public static DataType map(DataType keyType, DataType valueType, boolean frozen) {
        return new DataType.Collection(Name.MAP, ImmutableList.of(keyType, valueType), frozen);
    }

    /**
     * Returns the type of "not frozen" maps of {@code keyType} to {@code valueType} elements.
     * <p>
     * This is a shorthand for {@code map(keyType, valueType, false);}.
     *
     * @param keyType the type of the map keys.
     * @param valueType the type of the map values.
     * @return the type of "not frozen" maps of {@code keyType} to {@code valueType} elements.
     */
    public static DataType map(DataType keyType, DataType valueType) {
        return map(keyType, valueType, false);
    }

    /**
     * Returns the type of frozen maps of {@code keyType} to {@code valueType} elements.
     * <p>
     * This is a shorthand for {@code map(keyType, valueType, true);}.
     *
     * @param keyType the type of the map keys.
     * @param valueType the type of the map values.
     * @return the type of frozen maps of {@code keyType} to {@code valueType} elements.
     */
    public static DataType frozenMap(DataType keyType, DataType valueType) {
        return map(keyType, valueType, true);
    }

+ * A custom type is defined by the name of the class used on the Cassandra + * side to implement it. Note that the support for custom type by the + * driver is limited: values of a custom type won't be interpreted by the + * driver in any way. They will thus have to be set (by {@link BoundStatement#setBytesUnsafe} + * and retrieved (by {@link Row#getBytesUnsafe}) as raw ByteBuffer. + *

+ * The use of custom types is rarely useful and is thus not encouraged. + * + * @param typeClassName the server-side fully qualified class name for the type. + * @return the custom type for {@code typeClassName}. + */ + public static DataType custom(String typeClassName) { + if (typeClassName == null) + throw new NullPointerException(); + return new DataType.Custom(Name.CUSTOM, typeClassName); + } + + /** + * Returns the name of that type. + * + * @return the name of that type. + */ + public Name getName() { + return name; + } + + /** + * Returns whether this data type is frozen. + *
    /**
     * Returns whether this data type is frozen.
     * <p>
     * This applies to User Defined Types, tuples and nested collections. Frozen types are serialized as a single value in
     * Cassandra's storage engine, whereas non-frozen types are stored in a form that allows updates to individual subfields.
     *
     * @return whether this data type is frozen.
     */
    public abstract boolean isFrozen();

    /**
     * Returns the type arguments of this type.
     * <p>
     * Note that only the collection types (LIST, MAP, SET) have type
     * arguments. For the other types, this will return an empty list.
     * <p>
     * For the collection types:
     * <ul>
     *   <li>For lists and sets, this method returns one argument, the type of
     *   the elements.</li>
     *   <li>For maps, this method returns two arguments, the first one is the
     *   type of the map keys, the second one is the type of the map
     *   values.</li>
     * </ul>
     *
     * @return an immutable list containing the type arguments of this type.
     */
    public List getTypeArguments() {
        // Overridden by Collection; every other kind of type has no arguments.
        return Collections.emptyList();
    }

    // Whether a value of this CQL type can be deserialized into the given Java
    // type. Implemented per subclass (Native/Collection/Custom).
    abstract boolean canBeDeserializedAs(TypeToken typeToken);

    /**
     * Returns the server-side class name for a custom type.
     *
     * @return the server-side fully qualified class name for a custom type or
     * {@code null} for any other type.
     */
    public String getCustomTypeClassName() {
        // Overridden by Custom; null for all other types.
        return null;
    }

    /**
     * Parses a string CQL value for the type this object represent, returning its
     * value as a Java object.
     *
     * @param value the value to parse.
     * @return a java object representing {@code value}. If {@code value == null}, then
     * {@code null} is returned.
     *
     * @throws InvalidTypeException if {@code value} is not a valid CQL string
     * representation for this type. Please note that values for custom types
     * can never be parsed and will always return this exception.
     */
    public Object parse(String value) {
        // We don't care about the protocol version for parsing
        return value == null ? null : codec(ProtocolVersion.NEWEST_SUPPORTED).parse(value);
    }

    /**
     * Format a Java object as an equivalent CQL value.
     *
     * @param value the value to format.
     * @return a string corresponding to the CQL representation of {@code value}.
     *
     * @throws InvalidTypeException if {@code value} does not correspond to
     * a CQL value (known by the driver). Please note that for custom types this
     * method will always return this exception.
     */
    public String format(Object value) {
        // We don't care about the protocol version for formatting
        return value == null ? null : codec(ProtocolVersion.NEWEST_SUPPORTED).format(value);
    }
+ */ + public boolean isCollection() { + return name.isCollection(); + } + + /** + * Returns the Java Class corresponding to this type. + * + * This is a shortcut for {@code getName().asJavaClass()}. + * + * @return the java Class corresponding to this type. + * + * @see Name#asJavaClass + */ + public Class asJavaClass() { + return getName().asJavaClass(); + } + + /** + * Returns a set of all the primitive types, where primitive types are + * defined as the types that don't have type arguments (that is excluding + * lists, sets, and maps). + * + * @return returns a set of all the primitive types. + */ + public static Set allPrimitiveTypes() { + return primitiveTypeSet; + } + + /** + * Serialize a value of this type to bytes, with the given protocol version. + *

+ * The actual format of the resulting bytes will correspond to the + * Cassandra encoding for this type (for the requested protocol version). + * + * @param value the value to serialize. + * @param protocolVersion the protocol version to use when serializing + * {@code bytes}. In most cases, the proper value to provide for this argument + * is the value returned by {@link ProtocolOptions#getProtocolVersion} (which + * is the protocol version in use by the driver). + * @return the value serialized, or {@code null} if {@code value} is null. + * + * @throws InvalidTypeException if {@code value} is not a valid object + * for this {@code DataType}. + */ + public ByteBuffer serialize(Object value, ProtocolVersion protocolVersion) { + Class providedClass = value.getClass(); + Class expectedClass = asJavaClass(); + if (!expectedClass.isAssignableFrom(providedClass)) + throw new InvalidTypeException(String.format("Invalid value for CQL type %s, expecting %s but %s provided", toString(), expectedClass, providedClass)); + + try { + return codec(protocolVersion).serialize(value); + } catch (ClassCastException e) { + // With collections, the element type has not been checked, so it can throw + throw new InvalidTypeException("Invalid type for collection element: " + e.getMessage()); + } + } + + /** + * Serialize a value of this type to bytes, with the given numeric protocol version. + * + * @throws IllegalArgumentException if {@code protocolVersion} does not correspond to any known version. + * + * @deprecated This method is provided for backward compatibility. Use + * {@link #serialize(Object, ProtocolVersion)} instead. + */ + @Deprecated + public ByteBuffer serialize(Object value, int protocolVersion) { + return serialize(value, ProtocolVersion.fromInt(protocolVersion)); + } + + /** + * @deprecated This method is provided for binary compatibility only. It is no longer supported, will be removed, + * and simply throws {@link UnsupportedOperationException}. 
Use {@link #serialize(Object, ProtocolVersion)} instead. + */ + @Deprecated + public ByteBuffer serialize(Object value) { + throw new UnsupportedOperationException("Method no longer supported; use serialize(Object,ProtocolVersion)"); + } + + /** + * Deserialize a value of this type from the provided bytes using the given protocol version. + * + * @param bytes bytes holding the value to deserialize. + * @param protocolVersion the protocol version to use when deserializing + * {@code bytes}. In most cases, the proper value to provide for this argument + * is the value returned by {@link ProtocolOptions#getProtocolVersion} (which + * is the protocol version in use by the driver). + * @return the deserialized value (of class {@code this.asJavaClass()}). + * Will return {@code null} if either {@code bytes} is {@code null} or if + * {@code bytes.remaining() == 0} and this type has no value corresponding + * to an empty byte buffer (the latter somewhat strange behavior is due to + * the fact that for historical/technical reason, Cassandra types always + * accept empty byte buffer as valid value of those type, and so we avoid + * throwing an exception in that case. It is however highly discouraged to + * store empty byte buffers for types for which it doesn't make sense, so + * this detail can generally be ignored). + * + * @throws InvalidTypeException if {@code bytes} is not a valid + * encoding of an object of this {@code DataType}. + */ + public Object deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return codec(protocolVersion).deserialize(bytes); + } + + /** + * Deserialize a value of this type from the provided bytes using the given numeric protocol version. + * + * @throws IllegalArgumentException if {@code protocolVersion} does not correspond to any known version. + * + * @deprecated This method is provided for backward compatibility. Use + * {@link #deserialize(ByteBuffer,ProtocolVersion)} instead. 
+ */ + public Object deserialize(ByteBuffer bytes, int protocolVersion) { + return deserialize(bytes, ProtocolVersion.fromInt(protocolVersion)); + } + + /** + * @deprecated This method is provided for binary compatibility only. It is no longer supported, will be removed, + * and simply throws {@link UnsupportedOperationException}. Use {@link #deserialize(ByteBuffer, ProtocolVersion)} instead. + */ + @Deprecated + public Object deserialize(ByteBuffer bytes) { + throw new UnsupportedOperationException("Method no longer supported; use deserialize(ByteBuffer,ProtocolVersion)"); + } + + /** + * Serialize an object based on its java class. + *
    /**
     * Serialize an object based on its java class.
     * <p>
     * This is equivalent to {@link #serialize} but with the difference that
     * the actual {@code DataType} of the resulting value is inferred from the
     * java class of {@code value}. The correspondence between CQL {@code DataType}
     * and java class used is the one induced by the method {@link Name#asJavaClass}.
     * Note that if you know the {@code DataType} of {@code value}, you should use
     * the {@link #serialize} method instead as it is going to be faster.
     *
     * @param value the value to serialize.
     * @param protocolVersion the protocol version to use when deserializing
     * {@code bytes}. In most cases, the proper value to provide for this argument
     * is the value returned by {@link ProtocolOptions#getProtocolVersion} (which
     * is the protocol version in use by the driver).
     * @return the value serialized, or {@code null} if {@code value} is null.
     *
     * @throws IllegalArgumentException if {@code value} is not of a type
     * corresponding to a CQL3 type, i.e. is not a Class that could be returned
     * by {@link DataType#asJavaClass}.
     */
    public static ByteBuffer serializeValue(Object value, ProtocolVersion protocolVersion) {
        if (value == null)
            return null;

        DataType dt = TypeCodec.getDataTypeFor(value);
        if (dt == null)
            throw new IllegalArgumentException(String.format("Value of type %s does not correspond to any CQL3 type", value.getClass()));

        try {
            return dt.serialize(value, protocolVersion);
        } catch (InvalidTypeException e) {
            // In theory we couldn't get that if getDataTypeFor does his job correctly,
            // but there is no point in sending an exception that the user won't expect if we're
            // wrong on that.
            throw new IllegalArgumentException(e.getMessage());
        }
    }

    /**
     * Serialize an object based on its java class, with the given numeric protocol version.
     *
     * @throws IllegalArgumentException if {@code protocolVersion} does not correspond to any known version.
     *
     * @deprecated This method is provided for backward compatibility. Use
     * {@link #serializeValue(Object, ProtocolVersion)} instead.
     */
    @Deprecated
    public static ByteBuffer serializeValue(Object value, int protocolVersion) {
        return serializeValue(value, ProtocolVersion.fromInt(protocolVersion));
    }

    /**
     * @deprecated This method is provided for binary compatibility only. It is no longer supported, will be removed,
     * and simply throws {@link UnsupportedOperationException}. Use {@link #serializeValue(Object, ProtocolVersion)} instead.
     */
    @Deprecated
    public static ByteBuffer serializeValue(Object value) {
        throw new UnsupportedOperationException("Method no longer supported; use serializeValue(Object,ProtocolVersion)");
    }

    // A native (primitive) CQL type: no type arguments, never frozen.
    private static class Native extends DataType {
        private Native(DataType.Name name) {
            super(name);
        }

        @Override
        public boolean isFrozen() {
            return false;
        }

        @Override
        boolean canBeDeserializedAs(TypeToken typeToken) {
            return typeToken.isAssignableFrom(getName().javaType);
        }

        @Override
        TypeCodec codec(ProtocolVersion protocolVersion) {
            return TypeCodec.createFor(name);
        }

        @Override
        public final int hashCode() {
            // TEXT hashes as VARCHAR so that it stays consistent with equals()
            // below, which treats TEXT and VARCHAR as interchangeable.
            return (name == Name.TEXT)
                 ? Name.VARCHAR.hashCode()
                 : name.hashCode();
        }

        @Override
        public final boolean equals(Object o) {
            if(!(o instanceof DataType.Native))
                return false;

            Native that = (DataType.Native)o;
            // TEXT and VARCHAR are considered the same type.
            return name == that.name ||
                   name == Name.VARCHAR && that.name == Name.TEXT ||
                   name == Name.TEXT && that.name == Name.VARCHAR;
        }

        @Override
        public String toString() {
            return name.toString();
        }
    }

    // A collection type (LIST, SET or MAP) with its element type arguments.
    private static class Collection extends DataType {

        private final List typeArguments;
        // NOTE(review): unlike typeArguments this field is not final —
        // presumably mutated elsewhere in the file (e.g. while parsing
        // "frozen<...>"); confirm before making it final.
        private boolean frozen;

        private Collection(DataType.Name name, List typeArguments, boolean frozen) {
            super(name);
            this.typeArguments = typeArguments;
            this.frozen = frozen;
        }

        @Override
        public boolean isFrozen() {
            return frozen;
        }

        @Override
        @SuppressWarnings("unchecked")
        boolean canBeDeserializedAs(TypeToken typeToken) {
            // For each collection kind, the raw Java type must be compatible and
            // every type argument must itself be deserializable into the
            // corresponding resolved type parameter of the target.
            switch (name) {
                case LIST:
                    return typeToken.getRawType().isAssignableFrom(List.class) &&
                           typeArguments.get(0).canBeDeserializedAs(typeToken.resolveType(typeToken.getRawType().getTypeParameters()[0]));
                case SET:
                    return typeToken.getRawType().isAssignableFrom(Set.class) &&
                           typeArguments.get(0).canBeDeserializedAs(typeToken.resolveType(typeToken.getRawType().getTypeParameters()[0]));
                case MAP:
                    return typeToken.getRawType().isAssignableFrom(Map.class) &&
                           typeArguments.get(0).canBeDeserializedAs(typeToken.resolveType(typeToken.getRawType().getTypeParameters()[0])) &&
                           typeArguments.get(1).canBeDeserializedAs(typeToken.resolveType(typeToken.getRawType().getTypeParameters()[1]));
            }
            throw new AssertionError();
        }

        @SuppressWarnings("unchecked")
        @Override
        TypeCodec codec(ProtocolVersion protocolVersion) {
            switch (name)
            {
                case LIST: return (TypeCodec)TypeCodec.listOf(typeArguments.get(0), protocolVersion);
                case SET:  return (TypeCodec)TypeCodec.setOf(typeArguments.get(0), protocolVersion);
                case MAP:  return (TypeCodec)TypeCodec.mapOf(typeArguments.get(0), typeArguments.get(1), protocolVersion);
            }
            throw new AssertionError();
        }

        @Override
        public List getTypeArguments() {
            return typeArguments;
        }

        @Override
        public final int hashCode() {
            // NOTE(review): frozen is deliberately excluded here and in equals(),
            // so frozen and non-frozen variants compare equal — confirm this is
            // the intended (historical) behavior before changing.
            return Arrays.hashCode(new Object[]{ name, typeArguments });
        }

        @Override
        public final boolean equals(Object o) {
            if(!(o instanceof DataType.Collection))
                return false;

            DataType.Collection d = (DataType.Collection)o;
            return name == d.name && typeArguments.equals(d.typeArguments);
        }

        @Override
        public String toString() {
            if (name == Name.MAP) {
                String template = frozen ? "frozen<%s<%s, %s>>" : "%s<%s, %s>";
                return String.format(template, name, typeArguments.get(0), typeArguments.get(1));
            }
            else {
                String template = frozen ? "frozen<%s<%s>>" : "%s<%s>";
                return String.format(template, name, typeArguments.get(0));
            }
        }
    }

    // A custom type, identified only by its server-side class name; values are
    // handled as raw bytes (see custom(String) above).
    private static class Custom extends DataType {

        private final String customClassName;

        private Custom(DataType.Name name, String className) {
            super(name);
            this.customClassName = className;
        }

        @Override
        public boolean isFrozen() {
            return false;
        }

        @Override
        boolean canBeDeserializedAs(TypeToken typeToken) {
            return typeToken.getRawType().getName().equals(customClassName);
        }

        @SuppressWarnings("unchecked")
        @Override
        TypeCodec codec(ProtocolVersion protocolVersion) {
            // Custom values are never interpreted: always the raw-bytes codec.
            return (TypeCodec)TypeCodec.BytesCodec.instance;
        }

        @Override
        public String getCustomTypeClassName() {
            return customClassName;
        }

        @Override
        public Object parse(String value) {
            throw new InvalidTypeException("Cannot parse values of custom types");
        }

        @Override
        public String format(Object value) {
            throw new InvalidTypeException("Cannot format values of custom types");
        }

        @Override
        public final int hashCode() {
            return Arrays.hashCode(new Object[]{ name, customClassName });
        }

        @Override
        public final boolean equals(Object o) {
            if(!(o instanceof DataType.Custom))
                return false;

            DataType.Custom d = (DataType.Custom)o;
            return name == d.name && Objects.equal(customClassName, d.customClassName);
        }

        @Override
        public String toString() {
            return String.format("'%s'", customClassName);
        }
    }
}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/DefaultPreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/DefaultPreparedStatement.java
new file mode 100644
index 00000000000..21b86699f5a
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/DefaultPreparedStatement.java
@@ -0,0 +1,178 @@
/*
 * Copyright (C) 2012-2015 DataStax Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.util.List; + +import com.datastax.driver.core.policies.RetryPolicy; + +public class DefaultPreparedStatement implements PreparedStatement{ + + final PreparedId preparedId; + + final String query; + final String queryKeyspace; + + volatile ByteBuffer routingKey; + + volatile ConsistencyLevel consistency; + volatile ConsistencyLevel serialConsistency; + volatile boolean traceQuery; + volatile RetryPolicy retryPolicy; + + private DefaultPreparedStatement(PreparedId id, String query, String queryKeyspace) { + this.preparedId = id; + this.query = query; + this.queryKeyspace = queryKeyspace; + } + + static DefaultPreparedStatement fromMessage(Responses.Result.Prepared msg, Metadata clusterMetadata, ProtocolVersion protocolVersion, String query, String queryKeyspace) { + assert msg.metadata.columns != null; + + ColumnDefinitions defs = msg.metadata.columns; + + if (defs.size() == 0) + return new DefaultPreparedStatement(new PreparedId(msg.statementId, defs, msg.resultMetadata.columns, null, protocolVersion), query, queryKeyspace); + + List partitionKeyColumns = null; + int[] pkIndexes = null; + KeyspaceMetadata km = clusterMetadata.getKeyspace(Metadata.quote(defs.getKeyspace(0))); + if (km != null) { + TableMetadata tm = km.getTable(Metadata.quote(defs.getTable(0))); + if (tm != null) { + partitionKeyColumns = tm.getPartitionKey(); + pkIndexes = new int[partitionKeyColumns.size()]; + for (int i = 0; i < pkIndexes.length; ++i) + pkIndexes[i] = -1; + } + } + + // Note: we rely on the fact CQL queries cannot span multiple tables. If that change, we'll have to get smarter. + for (int i = 0; i < defs.size(); i++) + maybeGetIndex(defs.getName(i), i, partitionKeyColumns, pkIndexes); + + PreparedId prepId = new PreparedId(msg.statementId, defs, msg.resultMetadata.columns, allSet(pkIndexes) ? 
pkIndexes : null, protocolVersion); + + return new DefaultPreparedStatement(prepId, query, queryKeyspace); + } + + private static void maybeGetIndex(String name, int j, List pkColumns, int[] pkIndexes) { + if (pkColumns == null) + return; + + for (int i = 0; i < pkColumns.size(); ++i) { + if (name.equals(pkColumns.get(i).getName())) { + // We may have the same column prepared multiple times, but only pick the first value + pkIndexes[i] = j; + return; + } + } + } + + private static boolean allSet(int[] pkColumns) { + if (pkColumns == null) + return false; + + for (int i = 0; i < pkColumns.length; ++i) + if (pkColumns[i] < 0) + return false; + + return true; + } + + public ColumnDefinitions getVariables() { + return preparedId.metadata; + } + + public BoundStatement bind(Object... values) { + BoundStatement bs = new BoundStatement(this); + return bs.bind(values); + } + + public BoundStatement bind() { + return new BoundStatement(this); + } + + public PreparedStatement setRoutingKey(ByteBuffer routingKey) { + this.routingKey = routingKey; + return this; + } + + public PreparedStatement setRoutingKey(ByteBuffer... 
routingKeyComponents) { + this.routingKey = SimpleStatement.compose(routingKeyComponents); + return this; + } + + public ByteBuffer getRoutingKey() { + return routingKey; + } + + public PreparedStatement setConsistencyLevel(ConsistencyLevel consistency) { + this.consistency = consistency; + return this; + } + + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } + + public PreparedStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { + if (serialConsistency != ConsistencyLevel.SERIAL && serialConsistency != ConsistencyLevel.LOCAL_SERIAL) + throw new IllegalArgumentException(); + this.serialConsistency = serialConsistency; + return this; + } + + public ConsistencyLevel getSerialConsistencyLevel() { + return serialConsistency; + } + + public String getQueryString() { + return query; + } + + public String getQueryKeyspace() { + return queryKeyspace; + } + + public PreparedStatement enableTracing() { + this.traceQuery = true; + return this; + } + + public PreparedStatement disableTracing() { + this.traceQuery = false; + return this; + } + + public boolean isTracing() { + return traceQuery; + } + + public PreparedStatement setRetryPolicy(RetryPolicy policy) { + this.retryPolicy = policy; + return this; + } + + public RetryPolicy getRetryPolicy() { + return retryPolicy; + } + + public PreparedId getPreparedId() { + return preparedId; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/DefaultResultSetFuture.java b/driver-core/src/main/java/com/datastax/driver/core/DefaultResultSetFuture.java new file mode 100644 index 00000000000..8b2dd352725 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/DefaultResultSetFuture.java @@ -0,0 +1,280 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datastax.driver.core;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.datastax.driver.core.exceptions.*;

/**
 * Internal implementation of ResultSetFuture.
 */
class DefaultResultSetFuture extends AbstractFuture implements ResultSetFuture, RequestHandler.Callback {

    private static final Logger logger = LoggerFactory.getLogger(ResultSetFuture.class);

    private final SessionManager session;
    private final ProtocolVersion protocolVersion;
    private final Message.Request request;
    private volatile RequestHandler handler;

    DefaultResultSetFuture(SessionManager session, ProtocolVersion protocolVersion, Message.Request request) {
        this.session = session;
        this.protocolVersion = protocolVersion;
        this.request = request;
    }

    @Override
    public void register(RequestHandler handler) {
        this.handler = handler;
    }

    @Override
    public Message.Request request() {
        return request;
    }

    /**
     * Completes this future from a server response: RESULT responses complete it
     * with a ResultSet (propagating keyspace changes and refreshing/pruning
     * schema metadata on SET_KEYSPACE / SCHEMA_CHANGE), ERROR responses complete
     * it exceptionally, and anything else defuncts the connection.
     */
    @Override
    public void onSet(Connection connection, Message.Response response, ExecutionInfo info, Statement statement, long latency) {
        try {
            switch (response.type) {
                case RESULT:
                    Responses.Result rm = (Responses.Result)response;
                    switch (rm.kind) {
                        case SET_KEYSPACE:
                            // propagate the keyspace change to other connections
                            session.poolsState.setKeyspace(((Responses.Result.SetKeyspace)rm).keyspace);
                            set(ArrayBackedResultSet.fromMessage(rm, session, protocolVersion, info, statement));
                            break;
                        case SCHEMA_CHANGE:
                            Responses.Result.SchemaChange scc = (Responses.Result.SchemaChange)rm;
                            ResultSet rs = ArrayBackedResultSet.fromMessage(rm, session, protocolVersion, info, statement);
                            switch (scc.change) {
                                case CREATED:
                                case UPDATED:
                                    // The future is completed only once the schema refresh is done.
                                    session.cluster.manager.refreshSchemaAndSignal(connection, this, rs, scc.targetType, scc.targetKeyspace, scc.targetName);
                                    break;
                                case DROPPED:
                                    KeyspaceMetadata keyspace;
                                    switch (scc.targetType) {
                                        case KEYSPACE:
                                            // If that the one keyspace we are logged in, reset to null (it shouldn't really happen but ...)
                                            // Note: Actually, Cassandra doesn't do that so we don't either as this could confuse prepared statements.
                                            // We'll add it back if CASSANDRA-5358 changes that behavior
                                            //if (scc.keyspace.equals(session.poolsState.keyspace))
                                            //    session.poolsState.setKeyspace(null);
                                            session.cluster.manager.metadata.removeKeyspace(scc.targetKeyspace);
                                            break;
                                        case TABLE:
                                            keyspace = session.cluster.manager.metadata.getKeyspaceInternal(scc.targetKeyspace);
                                            if (keyspace == null)
                                                logger.warn("Received a DROPPED notification for table {}.{}, but this keyspace is unknown in our metadata",
                                                    scc.targetKeyspace, scc.targetName);
                                            else
                                                keyspace.removeTable(scc.targetName);
                                            break;
                                        case TYPE:
                                            keyspace = session.cluster.manager.metadata.getKeyspaceInternal(scc.targetKeyspace);
                                            if (keyspace == null)
                                                logger.warn("Received a DROPPED notification for UDT {}.{}, but this keyspace is unknown in our metadata",
                                                    scc.targetKeyspace, scc.targetName);
                                            else
                                                keyspace.removeUserType(scc.targetName);
                                            break;
                                    }
                                    session.cluster.manager.waitForSchemaAgreementAndSignal(connection, this, rs);
                                    break;
                                default:
                                    logger.info("Ignoring unknown schema change result");
                                    break;
                            }
                            break;
                        default:
                            set(ArrayBackedResultSet.fromMessage(rm, session, protocolVersion, info, statement));
                            break;
                    }
                    break;
                case ERROR:
                    setException(((Responses.Error)response).asException(connection.address));
                    break;
                default:
                    // This mean we have probably have a bad node, so defunct the connection
                    connection.defunct(new ConnectionException(connection.address, String.format("Got unexpected %s response", response.type)));
                    setException(new DriverInternalError(String.format("Got unexpected %s response from %s", response.type, connection.address)));
                    break;
            }
        } catch (RuntimeException e) {
            // If we get a bug here, the client will not get it, so better forwarding the error
            setException(new DriverInternalError("Unexpected error while processing response from " + connection.address, e));
        }
    }

    @Override
    public void onSet(Connection connection, Message.Response response, long latency, int retryCount) {
        // This is only called for internal calls (i.e, when the callback is not wrapped in ResponseHandler),
        // so don't bother with ExecutionInfo.
        onSet(connection, response, null, null, latency);
    }

    @Override
    public void onException(Connection connection, Exception exception, long latency, int retryCount) {
        setException(exception);
    }

    @Override
    public boolean onTimeout(Connection connection, long latency, int retryCount) {
        // This is only called for internal calls (i.e, when the future is not wrapped in RequestHandler).
        // So just set an exception for the final result, which should be handled correctly by said internal call.
        setException(new OperationTimedOutException(connection.address));
        return true;
    }

    // We sometimes need (in the driver) to set the future from outside this class,
    // but AbstractFuture#set is protected so this method. We don't want it public
    // however, no particular reason to give users rope to hang themselves.
    void setResult(ResultSet rs) {
        set(rs);
    }

    /**
     * Waits for the query to return and return its result.
     * <p>
     * This method is usually more convenient than {@link #get} because it:
     * <ul>
     *   <li>Waits for the result uninterruptibly, and so doesn't throw
     *   {@link InterruptedException}.</li>
     *   <li>Returns meaningful exceptions, instead of having to deal
     *   with ExecutionException.</li>
     * </ul>
     * As such, it is the preferred way to get the future result.
     *
     * @throws NoHostAvailableException if no host in the cluster can be
     * contacted successfully to execute this query.
     * @throws QueryExecutionException if the query triggered an execution
     * exception, that is an exception thrown by Cassandra when it cannot execute
     * the query with the requested consistency level successfully.
     * @throws QueryValidationException if the query is invalid (syntax error,
     * unauthorized or any other validation problem).
     */
    public ResultSet getUninterruptibly() {
        try {
            return Uninterruptibles.getUninterruptibly(this);
        } catch (ExecutionException e) {
            // Unwrap the ExecutionException into the driver exception the
            // javadoc advertises.
            throw extractCauseFromExecutionException(e);
        }
    }

    /**
     * Waits for the provided time for the query to return and return its
     * result if available.
     * <p>
     * This method is usually more convenient than {@link #get} because it:
    + *
+     *   <li>Waits for the result uninterruptibly, and so doesn't throw
+     *   {@link InterruptedException}.</li>
+     *   <li>Returns meaningful exceptions, instead of having to deal
+     *   with ExecutionException.</li>
+     * </ul>
+ * As such, it is the preferred way to get the future result. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, that is an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, + * unauthorized or any other validation problem). + * @throws TimeoutException if the wait timed out (Note that this is + * different from a Cassandra timeout, which is a {@code + * QueryExecutionException}). + */ + public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws TimeoutException { + try { + return Uninterruptibles.getUninterruptibly(this, timeout, unit); + } catch (ExecutionException e) { + throw extractCauseFromExecutionException(e); + } + } + + /** + * Attempts to cancel the execution of the request corresponding to this + * future. This attempt will fail if the request has already returned. + *

+ * Please note that this only cancels the request driver side, but nothing + * is done to interrupt the execution of the request Cassandra side (and that even + * if {@code mayInterruptIfRunning} is true) since Cassandra does not + * support such interruption. + *

+ * This method can be used to ensure no more work is performed driver side + * (which, while it doesn't include stopping a request already submitted + * to a Cassandra node, may include not retrying another Cassandra host on + * failure/timeout) if the ResultSet is not going to be retried. Typically, + * the code to wait for a request result for a maximum of 1 second could + * look like: + *

+     *   ResultSetFuture future = session.executeAsync(...some query...);
+     *   try {
+     *       ResultSet result = future.get(1, TimeUnit.SECONDS);
+     *       ... process result ...
+     *   } catch (TimeoutException e) {
+     *       future.cancel(true); // Ensure any resource used by this query driver
+     *                            // side is released immediately
+     *       ... handle timeout ...
+     *   }
+     * 
+     *
+     * @param mayInterruptIfRunning the value of this parameter is currently
+     * ignored.
+     * @return {@code false} if the future could not be cancelled (it has already
+     * completed normally); {@code true} otherwise.
+     */
+    @Override
+    public boolean cancel(boolean mayInterruptIfRunning) {
+        if (!super.cancel(mayInterruptIfRunning))
+            return false;
+
+        if(handler != null) {
+            handler.cancel();
+        }
+        return true;
+    }
+
+    static RuntimeException extractCauseFromExecutionException(ExecutionException e) {
+        // We could just rethrow e.getCause(). However, the cause of the ExecutionException has likely been
+        // created on the I/O thread receiving the response. Which means that the stacktrace associated
+        // with said cause will make no mention of the current thread. This is painful for say, finding
+        // out which execute() statement actually raised the exception. So instead, we re-create the
+        // exception.
+        if (e.getCause() instanceof DriverException)
+            throw ((DriverException)e.getCause()).copy();
+        else
+            throw new DriverInternalError("Unexpected exception thrown", e.getCause());
+    }
+
+    @Override
+    public int retryCount() {
+        // This is only called for internal calls (i.e, when the future is not wrapped in RequestHandler).
+        // There is no retry logic in that case, so the value does not really matter.
+        return 0;
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/DelegatingCluster.java b/driver-core/src/main/java/com/datastax/driver/core/DelegatingCluster.java
new file mode 100644
index 00000000000..9cd5838c439
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/DelegatingCluster.java
@@ -0,0 +1,126 @@
+/*
+ *      Copyright (C) 2012-2015 DataStax Inc.
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+package com.datastax.driver.core;
+
+import java.net.InetSocketAddress;
+import java.util.Collections;
+
+/**
+ * Base class for custom {@link Cluster} implementations that wrap another instance (delegate / decorator pattern).
+ */
+public abstract class DelegatingCluster extends Cluster {
+    /**
+     * Builds a new instance.
+     */
+    protected DelegatingCluster() {
+        // Implementation notes:
+        // If Cluster was an interface, delegates would be trivial to write. But, for historical reasons, it's a class,
+        // and changing that would break backward compatibility. That makes delegates rather convoluted and error-prone
+        // to write, so we provide DelegatingCluster to abstract the details.
+        // This class ensures that:
+        // - init() is never called on the parent class, because that would initialize the Cluster.Manager instance and
+        //   create a lot of internal state (thread pools, etc.) that we don't need, since another Cluster instance is
+        //   already handling the calls.
+        // - all public methods are properly forwarded to the delegate (otherwise they would call the parent class and
+        //   return inconsistent results).
+        // These two goals are closely related, since a lot of public methods call init(), so accidentally calling a
+        // parent method could initialize the parent state.
+
+        // Construct parent class with dummy parameters that will never get used (since super.init() is never called).
+        super("delegating_cluster", Collections.<InetSocketAddress>emptyList(), null);
+
+        // Immediately close the parent class's internal Manager, to make sure that it will fail fast if it's ever
+        // accidentally invoked.
+        super.closeAsync();
+    }
+
+    /**
+     * Returns the delegate instance where all calls will be forwarded.
+     *
+     * @return the delegate.
+     */
+    protected abstract Cluster delegate();
+
+    @Override
+    public Cluster init() {
+        return delegate().init();
+    }
+
+    @Override
+    public Session newSession() {
+        return delegate().newSession();
+    }
+
+    @Override
+    public Session connect() {
+        return delegate().connect();
+    }
+
+    @Override
+    public Session connect(String keyspace) {
+        return delegate().connect(keyspace);
+    }
+
+    @Override
+    public Metadata getMetadata() {
+        return delegate().getMetadata();
+    }
+
+    @Override
+    public Configuration getConfiguration() {
+        return delegate().getConfiguration();
+    }
+
+    @Override
+    public Metrics getMetrics() {
+        return delegate().getMetrics();
+    }
+
+    @Override
+    public Cluster register(Host.StateListener listener) {
+        return delegate().register(listener);
+    }
+
+    @Override
+    public Cluster unregister(Host.StateListener listener) {
+        return delegate().unregister(listener);
+    }
+
+    @Override
+    public Cluster register(LatencyTracker tracker) {
+        return delegate().register(tracker);
+    }
+
+    @Override
+    public Cluster unregister(LatencyTracker tracker) {
+        return delegate().unregister(tracker);
+    }
+
+    @Override
+    public CloseFuture closeAsync() {
+        return delegate().closeAsync();
+    }
+
+    @Override
+    public void close() {
+        delegate().close();
+    }
+
+    @Override
+    public boolean isClosed() {
+        return delegate().isClosed();
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ExceptionCatchingRunnable.java b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCatchingRunnable.java
new file mode 100644
index 00000000000..bc87885d32b
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCatchingRunnable.java
@@ -0,0 +1,39 @@
+/*
+ *      Copyright (C) 2012-2015 DataStax Inc.
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+package com.datastax.driver.core;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// Simple utility class to make sure we don't let exception slip away and kill
+// our executors.
+abstract class ExceptionCatchingRunnable implements Runnable {
+
+    private static final Logger logger = LoggerFactory.getLogger(ExceptionCatchingRunnable.class);
+
+    public abstract void runMayThrow() throws Exception;
+
+    @Override
+    public void run() {
+        try {
+            runMayThrow();
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+        } catch (Exception e) {
+            logger.error("Unexpected error while executing task", e);
+        }
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java
new file mode 100644
index 00000000000..742f4da143a
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java
@@ -0,0 +1,66 @@
+/*
+ *      Copyright (C) 2012-2015 DataStax Inc.
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+package com.datastax.driver.core;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.datastax.driver.core.exceptions.DriverInternalError;
+
+/**
+ * Exceptions code, as defined by the native protocol.
+ */
+enum ExceptionCode {
+
+    SERVER_ERROR    (0x0000),
+    PROTOCOL_ERROR  (0x000A),
+
+    BAD_CREDENTIALS (0x0100),
+
+    // 1xx: problem during request execution
+    UNAVAILABLE     (0x1000),
+    OVERLOADED      (0x1001),
+    IS_BOOTSTRAPPING(0x1002),
+    TRUNCATE_ERROR  (0x1003),
+    WRITE_TIMEOUT   (0x1100),
+    READ_TIMEOUT    (0x1200),
+
+    // 2xx: problem validating the request
+    SYNTAX_ERROR    (0x2000),
+    UNAUTHORIZED    (0x2100),
+    INVALID         (0x2200),
+    CONFIG_ERROR    (0x2300),
+    ALREADY_EXISTS  (0x2400),
+    UNPREPARED      (0x2500);
+
+    public final int value;
+    private static final Map<Integer, ExceptionCode> valueToCode = new HashMap<Integer, ExceptionCode>(ExceptionCode.values().length);
+    static {
+        for (ExceptionCode code : ExceptionCode.values())
+            valueToCode.put(code.value, code);
+    }
+
+    private ExceptionCode(int value) {
+        this.value = value;
+    }
+
+    public static ExceptionCode fromValue(int value) {
+        ExceptionCode code = valueToCode.get(value);
+        if (code == null)
+            throw new DriverInternalError(String.format("Unknown error code %d", value));
+        return code;
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ExecutionInfo.java b/driver-core/src/main/java/com/datastax/driver/core/ExecutionInfo.java
new file mode 100644
index 00000000000..20698c35e18
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/ExecutionInfo.java
@@ -0,0 +1,190 @@
+/*
+ *      Copyright (C) 2012-2015 DataStax Inc.
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+package com.datastax.driver.core;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import com.datastax.driver.core.utils.Bytes;
+
+/**
+ * Basic information on the execution of a query.
+ */
+public class ExecutionInfo {
+    private final List<Host> triedHosts;
+    private final ConsistencyLevel achievedConsistency;
+    private final QueryTrace trace;
+    private final ByteBuffer pagingState;
+    private final ProtocolVersion protocolVersion;
+    private final Statement statement;
+    private volatile boolean schemaInAgreement;
+
+    private ExecutionInfo(List<Host> triedHosts, ConsistencyLevel achievedConsistency, QueryTrace trace, ByteBuffer pagingState, ProtocolVersion protocolVersion, Statement statement, boolean schemaAgreement) {
+        this.triedHosts = triedHosts;
+        this.achievedConsistency = achievedConsistency;
+        this.trace = trace;
+        this.pagingState = pagingState;
+        this.protocolVersion = protocolVersion;
+        this.statement = statement;
+        this.schemaInAgreement = schemaAgreement;
+    }
+
+    ExecutionInfo(List<Host> triedHosts) {
+        this(triedHosts, null, null, null, null, null, true);
+    }
+
+    ExecutionInfo withTrace(QueryTrace newTrace) {
+        return new ExecutionInfo(triedHosts, achievedConsistency, newTrace, pagingState, protocolVersion, statement, schemaInAgreement);
+    }
+
+    ExecutionInfo withAchievedConsistency(ConsistencyLevel newConsistency) {
+        return new ExecutionInfo(triedHosts, newConsistency, trace, pagingState, protocolVersion, statement, schemaInAgreement);
+    }
+
+    ExecutionInfo withPagingState(ByteBuffer pagingState, ProtocolVersion protocolVersion) {
+        return new ExecutionInfo(triedHosts, achievedConsistency, trace, pagingState, protocolVersion, statement, schemaInAgreement);
+    }
+
+    ExecutionInfo withStatement(Statement statement) {
+        return new ExecutionInfo(triedHosts, achievedConsistency, trace, pagingState, protocolVersion, statement, schemaInAgreement);
+    }
+
+    /**
+     * The list of tried hosts for this query.
+     * 

+ * In general, this will be a singleton list with the host that coordinated + * that query. However: + *

    + *
+     * <ul>
+     *   <li>if a host is tried by the driver but is dead or in
+     *   error, that host is recorded and the query is retried;</li>
+     *   <li>on a timeout or unavailable exception, some
+     *   {@link com.datastax.driver.core.policies.RetryPolicy} may retry the
+     *   query on the same host, so the same host might appear twice.</li>
+     *   <li>if {@link com.datastax.driver.core.policies.SpeculativeExecutionPolicy speculative executions}
+     *   are enabled, other hosts might have been tried speculatively as well.</li>
+     * </ul>
+     * <p>

+ * If you are only interested in fetching the final (and often only) node + * coordinating the query, {@link #getQueriedHost} provides a shortcut to + * fetch the last element of the list returned by this method. + * + * @return the list of tried hosts for this query, in the order tried. + */ + public List getTriedHosts() { + return triedHosts; + } + + /** + * Return the Cassandra host that coordinated this query. + *

+     * This is a shortcut for {@code getTriedHosts().get(getTriedHosts().size() - 1)}.
+     *
+     * @return the Cassandra host that coordinated this query.
+     */
+    public Host getQueriedHost() {
+        return triedHosts.get(triedHosts.size() - 1);
+    }
+
+    /**
+     * If the query returned without achieving the requested consistency level
+     * due to the {@link com.datastax.driver.core.policies.RetryPolicy}, this
+     * returns the biggest consistency level that has been actually achieved by
+     * the query.
+     *

+ * Note that the default {@code RetryPolicy} + * ({@link com.datastax.driver.core.policies.DefaultRetryPolicy}) + * will never allow a query to be successful without achieving the + * initially requested consistency level and hence with that default + * policy, this method will always return {@code null}. However, it + * might occasionally return a non-{@code null} with say, + * {@link com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy}. + * + * @return {@code null} if the original consistency level of the query was + * achieved, or the consistency level that was ultimately achieved if the + * {@code RetryPolicy} triggered a retry at a different consistency level + * than the original one. + */ + public ConsistencyLevel getAchievedConsistencyLevel() { + return achievedConsistency; + } + + /** + * The query trace if tracing was enabled on this query. + * + * @return the {@code QueryTrace} object for this query if tracing was + * enable for this query, or {@code null} otherwise. + */ + public QueryTrace getQueryTrace() { + return trace; + } + + /** + * The paging state of the query. + * + * This object represents the next page to be fetched if this query is + * multi page. It can be saved and reused later on the same statement. + * + * @return the paging state or null if there is no next page. + * + * @see Statement#setPagingState(PagingState) + */ + public PagingState getPagingState() { + if (this.pagingState == null) + return null; + return new PagingState(this.pagingState, this.statement, this.protocolVersion); + } + + /** + * Returns the "raw" paging state of the query. + * + * Contrary to {@link #getPagingState()}, there will be no validation when + * this is later reinjected into a statement. + * + * @return the paging state or null if there is no next page. 
+ * + * @see Statement#setPagingStateUnsafe(byte[]) + */ + public byte[] getPagingStateUnsafe() { + if (this.pagingState == null) + return null; + return Bytes.getArray(this.pagingState); + } + + /** + * Whether the cluster had reached schema agreement after the execution of this query. + * + * After a successful schema-altering query (ex: creating a table), the driver + * will check if the cluster's nodes agree on the new schema version. If not, + * it will keep retrying for a given delay (configurable via + * {@link Cluster.Builder#withMaxSchemaAgreementWaitSeconds(int)}). + *

+ * If this method returns {@code false}, clients can call {@link Metadata#checkSchemaAgreement()} + * later to perform the check manually. + *

+ * Note that the schema agreement check is only performed for schema-altering queries + * For other query types, this method will always return {@code true}. + * + * @return whether the cluster reached schema agreement, or {@code true} for a non + * schema-altering statement. + */ + public boolean isSchemaInAgreement() { + return schemaInAgreement; + } + + void setSchemaInAgreement(boolean schemaAgreement) { + this.schemaInAgreement = schemaAgreement; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Frame.java b/driver-core/src/main/java/com/datastax/driver/core/Frame.java new file mode 100644 index 00000000000..9097bc1eeb9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Frame.java @@ -0,0 +1,294 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.EnumSet; +import java.util.List; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.*; + +import com.datastax.driver.core.exceptions.DriverInternalError; + +class Frame { + + public final Header header; + public final ByteBuf body; + + /** + * On-wire frame. 
+ * Frames for protocol versions 1+2 are defined as: + * + * 0 8 16 24 32 + * +---------+---------+---------+---------+ + * | version | flags | stream | opcode | + * +---------+---------+---------+---------+ + * | length | + * +---------+---------+---------+---------+ + * + * Frames for protocol version 3 are defined as: + * + * 0 8 16 24 32 + * +---------+---------+---------+---------+ + * | version | flags | stream | + * +---------+---------+---------+---------+ + * | opcode | length | + * +---------+---------+---------+---------+ + * | length | + * +---------+ + */ + private Frame(Header header, ByteBuf body) { + this.header = header; + this.body = body; + } + + private static Frame create(ByteBuf fullFrame) { + assert fullFrame.readableBytes() >= 1 : String.format("Frame too short (%d bytes)", fullFrame.readableBytes()); + + int versionBytes = fullFrame.readByte(); + // version first byte is the "direction" of the frame (request or response) + ProtocolVersion version = ProtocolVersion.fromInt(versionBytes & 0x7F); + int hdrLen = Header.lengthFor(version); + assert fullFrame.readableBytes() >= (hdrLen-1) : String.format("Frame too short (%d bytes)", fullFrame.readableBytes()); + + int flags = fullFrame.readByte(); + int streamId = readStreamid(fullFrame, version); + int opcode = fullFrame.readByte(); + int length = fullFrame.readInt(); + assert length == fullFrame.readableBytes(); + + Header header = new Header(version, flags, streamId, opcode); + return new Frame(header, fullFrame); + } + + private static int readStreamid(ByteBuf fullFrame, ProtocolVersion version) { + switch (version) { + case V1: + case V2: + return fullFrame.readByte(); + case V3: + return fullFrame.readShort(); + default: + throw version.unsupported(); + } + } + + public static Frame create(ProtocolVersion version, int opcode, int streamId, EnumSet flags, ByteBuf body) { + Header header = new Header(version, flags, streamId, opcode); + return new Frame(header, body); + } + + public static 
class Header { + + public final ProtocolVersion version; + public final EnumSet flags; + public final int streamId; + public final int opcode; + + private Header(ProtocolVersion version, int flags, int streamId, int opcode) { + this(version, Flag.deserialize(flags), streamId, opcode); + } + + private Header(ProtocolVersion version, EnumSet flags, int streamId, int opcode) { + this.version = version; + this.flags = flags; + this.streamId = streamId; + this.opcode = opcode; + } + + public static int lengthFor(ProtocolVersion version) { + switch (version) { + case V1: + case V2: + return 8; + case V3: + return 9; + default: + throw version.unsupported(); + } + } + + public static enum Flag + { + // The order of that enum matters!! + COMPRESSED, + TRACING; + + public static EnumSet deserialize(int flags) { + EnumSet set = EnumSet.noneOf(Flag.class); + Flag[] values = Flag.values(); + for (int n = 0; n < 8; n++) { + if ((flags & (1 << n)) != 0) + set.add(values[n]); + } + return set; + } + + public static int serialize(EnumSet flags) { + int i = 0; + for (Flag flag : flags) + i |= 1 << flag.ordinal(); + return i; + } + } + } + + public Frame with(ByteBuf newBody) { + return new Frame(header, newBody); + } + + public static final class Decoder extends ByteToMessageDecoder { + static final DecoderForStreamIdSize decoderV1 = new DecoderForStreamIdSize(1); + static final DecoderForStreamIdSize decoderV3 = new DecoderForStreamIdSize(2); + + @Override + protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List out) throws Exception { + if (buffer.readableBytes() < 1) + return; + + int version = buffer.getByte(0); + // version first bit is the "direction" of the frame (request or response) + version = version & 0x7F; + + DecoderForStreamIdSize decoder = (version >= 3) ? 
decoderV3 : decoderV1; + Object frame = decoder.decode(ctx, buffer); + if (frame != null) + out.add(frame); + } + + static class DecoderForStreamIdSize extends LengthFieldBasedFrameDecoder { + private static final int MAX_FRAME_LENGTH = 256 * 1024 * 1024; // 256 MB + private final int opcodeOffset; + + DecoderForStreamIdSize(int streamIdSize) { + super(MAX_FRAME_LENGTH, /*lengthOffset=*/ 3 + streamIdSize, 4, 0, 0, true); + this.opcodeOffset = 2 + streamIdSize; + } + + @Override + protected Object decode(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception { + try { + if (buffer.readableBytes() < opcodeOffset + 1) + return null; + + // Validate the opcode (this will throw if it's not a response) + Message.Response.Type.fromOpcode(buffer.getByte(opcodeOffset)); + + ByteBuf frame = (ByteBuf) super.decode(ctx, buffer); + if (frame == null) { + return null; + } + // Do not deallocate `frame` just yet, because it is stored as Frame.body and will be used + // in Message.ProtocolDecoder or Frame.Decompressor if compression is enabled (we deallocate + // it there). + return Frame.create(frame); + } catch (CorruptedFrameException e) { + throw new DriverInternalError(e.getMessage()); + } catch (TooLongFrameException e) { + throw new DriverInternalError(e.getMessage()); + } + } + } + } + + @ChannelHandler.Sharable + public static class Encoder extends MessageToMessageEncoder { + + @Override + protected void encode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception { + ProtocolVersion protocolVersion = frame.header.version; + ByteBuf header = ctx.alloc().ioBuffer(Frame.Header.lengthFor(protocolVersion)); + // We don't bother with the direction, we only send requests. 
+ header.writeByte(frame.header.version.toInt()); + header.writeByte(Header.Flag.serialize(frame.header.flags)); + writeStreamId(frame.header.streamId, header, protocolVersion); + header.writeByte(frame.header.opcode); + header.writeInt(frame.body.readableBytes()); + + out.add(header); + out.add(frame.body); + } + + private void writeStreamId(int streamId, ByteBuf header, ProtocolVersion protocolVersion) { + switch (protocolVersion) { + case V1: + case V2: + header.writeByte(streamId); + break; + case V3: + header.writeShort(streamId); + break; + default: + throw protocolVersion.unsupported(); + } + } + } + + public static class Decompressor extends MessageToMessageDecoder { + + private final FrameCompressor compressor; + + public Decompressor(FrameCompressor compressor) { + assert compressor != null; + this.compressor = compressor; + } + + @Override + protected void decode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception { + if (frame.header.flags.contains(Header.Flag.COMPRESSED)) { + // All decompressors allocate a new buffer for the decompressed data, so this is the last time + // we have a reference to the compressed body (and therefore a chance to release it). 
+ ByteBuf compressedBody = frame.body; + try { + out.add(compressor.decompress(frame)); + } finally { + compressedBody.release(); + } + } else { + out.add(frame); + } + } + } + + public static class Compressor extends MessageToMessageEncoder { + + private final FrameCompressor compressor; + + public Compressor(FrameCompressor compressor) { + assert compressor != null; + this.compressor = compressor; + } + + @Override + protected void encode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception { + // Never compress STARTUP messages + if (frame.header.opcode == Message.Request.Type.STARTUP.opcode) { + out.add(frame); + } else { + frame.header.flags.add(Header.Flag.COMPRESSED); + // See comment in decode() + ByteBuf uncompressedBody = frame.body; + try { + out.add(compressor.compress(frame)); + } finally { + uncompressedBody.release(); + } + } + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/FrameCompressor.java b/driver-core/src/main/java/com/datastax/driver/core/FrameCompressor.java new file mode 100644 index 00000000000..4b96e05d4c5 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/FrameCompressor.java @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.io.IOException; + +import io.netty.buffer.Unpooled; +import net.jpountz.lz4.LZ4Factory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.xerial.snappy.Snappy; + +import com.datastax.driver.core.exceptions.DriverInternalError; + +abstract class FrameCompressor { + + private static final Logger logger = LoggerFactory.getLogger(FrameCompressor.class); + + public abstract Frame compress(Frame frame) throws IOException; + public abstract Frame decompress(Frame frame) throws IOException; + + public static class SnappyCompressor extends FrameCompressor { + + public static final SnappyCompressor instance; + static { + SnappyCompressor i; + try { + i = new SnappyCompressor(); + } catch (NoClassDefFoundError e) { + i = null; + logger.warn("Cannot find Snappy class, you should make sure the Snappy library is in the classpath if you intend to use it. Snappy compression will not be available for the protocol."); + } catch (Throwable e) { + i = null; + logger.warn("Error loading Snappy library ({}). 
Snappy compression will not be available for the protocol.", e.toString()); + } + instance = i; + } + + private SnappyCompressor() { + // this would throw java.lang.NoClassDefFoundError if Snappy class + // wasn't found at runtime which should be processed by the calling method + Snappy.getNativeLibraryVersion(); + } + + public Frame compress(Frame frame) throws IOException { + byte[] input = CBUtil.readRawBytes(frame.body); + byte[] output = new byte[Snappy.maxCompressedLength(input.length)]; + + int written = Snappy.compress(input, 0, input.length, output, 0); + return frame.with(Unpooled.wrappedBuffer(output, 0, written)); + } + + public Frame decompress(Frame frame) throws IOException { + byte[] input = CBUtil.readRawBytes(frame.body); + + if (!Snappy.isValidCompressedBuffer(input, 0, input.length)) + throw new DriverInternalError("Provided frame does not appear to be Snappy compressed"); + + byte[] output = new byte[Snappy.uncompressedLength(input)]; + int size = Snappy.uncompress(input, 0, input.length, output, 0); + return frame.with(Unpooled.wrappedBuffer(output, 0, size)); + } + } + + public static class LZ4Compressor extends FrameCompressor { + + public static final LZ4Compressor instance; + static { + LZ4Compressor i; + try { + i = new LZ4Compressor(); + } catch (NoClassDefFoundError e) { + i = null; + logger.warn("Cannot find LZ4 class, you should make sure the LZ4 library is in the classpath if you intend to use it. LZ4 compression will not be available for the protocol."); + } catch (Throwable e) { + i = null; + logger.warn("Error loading LZ4 library ({}). 
LZ4 compression will not be available for the protocol.", e.toString()); + } + instance = i; + } + + private static final int INTEGER_BYTES = 4; + private final net.jpountz.lz4.LZ4Compressor compressor; + private final net.jpountz.lz4.LZ4FastDecompressor decompressor; + + private LZ4Compressor() { + final LZ4Factory lz4Factory = LZ4Factory.fastestInstance(); + logger.info("Using {}", lz4Factory.toString()); + compressor = lz4Factory.fastCompressor(); + decompressor = lz4Factory.fastDecompressor(); + } + + public Frame compress(Frame frame) throws IOException { + byte[] input = CBUtil.readRawBytes(frame.body); + + int maxCompressedLength = compressor.maxCompressedLength(input.length); + byte[] output = new byte[INTEGER_BYTES + maxCompressedLength]; + + output[0] = (byte) (input.length >>> 24); + output[1] = (byte) (input.length >>> 16); + output[2] = (byte) (input.length >>> 8); + output[3] = (byte) (input.length); + + try { + int written = compressor.compress(input, 0, input.length, output, INTEGER_BYTES, maxCompressedLength); + return frame.with(Unpooled.wrappedBuffer(output, 0, INTEGER_BYTES + written)); + } catch (Exception e) { + throw new IOException(e); + } + } + + public Frame decompress(Frame frame) throws IOException { + byte[] input = CBUtil.readRawBytes(frame.body); + + int uncompressedLength = ((input[0] & 0xFF) << 24) + | ((input[1] & 0xFF) << 16) + | ((input[2] & 0xFF) << 8) + | ((input[3] & 0xFF)); + + byte[] output = new byte[uncompressedLength]; + + try { + int read = decompressor.decompress(input, INTEGER_BYTES, output, 0, uncompressedLength); + if (read != input.length - INTEGER_BYTES) + throw new IOException("Compressed lengths mismatch"); + + return frame.with(Unpooled.wrappedBuffer(output)); + } catch (Exception e) { + throw new IOException(e); + } + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/GettableByIndexData.java b/driver-core/src/main/java/com/datastax/driver/core/GettableByIndexData.java new file mode 100644 
index 00000000000..ba8e2d6dc57 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/GettableByIndexData.java @@ -0,0 +1,368 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.google.common.reflect.TypeToken; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * Collection of (typed) CQL values that can be retrieved by index (starting a 0). + */ +public interface GettableByIndexData { + + /** + * Returns whether the {@code i}th value is NULL. + * + * @param i the index ({@code 0 <= i < size()}) of the value to check. + * @return whether the {@code i}th value is NULL. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for + * this object. + */ + public boolean isNull(int i); + + /** + * Returns the {@code i}th value as a boolean. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the boolean value of the {@code i}th element. If the + * value is NULL, {@code false} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type BOOLEAN. 
+ */ + public boolean getBool(int i); + + /** + * Returns the {@code i}th value as an integer. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as an integer. If the + * value is NULL, {@code 0} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type INT. + */ + public int getInt(int i); + + /** + * Returns the {@code i}th value as a long. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a long. If the + * value is NULL, {@code 0L} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type BIGINT or COUNTER. + */ + public long getLong(int i); + + /** + * Returns the {@code i}th value as a date. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a data. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type TIMESTAMP. + */ + public Date getDate(int i); + + /** + * Returns the {@code i}th value as a float. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a float. If the + * value is NULL, {@code 0.0f} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type FLOAT. + */ + public float getFloat(int i); + + /** + * Returns the {@code i}th value as a double. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a double. 
If the + * value is NULL, {@code 0.0} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type DOUBLE. + */ + public double getDouble(int i); + + /** + * Returns the {@code i}th value as a ByteBuffer. + * + * Note: this method always return the bytes composing the value, even if + * the column is not of type BLOB. That is, this method never throw an + * InvalidTypeException. However, if the type is not BLOB, it is up to the + * caller to handle the returned value correctly. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a ByteBuffer. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public ByteBuffer getBytesUnsafe(int i); + + /** + * Returns the {@code i}th value as a byte array. + *

+ * Note that this method validate that the column is of type BLOB. If you want to retrieve + * the bytes for any type, use {@link #getBytesUnsafe(int)} instead. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a byte array. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} type is not of type BLOB. + */ + public ByteBuffer getBytes(int i); + + /** + * Returns the {@code i}th value as a string. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a string. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} type is none of: + * VARCHAR, TEXT or ASCII. + */ + public String getString(int i); + + /** + * Returns the {@code i}th value as a variable length integer. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a variable + * length integer. If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type VARINT. + */ + public BigInteger getVarint(int i); + + /** + * Returns the {@code i}th value as a variable length decimal. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a variable + * length decimal. If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type DECIMAL. 
+ */ + public BigDecimal getDecimal(int i); + + /** + * Returns the {@code i}th value as a UUID. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a UUID. + * If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type UUID + * or TIMEUUID. + */ + public UUID getUUID(int i); + + /** + * Returns the {@code i}th value as an InetAddress. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as an InetAddress. + * If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type INET. + */ + public InetAddress getInet(int i); + + /** + * Returns the {@code i}th value as a list. + *

+ * If the type of the elements is generic, use {@link #getList(int, TypeToken)}. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param elementsClass the class for the elements of the list to retrieve. + * @return the value of the {@code i}th element as a list of + * {@code T} objects. If the value is NULL, an empty list is + * returned (note that Cassandra makes no difference between an empty list + * and column of type list that is not set). The returned list is immutable. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a list or if its + * elements are not of class {@code T}. + */ + public List getList(int i, Class elementsClass); + + /** + * Returns the {@code i}th value as a list. + *

+ * Use this variant with nested collections, which produce a generic element type: + *

+     * {@code List> l = row.getList(1, new TypeToken>() {});}
+     * 
+ * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param elementsType the type of the elements of the list to retrieve. + * @return the value of the {@code i}th element as a list of + * {@code T} objects. If the value is NULL, an empty list is + * returned (note that Cassandra makes no difference between an empty list + * and column of type list that is not set). The returned list is immutable. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a list or if its + * elements are not of class {@code T}. + */ + public List getList(int i, TypeToken elementsType); + + /** + * Returns the {@code i}th value as a set. + *

+ * If the type of the elements is generic, use {@link #getSet(int, TypeToken)}. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param elementsClass the class for the elements of the set to retrieve. + * @return the value of the {@code i}th element as a set of + * {@code T} objects. If the value is NULL, an empty set is + * returned (note that Cassandra makes no difference between an empty set + * and column of type set that is not set). The returned set is immutable. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a set or if its + * elements are not of class {@code T}. + */ + public Set getSet(int i, Class elementsClass); + + /** + * Returns the {@code i}th value as a set. + *

+ * Use this variant with nested collections, which produce a generic element type: + *

+     * {@code Set> l = row.getSet(1, new TypeToken>() {});}
+     * 
+ * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param elementsType the type for the elements of the set to retrieve. + * @return the value of the {@code i}th element as a set of + * {@code T} objects. If the value is NULL, an empty set is + * returned (note that Cassandra makes no difference between an empty set + * and column of type set that is not set). The returned set is immutable. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a set or if its + * elements are not of class {@code T}. + */ + public Set getSet(int i, TypeToken elementsType); + + /** + * Returns the {@code i}th value as a map. + *

+ * If the type of the keys and/or values is generic, use {@link #getMap(int, TypeToken, TypeToken)}. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param keysClass the class for the keys of the map to retrieve. + * @param valuesClass the class for the values of the map to retrieve. + * @return the value of the {@code i}th element as a map of + * {@code K} to {@code V} objects. If the value is NULL, + * an empty map is returned (note that Cassandra makes no difference + * between an empty map and column of type map that is not set). The + * returned map is immutable. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a map, if its + * keys are not of class {@code K} or if its values are not of + * class {@code V}. + */ + public Map getMap(int i, Class keysClass, Class valuesClass); + + + /** + * Returns the {@code i}th value as a map. + *

+ * Use this variant with nested collections, which produce a generic element type: + *

+     * {@code Map> l = row.getMap(1, TypeToken.of(Integer.class), new TypeToken>() {});}
+     * 
+ * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param keysType the type for the keys of the map to retrieve. + * @param valuesType the type for the values of the map to retrieve. + * @return the value of the {@code i}th element as a map of + * {@code K} to {@code V} objects. If the value is NULL, + * an empty map is returned (note that Cassandra makes no difference + * between an empty map and column of type map that is not set). The + * returned map is immutable. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a map, if its + * keys are not of class {@code K} or if its values are not of + * class {@code V}. + */ + public Map getMap(int i, TypeToken keysType, TypeToken valuesType); + + /** + * Return the {@code i}th value as a UDT value. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a UDT value. If the value is NULL, + * then {@code null} will be returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a UDT value. + */ + public UDTValue getUDTValue(int i); + + /** + * Return the {@code i}th value as a tuple value. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a tuple value. If the value is NULL, + * then {@code null} will be returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a tuple value. + */ + public TupleValue getTupleValue(int i); + + /** + * Returns the {@code i}th value as the Java type matching its CQL type. + * + * @param i the index to retrieve. + * @return the value of the {@code i}th value as the Java type matching its CQL type. 
+ * If the value is NULL and is a simple type, UDT or tuple, {@code null} is returned. + * If it is NULL and is a collection type, an empty (immutable) collection is returned. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public Object getObject(int i); +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/GettableByNameData.java b/driver-core/src/main/java/com/datastax/driver/core/GettableByNameData.java new file mode 100644 index 00000000000..872b4c656a9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/GettableByNameData.java @@ -0,0 +1,367 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.google.common.reflect.TypeToken; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * Collection of (typed) CQL values that can be retrieved by name. + */ +public interface GettableByNameData { + + /** + * Returns whether the value for {@code name} is NULL. + * + * @param name the name to check. + * @return whether the value for {@code name} is NULL. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. 
+ */ + public boolean isNull(String name); + + /** + * Returns the value for {@code name} as a boolean. + * + * @param name the name to retrieve. + * @return the boolean value for {@code name}. If the value is NULL, + * {@code false} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not of type BOOLEAN. + */ + public boolean getBool(String name); + + /** + * Returns the value for {@code name} as an integer. + * + * @param name the name to retrieve. + * @return the value for {@code name} as an integer. If the value is NULL, + * {@code 0} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not of type INT. + */ + public int getInt(String name); + + /** + * Returns the value for {@code name} as a long. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a long. If the value is NULL, + * {@code 0L} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code i} is not of type BIGINT or COUNTER. + */ + public long getLong(String name); + + /** + * Returns the value for {@code name} as a date. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a date. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not of type TIMESTAMP. + */ + public Date getDate(String name); + + /** + * Returns the value for {@code name} as a float. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a float. If the value is NULL, + * {@code 0.0f} is returned. 
+ * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not of type FLOAT. + */ + public float getFloat(String name); + + /** + * Returns the value for {@code name} as a double. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a double. If the value is NULL, + * {@code 0.0} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not of type DOUBLE. + */ + public double getDouble(String name); + + /** + * Returns the value for {@code name} as a ByteBuffer. + * + * Note: this method always return the bytes composing the value, even if + * the column is not of type BLOB. That is, this method never throw an + * InvalidTypeException. However, if the type is not BLOB, it is up to the + * caller to handle the returned value correctly. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a ByteBuffer. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + */ + public ByteBuffer getBytesUnsafe(String name); + + /** + * Returns the value for {@code name} as a byte array. + *

+ * Note that this method validate that the column is of type BLOB. If you want to retrieve + * the bytes for any type, use {@link #getBytesUnsafe(String)} instead. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a byte array. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code i} type is not of type BLOB. + */ + public ByteBuffer getBytes(String name); + + /** + * Returns the value for {@code name} as a string. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a string. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} type is none of: + * VARCHAR, TEXT or ASCII. + */ + public String getString(String name); + + /** + * Returns the value for {@code name} as a variable length integer. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a variable length integer. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not of type VARINT. + */ + public BigInteger getVarint(String name); + + /** + * Returns the value for {@code name} as a variable length decimal. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a variable length decimal. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not of type DECIMAL. + */ + public BigDecimal getDecimal(String name); + + /** + * Returns the value for {@code name} as a UUID. + * + * @param name the name to retrieve. 
+ * @return the value for {@code name} as a UUID. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not of type + * UUID or TIMEUUID. + */ + public UUID getUUID(String name); + + /** + * Returns the value for {@code name} as an InetAddress. + * + * @param name the name to retrieve. + * @return the value for {@code name} as an InetAddress. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not of type + * INET. + */ + public InetAddress getInet(String name); + + /** + * Returns the value for {@code name} as a list. + *

+ * If the type of the elements is generic, use {@link #getList(String, TypeToken)}. + * + * @param name the name to retrieve. + * @param elementsClass the class for the elements of the list to retrieve. + * @return the value of the {@code i}th element as a list of + * {@code T} objects. If the value is NULL, an empty list is + * returned (note that Cassandra makes no difference between an empty list + * and column of type list that is not set). The returned list is immutable. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not a list or if its + * elements are not of class {@code T}. + */ + public List getList(String name, Class elementsClass); + + /** + * Returns the value for {@code name} as a list. + *

+ * Use this variant with nested collections, which produce a generic element type: + *

+     * {@code List> l = row.getList("theColumn", new TypeToken>() {});}
+     * 
+ * + * @param name the name to retrieve. + * @param elementsType the type for the elements of the list to retrieve. + * @return the value of the {@code i}th element as a list of + * {@code T} objects. If the value is NULL, an empty list is + * returned (note that Cassandra makes no difference between an empty list + * and column of type list that is not set). The returned list is immutable. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not a list or if its + * elements are not of class {@code T}. + */ + public List getList(String name, TypeToken elementsType); + + /** + * Returns the value for {@code name} as a set. + *

+ * If the type of the elements is generic, use {@link #getSet(String, TypeToken)}. + * + * @param name the name to retrieve. + * @param elementsClass the class for the elements of the set to retrieve. + * @return the value of the {@code i}th element as a set of + * {@code T} objects. If the value is NULL, an empty set is + * returned (note that Cassandra makes no difference between an empty set + * and column of type set that is not set). The returned set is immutable. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not a set or if its + * elements are not of class {@code T}. + */ + public Set getSet(String name, Class elementsClass); + + /** + * Returns the value for {@code name} as a set. + *

+ * Use this variant with nested collections, which produce a generic element type: + *

+     * {@code Set> l = row.getSet("theColumn", new TypeToken>() {});}
+     * 
+ * + * @param name the name to retrieve. + * @param elementsType the type for the elements of the set to retrieve. + * @return the value of the {@code i}th element as a set of + * {@code T} objects. If the value is NULL, an empty set is + * returned (note that Cassandra makes no difference between an empty set + * and column of type set that is not set). The returned set is immutable. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not a set or if its + * elements are not of class {@code T}. + */ + public Set getSet(String name, TypeToken elementsType); + + /** + * Returns the value for {@code name} as a map. + *

+ * If the type of the keys and/or values is generic, use {@link #getMap(String, TypeToken, TypeToken)}. + * + * @param name the name to retrieve. + * @param keysClass the class for the keys of the map to retrieve. + * @param valuesClass the class for the values of the map to retrieve. + * @return the value of {@code name} as a map of + * {@code K} to {@code V} objects. If the value is NULL, + * an empty map is returned (note that Cassandra makes no difference + * between an empty map and column of type map that is not set). The + * returned map is immutable. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not a map, if its + * keys are not of class {@code K} or if its values are not of + * class {@code V}. + */ + public Map getMap(String name, Class keysClass, Class valuesClass); + + /** + * Returns the value for {@code name} as a map. + *

+ * Use this variant with nested collections, which produce a generic element type: + *

+     * {@code Map> l = row.getMap("theColumn", TypeToken.of(Integer.class), new TypeToken>() {});}
+     * 
+ * + * @param name the name to retrieve. + * @param keysType the class for the keys of the map to retrieve. + * @param valuesType the class for the values of the map to retrieve. + * @return the value of {@code name} as a map of + * {@code K} to {@code V} objects. If the value is NULL, + * an empty map is returned (note that Cassandra makes no difference + * between an empty map and column of type map that is not set). The + * returned map is immutable. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code name} is not a map, if its + * keys are not of class {@code K} or if its values are not of + * class {@code V}. + */ + public Map getMap(String name, TypeToken keysType, TypeToken valuesType); + + /** + * Return the value for {@code name} as a UDT value. + * + * @param name the name to retrieve. + * @return the value of {@code name} as a UDT value. If the value is NULL, + * then {@code null} will be returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code i} is not a UDT value. + */ + public UDTValue getUDTValue(String name); + + /** + * Return the value for {@code name} as a tuple value. + * + * @param name the name to retrieve. + * @return the value of {@code name} as a tuple value. If the value is NULL, + * then {@code null} will be returned. + * + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws InvalidTypeException if value {@code i} is not a tuple value. + */ + public TupleValue getTupleValue(String name); + + /** + * Returns the value for {@code name} as the Java type matching its CQL type. + * + * @param name the name to retrieve. + * @return the value of the {@code i}th value as the Java type matching its CQL type. + * If the value is NULL and is a simple type, UDT or tuple, {@code null} is returned. 
+ * If it is NULL and is a collection type, an empty (immutable) collection is returned. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + */ + Object getObject(String name); +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/GettableData.java b/driver-core/src/main/java/com/datastax/driver/core/GettableData.java new file mode 100644 index 00000000000..3b875ead72f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/GettableData.java @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * Collection of (typed) CQL values that can be retrieved either by index (starting a 0) or by name. + */ +public interface GettableData extends GettableByIndexData, GettableByNameData { +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java new file mode 100644 index 00000000000..a92a4fb2083 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -0,0 +1,354 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; + +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A Cassandra node. + * + * This class keeps the information the driver maintain on a given Cassandra node. + */ +public class Host { + + private static final Logger logger = LoggerFactory.getLogger(Host.class); + + + private final InetSocketAddress address; + + enum State { ADDED, DOWN, UP } + volatile State state; + /** Ensures state change notifications for that host are handled serially */ + final ReentrantLock notificationsLock = new ReentrantLock(true); + + private final ConvictionPolicy policy; + private final Cluster.Manager manager; + + // Tracks later reconnection attempts to that host so we avoid adding multiple tasks. + final AtomicReference> reconnectionAttempt = new AtomicReference>(); + + final ExecutionInfo defaultExecutionInfo; + + private volatile String datacenter; + private volatile String rack; + private volatile VersionNumber cassandraVersion; + + // The listen_address (really, the broadcast one) as know by Cassandra. 
We use that internally because + // that's the 'peer' in the 'System.peers' table and avoids querying the full peers table in + // ControlConnection.refreshNodeInfo. We don't want to expose however because we don't always have the info + // (partly because the 'System.local' doesn't have it for some weird reason for instance). + volatile InetAddress listenAddress; + + private volatile Set tokens; + + // ClusterMetadata keeps one Host object per inet address and we rely on this (more precisely, + // we rely on the fact that we can use Object equality as a valid equality), so don't use + // that constructor but ClusterMetadata.getHost instead. + Host(InetSocketAddress address, ConvictionPolicy.Factory policy, Cluster.Manager manager) { + if (address == null || policy == null) + throw new NullPointerException(); + + this.address = address; + this.policy = policy.create(this); + this.manager = manager; + this.defaultExecutionInfo = new ExecutionInfo(ImmutableList.of(this)); + this.state = State.ADDED; + } + + void setLocationInfo(String datacenter, String rack) { + this.datacenter = datacenter; + this.rack = rack; + } + + void setVersionAndListenAdress(String cassandraVersion, InetAddress listenAddress) { + if (listenAddress != null) + this.listenAddress = listenAddress; + + if (cassandraVersion == null) + return; + try { + this.cassandraVersion = VersionNumber.parse(cassandraVersion); + } catch (IllegalArgumentException e) { + logger.warn("Error parsing Cassandra version {}. This shouldn't have happened", cassandraVersion); + } + } + + /** + * Returns the node address. + *

+ * This is a shortcut for {@code getSocketAddress().getAddress()}. + * + * @return the node {@link InetAddress}. + */ + public InetAddress getAddress() { + return address.getAddress(); + } + + /** + * Returns the node socket address. + * + * @return the node {@link InetSocketAddress}. + */ + public InetSocketAddress getSocketAddress() { + return address; + } + + /** + * Returns the name of the datacenter this host is part of. + *

+ * The returned datacenter name is the one as known by Cassandra. + * It is also possible for this information to be unavailable. In that + * case this method returns {@code null}, and the caller should always be aware + * of this possibility. + * + * @return the Cassandra datacenter name or null if datacenter is unavailable. + */ + public String getDatacenter() { + return datacenter; + } + + /** + * Returns the name of the rack this host is part of. + *

+ * The returned rack name is the one as known by Cassandra. + * It is also possible for this information to be unavailable. In that case + * this method returns {@code null}, and the caller should always aware of this + * possibility. + * + * @return the Cassandra rack name or null if the rack is unavailable + */ + public String getRack() { + return rack; + } + + /** + * The Cassandra version the host is running. + *

+ * As for other host information fetch from Cassandra above, the returned + * version can theoretically be null if the information is unavailable. + * + * @return the Cassandra version the host is running. + */ + public VersionNumber getCassandraVersion() { + return cassandraVersion; + } + + /** + * Returns the tokens that this host owns. + * + * @return the (immutable) set of tokens. + */ + public Set getTokens() { + return tokens; + } + + void setTokens(Set tokens) { + this.tokens = tokens; + } + + /** + * Returns whether the host is considered up by the driver. + *

+ * Please note that this is only the view of the driver and may not reflect + * reality. In particular a node can be down but the driver hasn't detected + * it yet, or it can have been restarted and the driver hasn't detected it + * yet (in particular, for hosts to which the driver does not connect (because + * the {@code LoadBalancingPolicy.distance} method says so), this information + * may be durably inaccurate). This information should thus only be + * considered as best effort and should not be relied upon too strongly. + * + * @return whether the node is considered up. + */ + public boolean isUp() { + return state == State.UP; + } + + /** + * Returns a description of the host's state, as seen by the driver. + *

+ * This is exposed for debugging purposes only; the format of this string might + * change between driver versions, so clients should not make any assumptions + * about it. + * + * @return a description of the host's state. + */ + public String getState() { + return state.name(); + } + + /** + * Returns a {@code ListenableFuture} representing the completion of the first + * reconnection attempt after a node has been suspected. + *

+ * This is useful in load balancing policies when there are no more live nodes and + * we are trying suspected nodes. + * + * @return the future. + * + * @deprecated the suspicion mechanism has been disabled. This will always return + * a completed future. + */ + @Deprecated + public ListenableFuture getInitialReconnectionAttemptFuture() { + return Futures.immediateFuture(null); + } + + /** + * Returns a {@code ListenableFuture} representing the completion of the reconnection + * attempts scheduled after a host is marked {@code DOWN}. + *

+ * If the caller cancels this future, the driver will not try to reconnect to + * this host until it receives an UP event for it. Note that this could mean never, if + * the node was marked down because of a driver-side error (e.g. read timeout) but no + * failure was detected by Cassandra. The caller might decide to trigger an explicit + * reconnection attempt at a later point with {@link #tryReconnectOnce()}. + * + * @return the future, or {@code null} if no reconnection attempt was in progress. + */ + public ListenableFuture getReconnectionAttemptFuture() { + return reconnectionAttempt.get(); + } + + /** + * Triggers an asynchronous reconnection attempt to this host. + *

+ * This method is intended for load balancing policies that mark hosts as {@link HostDistance#IGNORED IGNORED}, + * but still need a way to periodically check these hosts' states (UP / DOWN). + *

+ * For a host that is at distance {@code IGNORED}, this method will try to reconnect exactly once: if + * reconnection succeeds, the host is marked {@code UP}; otherwise, no further attempts will be scheduled. + * It has no effect if the node is already {@code UP}, or if a reconnection attempt is already in progress. + *

+ * Note that if the host is not a distance {@code IGNORED}, this method will trigger a periodic + * reconnection attempt if the reconnection fails. + */ + public void tryReconnectOnce() { + this.manager.startSingleReconnectionAttempt(this); + } + + @Override + public boolean equals(Object other) { + if (other instanceof Host) { + Host that = (Host)other; + return this.address.equals(that.address); + } + return false; + } + + @Override + public int hashCode() { + return address.hashCode(); + } + + boolean wasJustAdded() { + return state == State.ADDED; + } + + @Override + public String toString() { + return address.toString(); + } + + void setDown() { + state = State.DOWN; + } + + void setUp() { + policy.reset(); + state = State.UP; + } + + boolean signalConnectionFailure(ConnectionException exception) { + return policy.addFailure(exception); + } + + /** + * Interface for listeners that are interested in hosts added, up, down and + * removed events. + *

+ * It is possible for the same event to be fired multiple times, + * particularly for up or down events. Therefore, a listener should ignore + * the same event if it has already been notified of a node's state. + */ + public interface StateListener { + + /** + * Called when a new node is added to the cluster. + *

+ * The newly added node should be considered up. + * + * @param host the host that has been newly added. + */ + public void onAdd(Host host); + + /** + * Called when a node is determined to be up. + * + * @param host the host that has been detected up. + */ + public void onUp(Host host); + + /** + * Called when a node is suspected to be dead. + *

+ * A node is suspected to be dead when an error occurs on one of it's + * opened connection. As soon as an host is suspected, a connection attempt + * to that host is immediately tried. If this succeed, then it means that + * the connection was disfunctional but that the node was not really down. + * If this fails however, this means the node is truly dead, onDown() is + * called and further reconnection attempts are scheduled according to the + * {@link com.datastax.driver.core.policies.ReconnectionPolicy} in place. + *

+ * When this event is triggered, it is possible to call the host + * {@link #getInitialReconnectionAttemptFuture} method to wait until the + * initial and immediate reconnection attempt succeed or fail. + *

+ * Note that some StateListener may ignore that event. If a node that + * that is suspected down turns out to be truly down (that is, the driver + * cannot successfully connect to it right away), then {@link #onDown} will + * be called. + * + * @deprecated the suspicion mechanism has been disabled. This will never + * get called. + */ + @Deprecated + public void onSuspected(Host host); + + /** + * Called when a node is determined to be down. + * + * @param host the host that has been detected down. + */ + public void onDown(Host host); + + /** + * Called when a node is removed from the cluster. + * + * @param host the removed host. + */ + public void onRemove(Host host); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java new file mode 100644 index 00000000000..acdda1e0192 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -0,0 +1,630 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.utils.MoreFutures; + +import static com.datastax.driver.core.Connection.State.GONE; +import static com.datastax.driver.core.Connection.State.OPEN; +import static com.datastax.driver.core.Connection.State.RESURRECTING; +import static com.datastax.driver.core.Connection.State.TRASHED; + +class HostConnectionPool { + + private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class); + + private static final int MAX_SIMULTANEOUS_CREATION = 1; + + final Host host; + volatile HostDistance hostDistance; + protected final SessionManager manager; + + final List connections; + private final AtomicInteger open; + /** The total number of in-flight requests on all connections of this pool. 
*/ + final AtomicInteger totalInFlight = new AtomicInteger(); + /** The maximum value of {@link #totalInFlight} since the last call to {@link #cleanupIdleConnections(long)}*/ + private final AtomicInteger maxTotalInFlight = new AtomicInteger(); + @VisibleForTesting + final Set trash = new CopyOnWriteArraySet(); + + private volatile int waiter = 0; + private final Lock waitLock = new ReentrantLock(true); + private final Condition hasAvailableConnection = waitLock.newCondition(); + + private final Runnable newConnectionTask; + + private final AtomicInteger scheduledForCreation = new AtomicInteger(); + + protected final AtomicReference closeFuture = new AtomicReference(); + + private enum Phase {INITIALIZING, READY, INIT_FAILED, CLOSING} + + protected final AtomicReference phase = new AtomicReference(Phase.INITIALIZING); + + // When a request times out, we may never release its stream ID. So over time, a given connection + // may get less an less available streams. When the number of available ones go below the + // following threshold, we just replace the connection by a new one. + private final int minAllowedStreams; + + public HostConnectionPool(Host host, HostDistance hostDistance, SessionManager manager) { + assert hostDistance != HostDistance.IGNORED; + this.host = host; + this.hostDistance = hostDistance; + this.manager = manager; + + this.newConnectionTask = new Runnable() { + @Override + public void run() { + addConnectionIfUnderMaximum(); + scheduledForCreation.decrementAndGet(); + } + }; + + this.connections = new CopyOnWriteArrayList(); + this.open = new AtomicInteger(); + + this.minAllowedStreams = options().getMaxRequestsPerConnection(hostDistance) * 3 / 4; + } + + /** + * @param reusedConnection an existing connection (from a reconnection attempt) that we want to + * reuse as part of this pool. Might be null or already used by another + * pool. 
+ */ + ListenableFuture initAsync(Connection reusedConnection) { + // Create initial core connections + int capacity = options().getCoreConnectionsPerHost(hostDistance); + final List connections = Lists.newArrayListWithCapacity(capacity); + final List> connectionFutures = Lists.newArrayListWithCapacity(capacity); + for (int i = 0; i < capacity; i++) { + Connection connection; + ListenableFuture connectionFuture; + // reuse the existing connection only once + if (reusedConnection != null && reusedConnection.setPool(this)) { + connection = reusedConnection; + connectionFuture = MoreFutures.VOID_SUCCESS; + } else { + connection = manager.connectionFactory().newConnection(this); + connectionFuture = connection.initAsync(); + } + reusedConnection = null; + connections.add(connection); + connectionFutures.add(connectionFuture); + } + + Executor initExecutor = manager.cluster.manager.configuration.getPoolingOptions().getInitializationExecutor(); + + ListenableFuture> allConnectionsFuture = Futures.allAsList(connectionFutures); + + final SettableFuture initFuture = SettableFuture.create(); + Futures.addCallback(allConnectionsFuture, new FutureCallback>() { + @Override + public void onSuccess(List l) { + HostConnectionPool.this.connections.addAll(connections); + open.set(l.size()); + if (isClosed()) { + initFuture.setException(new ConnectionException(host.getSocketAddress(), "Pool was closed during initialization")); + // we're not sure if closeAsync() saw the connections, so ensure they get closed + forceClose(connections); + } else { + logger.trace("Created connection pool to host {}", host); + phase.compareAndSet(Phase.INITIALIZING, Phase.READY); + initFuture.set(null); + } + } + + @Override + public void onFailure(Throwable t) { + phase.compareAndSet(Phase.INITIALIZING, Phase.INIT_FAILED); + forceClose(connections); + initFuture.setException(t); + } + }, initExecutor); + return initFuture; + } + + // Clean up if we got an error at construction time but still created 
part of the core connections + private void forceClose(List connections) { + for (Connection connection : connections) { + connection.closeAsync().force(); + } + } + + private PoolingOptions options() { + return manager.configuration().getPoolingOptions(); + } + + public Connection borrowConnection(long timeout, TimeUnit unit) throws ConnectionException, TimeoutException { + Phase phase = this.phase.get(); + if (phase != Phase.READY) + // Note: throwing a ConnectionException is probably fine in practice as it will trigger the creation of a new host. + // That being said, maybe having a specific exception could be cleaner. + throw new ConnectionException(host.getSocketAddress(), "Pool is " + phase); + + if (connections.isEmpty()) { + for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++) { + // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to + // protect against creating connection in excess of core too quickly + scheduledForCreation.incrementAndGet(); + manager.blockingExecutor().submit(newConnectionTask); + } + Connection c = waitForConnection(timeout, unit); + totalInFlight.incrementAndGet(); + c.setKeyspace(manager.poolsState.keyspace); + return c; + } + + int minInFlight = Integer.MAX_VALUE; + Connection leastBusy = null; + for (Connection connection : connections) { + int inFlight = connection.inFlight.get(); + if (inFlight < minInFlight) { + minInFlight = inFlight; + leastBusy = connection; + } + } + + if (leastBusy == null) { + // We could have raced with a shutdown since the last check + if (isClosed()) + throw new ConnectionException(host.getSocketAddress(), "Pool is shutdown"); + // This might maybe happen if the number of core connections per host is 0 and a connection was trashed between + // the previous check to connections and now. 
But in that case, the line above will have trigger the creation of + // a new connection, so just wait that connection and move on + leastBusy = waitForConnection(timeout, unit); + } else { + while (true) { + int inFlight = leastBusy.inFlight.get(); + + if (inFlight >= Math.min(leastBusy.maxAvailableStreams(), options().getMaxRequestsPerConnection(hostDistance))) { + leastBusy = waitForConnection(timeout, unit); + break; + } + + if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) + break; + } + } + + int totalInFlightCount = totalInFlight.incrementAndGet(); + // update max atomically: + while (true) { + int oldMax = maxTotalInFlight.get(); + if (totalInFlightCount <= oldMax || maxTotalInFlight.compareAndSet(oldMax, totalInFlightCount)) + break; + } + + int connectionCount = open.get() + scheduledForCreation.get(); + if (connectionCount < options().getMaxConnectionsPerHost(hostDistance)) { + // Add a connection if we fill the first n-1 connections and almost fill the last one + int currentCapacity = (connectionCount - 1) * options().getMaxRequestsPerConnection(hostDistance) + + options().getNewConnectionThreshold(hostDistance); + if (totalInFlightCount > currentCapacity) + maybeSpawnNewConnection(); + } + + leastBusy.setKeyspace(manager.poolsState.keyspace); + return leastBusy; + } + + private void awaitAvailableConnection(long timeout, TimeUnit unit) throws InterruptedException { + waitLock.lock(); + waiter++; + try { + hasAvailableConnection.await(timeout, unit); + } finally { + waiter--; + waitLock.unlock(); + } + } + + private void signalAvailableConnection() { + // Quick check if it's worth signaling to avoid locking + if (waiter == 0) + return; + + waitLock.lock(); + try { + hasAvailableConnection.signal(); + } finally { + waitLock.unlock(); + } + } + + private void signalAllAvailableConnection() { + // Quick check if it's worth signaling to avoid locking + if (waiter == 0) + return; + + waitLock.lock(); + try { + hasAvailableConnection.signalAll(); 
+ } finally { + waitLock.unlock(); + } + } + + private Connection waitForConnection(long timeout, TimeUnit unit) throws ConnectionException, TimeoutException { + if (timeout == 0) + throw new TimeoutException(); + + long start = System.nanoTime(); + long remaining = timeout; + do { + try { + awaitAvailableConnection(remaining, unit); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // If we're interrupted fine, check if there is a connection available but stop waiting otherwise + timeout = 0; // this will make us stop the loop if we don't get a connection right away + } + + if (isClosed()) + throw new ConnectionException(host.getSocketAddress(), "Pool is shutdown"); + + int minInFlight = Integer.MAX_VALUE; + Connection leastBusy = null; + for (Connection connection : connections) { + int inFlight = connection.inFlight.get(); + if (inFlight < minInFlight) { + minInFlight = inFlight; + leastBusy = connection; + } + } + + // If we race with shutdown, leastBusy could be null. In that case we just loop and we'll throw on the next + // iteration anyway + if (leastBusy != null) { + while (true) { + int inFlight = leastBusy.inFlight.get(); + + if (inFlight >= Math.min(leastBusy.maxAvailableStreams(), options().getMaxRequestsPerConnection(hostDistance))) + break; + + if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) + return leastBusy; + } + } + + remaining = timeout - Cluster.timeSince(start, unit); + } while (remaining > 0); + + throw new TimeoutException(); + } + + public void returnConnection(Connection connection) { + connection.inFlight.decrementAndGet(); + totalInFlight.decrementAndGet(); + + if (isClosed()) { + close(connection); + return; + } + + if (connection.isDefunct()) { + // As part of making it defunct, we have already replaced it or + // closed the pool. 
+ return; + } + + if (connection.state.get() != TRASHED) { + if (connection.maxAvailableStreams() < minAllowedStreams) { + replaceConnection(connection); + } else { + signalAvailableConnection(); + } + } + } + + // Trash the connection and create a new one, but we don't call trashConnection + // directly because we want to make sure the connection is always trashed. + private void replaceConnection(Connection connection) { + if (!connection.state.compareAndSet(OPEN, TRASHED)) + return; + open.decrementAndGet(); + maybeSpawnNewConnection(); + connection.maxIdleTime = Long.MIN_VALUE; + doTrashConnection(connection); + } + + private boolean trashConnection(Connection connection) { + if (!connection.state.compareAndSet(OPEN, TRASHED)) + return true; + + // First, make sure we don't go below core connections + for (; ; ) { + int opened = open.get(); + if (opened <= options().getCoreConnectionsPerHost(hostDistance)) { + connection.state.set(OPEN); + return false; + } + + if (open.compareAndSet(opened, opened - 1)) + break; + } + logger.trace("Trashing {}", connection); + connection.maxIdleTime = System.currentTimeMillis() + options().getIdleTimeoutSeconds() * 1000; + doTrashConnection(connection); + return true; + } + + private void doTrashConnection(Connection connection) { + connections.remove(connection); + trash.add(connection); + } + + private boolean addConnectionIfUnderMaximum() { + + // First, make sure we don't cross the allowed limit of open connections + for (; ; ) { + int opened = open.get(); + if (opened >= options().getMaxConnectionsPerHost(hostDistance)) + return false; + + if (open.compareAndSet(opened, opened + 1)) + break; + } + + if (phase.get() != Phase.READY) { + open.decrementAndGet(); + return false; + } + + // Now really open the connection + try { + Connection newConnection = tryResurrectFromTrash(); + if (newConnection == null) { + logger.debug("Creating new connection on busy pool to {}", host); + newConnection = 
manager.connectionFactory().open(this); + } + connections.add(newConnection); + + newConnection.state.compareAndSet(RESURRECTING, OPEN); // no-op if it was already OPEN + + // We might have raced with pool shutdown since the last check; ensure the connection gets closed in case the pool did not do it. + if (isClosed() && !newConnection.isClosed()) { + close(newConnection); + open.decrementAndGet(); + return false; + } + + signalAvailableConnection(); + return true; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // Skip the open but ignore otherwise + open.decrementAndGet(); + return false; + } catch (ConnectionException e) { + open.decrementAndGet(); + logger.debug("Connection error to {} while creating additional connection", host); + return false; + } catch (AuthenticationException e) { + // This shouldn't really happen in theory + open.decrementAndGet(); + logger.error("Authentication error while creating additional connection (error is: {})", e.getMessage()); + return false; + } catch (UnsupportedProtocolVersionException e) { + // This shouldn't happen since we shouldn't have been able to connect in the first place + open.decrementAndGet(); + logger.error("UnsupportedProtocolVersionException error while creating additional connection (error is: {})", e.getMessage()); + return false; + } catch (ClusterNameMismatchException e) { + open.decrementAndGet(); + logger.error("ClusterNameMismatchException error while creating additional connection (error is: {})", e.getMessage()); + return false; + } + } + + private Connection tryResurrectFromTrash() { + long highestMaxIdleTime = System.currentTimeMillis(); + Connection chosen = null; + + while (true) { + for (Connection connection : trash) + if (connection.maxIdleTime > highestMaxIdleTime && connection.maxAvailableStreams() > minAllowedStreams) { + chosen = connection; + highestMaxIdleTime = connection.maxIdleTime; + } + + if (chosen == null) + return null; + else if 
(chosen.state.compareAndSet(TRASHED, RESURRECTING)) + break; + } + logger.trace("Resurrecting {}", chosen); + trash.remove(chosen); + return chosen; + } + + private void maybeSpawnNewConnection() { + while (true) { + int inCreation = scheduledForCreation.get(); + if (inCreation >= MAX_SIMULTANEOUS_CREATION) + return; + if (scheduledForCreation.compareAndSet(inCreation, inCreation + 1)) + break; + } + + manager.blockingExecutor().submit(newConnectionTask); + } + + void replaceDefunctConnection(final Connection connection) { + if (connection.state.compareAndSet(OPEN, GONE)) + open.decrementAndGet(); + if (connections.remove(connection)) + manager.blockingExecutor().submit(new Runnable() { + @Override + public void run() { + addConnectionIfUnderMaximum(); + } + }); + } + + void cleanupIdleConnections(long now) { + if (isClosed()) + return; + + shrinkIfBelowCapacity(); + cleanupTrash(now); + } + + /** If we have more active connections than needed, trash some of them */ + private void shrinkIfBelowCapacity() { + int currentLoad = maxTotalInFlight.getAndSet(totalInFlight.get()); + + int maxRequestsPerConnection = options().getMaxRequestsPerConnection(hostDistance); + int needed = currentLoad / maxRequestsPerConnection + 1; + if (currentLoad % maxRequestsPerConnection > options().getNewConnectionThreshold(hostDistance)) + needed += 1; + needed = Math.max(needed, options().getCoreConnectionsPerHost(hostDistance)); + int actual = open.get(); + int toTrash = Math.max(0, actual - needed); + + logger.trace("Current inFlight = {}, {} connections needed, {} connections available, trashing {}", + currentLoad, needed, actual, toTrash); + + if (toTrash <= 0) + return; + + for (Connection connection : connections) + if (trashConnection(connection)) { + toTrash -= 1; + if (toTrash == 0) + return; + } + } + + /** Close connections that have been sitting in the trash for too long */ + private void cleanupTrash(long now) { + for (Connection connection : trash) { + if 
(connection.maxIdleTime < now && connection.state.compareAndSet(TRASHED, GONE)) { + if (connection.inFlight.get() == 0) { + logger.trace("Cleaning up {}", connection); + trash.remove(connection); + close(connection); + } else { + // Given that idleTimeout >> request timeout, all outstanding requests should + // have finished by now, so we should not get here. + // Restore the status so that it's retried on the next cleanup. + connection.state.set(TRASHED); + } + } + } + } + + private void close(final Connection connection) { + connection.closeAsync(); + } + + public final boolean isClosed() { + return closeFuture.get() != null; + } + + public final CloseFuture closeAsync() { + + CloseFuture future = closeFuture.get(); + if (future != null) + return future; + + phase.set(Phase.CLOSING); + + // Wake up all threads that wait + signalAllAvailableConnection(); + + future = new CloseFuture.Forwarding(discardAvailableConnections()); + + return closeFuture.compareAndSet(null, future) + ? future + : closeFuture.get(); // We raced, it's ok, return the future that was actually set + } + + public int opened() { + return open.get(); + } + + int trashed() { + return trash.size(); + } + + private List discardAvailableConnections() { + // Note: if this gets called before initialization has completed, both connections and trash will be empty, + // so this will return an empty list + + List futures = new ArrayList(connections.size() + trash.size()); + + for (final Connection connection : connections) { + CloseFuture future = connection.closeAsync(); + future.addListener(new Runnable() { + public void run() { + if (connection.state.compareAndSet(OPEN, GONE)) + open.decrementAndGet(); + } + }, MoreExecutors.sameThreadExecutor()); + futures.add(future); + } + + // Some connections in the trash might still be open if they hadn't reached their idle timeout + for (Connection connection : trash) + futures.add(connection.closeAsync()); + + return futures; + } + + // This creates connections 
if we have less than core connections (if we + // have more than core, connection will just get trash when we can). + public void ensureCoreConnections() { + if (isClosed()) + return; + + // Note: this process is a bit racy, but it doesn't matter since we're still guaranteed to not create + // more connection than maximum (and if we create more than core connection due to a race but this isn't + // justified by the load, the connection in excess will be quickly trashed anyway) + int opened = open.get(); + for (int i = opened; i < options().getCoreConnectionsPerHost(hostDistance); i++) { + // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to + // protect against creating connection in excess of core too quickly + scheduledForCreation.incrementAndGet(); + manager.blockingExecutor().submit(newConnectionTask); + } + } + + static class PoolState { + volatile String keyspace; + + public void setKeyspace(String keyspace) { + this.keyspace = keyspace; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java b/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java new file mode 100644 index 00000000000..48077f49ce6 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +/** + * The distance to a Cassandra node as assigned by a + * {@link com.datastax.driver.core.policies.LoadBalancingPolicy} (through its {@code + * distance} method). + * + * The distance assigned to an host influences how many connections the driver + * maintains towards this host. If for a given host the assigned {@code HostDistance} + * is {@code LOCAL} or {@code REMOTE}, some connections will be maintained by + * the driver to this host. More active connections will be kept to + * {@code LOCAL} host than to a {@code REMOTE} one (and thus well behaving + * {@code LoadBalancingPolicy} should assign a {@code REMOTE} distance only to + * hosts that are the less often queried). + *

+ * However, if a host is assigned the distance {@code IGNORED}, no connection + * to that host will maintained active. In other words, {@code IGNORED} should + * be assigned to hosts that should not be used by this driver (because they + * are in a remote data center for instance). + */ +public enum HostDistance { + // Note: PoolingOptions rely on the order of the enum. + LOCAL, + REMOTE, + IGNORED +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java new file mode 100644 index 00000000000..74086b9d8cc --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java @@ -0,0 +1,217 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Describes a keyspace defined in this cluster. 
+ */ +public class KeyspaceMetadata { + + public static final String KS_NAME = "keyspace_name"; + private static final String DURABLE_WRITES = "durable_writes"; + private static final String STRATEGY_CLASS = "strategy_class"; + private static final String STRATEGY_OPTIONS = "strategy_options"; + + private final String name; + private final boolean durableWrites; + + private final ReplicationStrategy strategy; + private final Map replication; + + // TODO: I don't think we change those, so there is probably no need for ConcurrentHashMap. Check if + // that's the case. + private final Map tables = new ConcurrentHashMap(); + private final Map userTypes = new ConcurrentHashMap(); + + private KeyspaceMetadata(String name, boolean durableWrites, Map replication) { + this.name = name; + this.durableWrites = durableWrites; + this.replication = replication; + this.strategy = ReplicationStrategy.create(replication); + } + + static KeyspaceMetadata build(Row row, List udtRows) { + + String name = row.getString(KS_NAME); + boolean durableWrites = row.getBool(DURABLE_WRITES); + + Map replicationOptions = new HashMap(); + replicationOptions.put("class", row.getString(STRATEGY_CLASS)); + replicationOptions.putAll(SimpleJSONParser.parseStringMap(row.getString(STRATEGY_OPTIONS))); + + KeyspaceMetadata ksm = new KeyspaceMetadata(name, durableWrites, replicationOptions); + + if (udtRows == null) + return ksm; + + ksm.addUserTypes(udtRows); + + return ksm; + } + + /** + * Returns the name of this keyspace. + * + * @return the name of this CQL keyspace. + */ + public String getName() { + return name; + } + + /** + * Returns whether durable writes are set on this keyspace. + * + * @return {@code true} if durable writes are set on this keyspace (the + * default), {@code false} otherwise. + */ + public boolean isDurableWrites() { + return durableWrites; + } + + /** + * Returns the replication options for this keyspace. 
+ * + * @return a map containing the replication options for this keyspace. + */ + public Map getReplication() { + return Collections.unmodifiableMap(replication); + } + + /** + * Returns the metadata for a table contained in this keyspace. + * + * @param name the name of table to retrieve + * @return the metadata for table {@code name} if it exists in this keyspace, + * {@code null} otherwise. + */ + public TableMetadata getTable(String name) { + return tables.get(Metadata.handleId(name)); + } + + void removeTable(String table) { + tables.remove(table); + } + + /** + * Returns the tables defined in this keyspace. + * + * @return a collection of the metadata for the tables defined in this + * keyspace. + */ + public Collection getTables() { + return Collections.unmodifiableCollection(tables.values()); + } + + /** + * Returns the definition for a user defined type (UDT) in this keyspace. + * + * @param name the name of UDT definition to retrieve + * @return the definition for {@code name} if it exists in this keyspace, + * {@code null} otherwise. + */ + public UserType getUserType(String name) { + return userTypes.get(Metadata.handleId(name)); + } + + /** + * Returns the user types defined in this keyspace. + * + * @return a collection of the definition for the user types defined in this + * keyspace. + */ + public Collection getUserTypes() { + return Collections.unmodifiableCollection(userTypes.values()); + } + + void addUserTypes(List udtRows) { + for (Row r : udtRows) { + UserType def = UserType.build(r); + userTypes.put(def.getTypeName(), def); + } + } + + void removeUserType(String userType) { + userTypes.remove(userType); + } + + /** + * Returns a {@code String} containing CQL queries representing this + * keyspace and the user types and tables it contains. + *

+ * In other words, this method returns the queries that would allow to + * recreate the schema of this keyspace, along with all its user + * types/tables. + *

+ * Note that the returned String is formatted to be human readable (for + * some definition of human readable at least). + * + * @return the CQL queries representing this keyspace schema as a {code + * String}. + */ + public String exportAsString() { + StringBuilder sb = new StringBuilder(); + + sb.append(asCQLQuery()).append('\n'); + + for (UserType udt : userTypes.values()) + sb.append('\n').append(udt.exportAsString()).append('\n'); + + for (TableMetadata tm : tables.values()) + sb.append('\n').append(tm.exportAsString()).append('\n'); + + return sb.toString(); + } + + /** + * Returns a CQL query representing this keyspace. + *

+ * This method returns a single 'CREATE KEYSPACE' query with the options + * corresponding to this keyspace definition. + * + * @return the 'CREATE KEYSPACE' query corresponding to this keyspace. + * @see #exportAsString + */ + public String asCQLQuery() { + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE KEYSPACE ").append(Metadata.escapeId(name)).append(" WITH "); + sb.append("REPLICATION = { 'class' : '").append(replication.get("class")).append('\''); + for (Map.Entry entry : replication.entrySet()) { + if (entry.getKey().equals("class")) + continue; + sb.append(", '").append(entry.getKey()).append("': '").append(entry.getValue()).append('\''); + } + sb.append(" } AND DURABLE_WRITES = ").append(durableWrites); + sb.append(';'); + return sb.toString(); + } + + @Override + public String toString() { + return asCQLQuery(); + } + + void add(TableMetadata tm) { + tables.put(tm.getName(), tm); + } + + ReplicationStrategy replicationStrategy() { + return strategy; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/LatencyTracker.java b/driver-core/src/main/java/com/datastax/driver/core/LatencyTracker.java new file mode 100644 index 00000000000..1dec306f1f9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/LatencyTracker.java @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +/** + * Interface for objects that are interested in tracking the latencies + * of the driver queries to each Cassandra nodes. + *

+ * An implementation of this interface can be registered against a Cluster + * object trough the {@link Cluster#register} method, after which the + * {@link #update(Host, Statement, Exception, long)} method will be called after each query of the driver to a Cassandra + * host with the latency/duration (in nanoseconds) of this operation. + */ +public interface LatencyTracker { + + /** + * A method that is called after each request to a Cassandra node with + * the duration of that operation. + *

+ * Note that there is no guarantee that this method won't be called + * concurrently by multiple threads, so implementations should synchronize + * internally if need be. + * + * @param host The Cassandra host on which a request has been performed. + * This parameter is never {@code null}. + * @param statement The {@link com.datastax.driver.core.Statement} that has been executed. + * This parameter is never {@code null}. + * @param exception An {@link Exception} thrown when receiving the response, or {@code null} + * if the response was successful. + * @param newLatencyNanos the latency in nanoseconds of the operation. + * This latency corresponds to the time elapsed between + * when the query was sent to {@code host} and + * when the response was received by the driver + * (or the operation timed out, in which {@code newLatencyNanos} + * will approximately be the timeout value). + */ + public void update(Host host, Statement statement, Exception exception, long newLatencyNanos); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/MD5Digest.java b/driver-core/src/main/java/com/datastax/driver/core/MD5Digest.java new file mode 100644 index 00000000000..9e4cd28f077 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/MD5Digest.java @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.util.Arrays; + +import com.datastax.driver.core.utils.Bytes; + +/** + * The result of the computation of an MD5 digest. + * + * A MD5 is really just a byte[] but arrays are a no go as map keys. We could + * wrap it in a ByteBuffer but: + * 1. MD5Digest is a more explicit name than ByteBuffer to represent a md5. + * 2. Using our own class allows to use our FastByteComparison for equals. + */ +class MD5Digest { + + public final byte[] bytes; + + private MD5Digest(byte[] bytes) { + this.bytes = bytes; + } + + public static MD5Digest wrap(byte[] digest) { + return new MD5Digest(digest); + } + + @Override + public final int hashCode() { + return Arrays.hashCode(bytes); + } + + @Override + public final boolean equals(Object o) { + if(!(o instanceof MD5Digest)) + return false; + MD5Digest that = (MD5Digest)o; + // handles nulls properly + return Arrays.equals(this.bytes, that.bytes); + } + + @Override + public String toString() { + return Bytes.toHexString(bytes); + } +} + diff --git a/driver-core/src/main/java/com/datastax/driver/core/Message.java b/driver-core/src/main/java/com/datastax/driver/core/Message.java new file mode 100644 index 00000000000..62a9e279022 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Message.java @@ -0,0 +1,245 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.util.EnumSet; +import java.util.List; +import java.util.UUID; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToMessageDecoder; +import io.netty.handler.codec.MessageToMessageEncoder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.DriverInternalError; + +/** + * A message from the CQL binary protocol. + */ +abstract class Message { + + protected static final Logger logger = LoggerFactory.getLogger(Message.class); + + public interface Coder { + public void encode(R request, ByteBuf dest, ProtocolVersion version); + public int encodedSize(R request, ProtocolVersion version); + } + + public interface Decoder { + public R decode(ByteBuf body, ProtocolVersion version); + } + + private volatile int streamId; + + protected Message() {} + + public Message setStreamId(int streamId) { + this.streamId = streamId; + return this; + } + + public int getStreamId() { + return streamId; + } + + public static abstract class Request extends Message { + + public enum Type { + STARTUP (1, Requests.Startup.coder), + CREDENTIALS (4, Requests.Credentials.coder), + OPTIONS (5, Requests.Options.coder), + QUERY (7, Requests.Query.coder), + PREPARE (9, Requests.Prepare.coder), + EXECUTE (10, Requests.Execute.coder), + REGISTER (11, Requests.Register.coder), + BATCH (13, Requests.Batch.coder), + AUTH_RESPONSE (15, Requests.AuthResponse.coder); + + public final int opcode; + public final Coder coder; + + private Type(int opcode, Coder coder) { + this.opcode = opcode; + this.coder = coder; + } + } + + public final Type type; + private final boolean tracingRequested; + + protected Request(Type type) { + this(type, false); + } + + protected Request(Type type, boolean tracingRequested) { + this.type = type; + this.tracingRequested = 
tracingRequested; + } + + public boolean isTracingRequested() { + return tracingRequested; + } + + ConsistencyLevel consistency() { + switch (this.type) { + case QUERY: return ((Requests.Query)this).options.consistency; + case EXECUTE: return ((Requests.Execute)this).options.consistency; + case BATCH: return ((Requests.Batch)this).options.consistency; + default: return null; + } + } + + ConsistencyLevel serialConsistency() { + switch (this.type) { + case QUERY: return ((Requests.Query)this).options.serialConsistency; + case EXECUTE: return ((Requests.Execute)this).options.serialConsistency; + case BATCH: return ((Requests.Batch)this).options.serialConsistency; + default: return null; + } + } + + long defaultTimestamp() { + switch (this.type) { + case QUERY: return ((Requests.Query)this).options.defaultTimestamp; + case EXECUTE: return ((Requests.Execute)this).options.defaultTimestamp; + case BATCH: return ((Requests.Batch)this).options.defaultTimestamp; + default: return 0; + } + } + + ByteBuffer pagingState() { + switch (this.type) { + case QUERY: return ((Requests.Query)this).options.pagingState; + case EXECUTE: return ((Requests.Execute)this).options.pagingState; + default: return null; + } + } + + Request copy() { + throw new UnsupportedOperationException(); + } + + Request copy(ConsistencyLevel newConsistencyLevel) { + throw new UnsupportedOperationException(); + } + } + + public static abstract class Response extends Message { + + public enum Type { + ERROR (0, Responses.Error.decoder), + READY (2, Responses.Ready.decoder), + AUTHENTICATE (3, Responses.Authenticate.decoder), + SUPPORTED (6, Responses.Supported.decoder), + RESULT (8, Responses.Result.decoder), + EVENT (12, Responses.Event.decoder), + AUTH_CHALLENGE (14, Responses.AuthChallenge.decoder), + AUTH_SUCCESS (16, Responses.AuthSuccess.decoder); + + public final int opcode; + public final Decoder decoder; + + private static final Type[] opcodeIdx; + static { + int maxOpcode = -1; + for (Type type : 
Type.values()) + maxOpcode = Math.max(maxOpcode, type.opcode); + opcodeIdx = new Type[maxOpcode + 1]; + for (Type type : Type.values()) { + if (opcodeIdx[type.opcode] != null) + throw new IllegalStateException("Duplicate opcode"); + opcodeIdx[type.opcode] = type; + } + } + + private Type(int opcode, Decoder decoder) { + this.opcode = opcode; + this.decoder = decoder; + } + + public static Type fromOpcode(int opcode) { + if (opcode < 0 || opcode >= opcodeIdx.length) + throw new DriverInternalError(String.format("Unknown response opcode %d", opcode)); + Type t = opcodeIdx[opcode]; + if (t == null) + throw new DriverInternalError(String.format("Unknown response opcode %d", opcode)); + return t; + } + } + + public final Type type; + protected UUID tracingId; + + protected Response(Type type) { + this.type = type; + } + + public Response setTracingId(UUID tracingId) { + this.tracingId = tracingId; + return this; + } + + public UUID getTracingId() { + return tracingId; + } + } + + @ChannelHandler.Sharable + public static class ProtocolDecoder extends MessageToMessageDecoder { + + @Override + protected void decode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception { + boolean isTracing = frame.header.flags.contains(Frame.Header.Flag.TRACING); + UUID tracingId = isTracing ? 
CBUtil.readUUID(frame.body) : null; + + try { + Response response = Response.Type.fromOpcode(frame.header.opcode).decoder.decode(frame.body, frame.header.version); + response.setTracingId(tracingId).setStreamId(frame.header.streamId); + out.add(response); + } finally { + frame.body.release(); + } + } + } + + @ChannelHandler.Sharable + public static class ProtocolEncoder extends MessageToMessageEncoder { + + private final ProtocolVersion protocolVersion; + + public ProtocolEncoder(ProtocolVersion version) { + this.protocolVersion = version; + } + + @Override + protected void encode(ChannelHandlerContext ctx, Request request, List out) throws Exception { + EnumSet flags = EnumSet.noneOf(Frame.Header.Flag.class); + if (request.isTracingRequested()) + flags.add(Frame.Header.Flag.TRACING); + + @SuppressWarnings("unchecked") + Coder coder = (Coder)request.type.coder; + ByteBuf body = ctx.alloc().buffer(coder.encodedSize(request, protocolVersion)); + coder.encode(request, body, protocolVersion); + + out.add(Frame.create(protocolVersion, request.type.opcode, request.getStreamId(), flags, body)); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Metadata.java b/driver-core/src/main/java/com/datastax/driver/core/Metadata.java new file mode 100644 index 00000000000..db4ff4d0440 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Metadata.java @@ -0,0 +1,640 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.regex.Pattern; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Maps; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.DriverInternalError; + +import static com.datastax.driver.core.SchemaElement.KEYSPACE; +import static com.datastax.driver.core.SchemaElement.TABLE; +import static com.datastax.driver.core.SchemaElement.TYPE; + +/** + * Keeps metadata on the connected cluster, including known nodes and schema definitions. + */ +public class Metadata { + + private static final Logger logger = LoggerFactory.getLogger(Metadata.class); + + private final Cluster.Manager cluster; + volatile String clusterName; + volatile String partitioner; + private final ConcurrentMap hosts = new ConcurrentHashMap(); + private final ConcurrentMap keyspaces = new ConcurrentHashMap(); + volatile TokenMap tokenMap; + + private static final Pattern cqlId = Pattern.compile("\\w+"); + private static final Pattern lowercaseId = Pattern.compile("[a-z][a-z0-9_]*"); + + Metadata(Cluster.Manager cluster) { + this.cluster = cluster; + } + + // Synchronized to make it easy to detect dropped keyspaces + synchronized void rebuildSchema(SchemaElement targetType, String targetKeyspace, String targetName, ResultSet ks, ResultSet udts, ResultSet cfs, ResultSet cols, VersionNumber cassandraVersion) { + + Map> cfDefs = new HashMap>(); + Map> udtDefs = new HashMap>(); + Map>> colsDefs = new HashMap>>(); + + // Gather cf defs + if (cfs != null) { + for (Row row : cfs) { + String ksName = row.getString(KeyspaceMetadata.KS_NAME); + List 
l = cfDefs.get(ksName); + if (l == null) { + l = new ArrayList(); + cfDefs.put(ksName, l); + } + l.add(row); + } + } + + // Gather udt defs + if (udts != null) { + for (Row row : udts) { + String ksName = row.getString(KeyspaceMetadata.KS_NAME); + List l = udtDefs.get(ksName); + if (l == null) { + l = new ArrayList(); + udtDefs.put(ksName, l); + } + l.add(row); + } + } + + // Gather columns per Cf + if (cols != null) { + for (Row row : cols) { + String ksName = row.getString(KeyspaceMetadata.KS_NAME); + String cfName = row.getString(TableMetadata.CF_NAME); + Map> colsByCf = colsDefs.get(ksName); + if (colsByCf == null) { + colsByCf = new HashMap>(); + colsDefs.put(ksName, colsByCf); + } + Map l = colsByCf.get(cfName); + if (l == null) { + l = new HashMap(); + colsByCf.put(cfName, l); + } + ColumnMetadata.Raw c = ColumnMetadata.Raw.fromRow(row, cassandraVersion); + l.put(c.name, c); + } + } + + if (targetType == null || targetType == KEYSPACE) { // Refresh one or all keyspaces + assert ks != null; + Set addedKs = new HashSet(); + for (Row ksRow : ks) { + String ksName = ksRow.getString(KeyspaceMetadata.KS_NAME); + KeyspaceMetadata ksm = KeyspaceMetadata.build(ksRow, udtDefs.get(ksName)); + + if (cfDefs.containsKey(ksName)) { + buildTableMetadata(ksm, cfDefs.get(ksName), colsDefs.get(ksName), cassandraVersion); + } + addedKs.add(ksName); + keyspaces.put(ksName, ksm); + } + + // If keyspace is null, it means we're rebuilding from scratch, so + // remove anything that was not just added as it means it's a dropped keyspace + if (targetKeyspace == null) { + Iterator iter = keyspaces.keySet().iterator(); + while (iter.hasNext()) { + if (!addedKs.contains(iter.next())) + iter.remove(); + } + } + } else if (targetType == TABLE) { + assert targetKeyspace != null; + KeyspaceMetadata ksm = keyspaces.get(targetKeyspace); + + // If we update a keyspace we don't know about, something went + // wrong. Log an error an schedule a full schema rebuilt. 
+ if (ksm == null) { + logger.error(String.format("Asked to rebuild table %s.%s but I don't know keyspace %s", targetKeyspace, targetName, targetKeyspace)); + cluster.submitSchemaRefresh(null, null, null); + return; + } + + if (cfDefs.containsKey(targetKeyspace)) + buildTableMetadata(ksm, cfDefs.get(targetKeyspace), colsDefs.get(targetKeyspace), cassandraVersion); + } else if (targetType == TYPE) { + assert targetKeyspace != null; + KeyspaceMetadata ksm = keyspaces.get(targetKeyspace); + + if (ksm == null) { + logger.error(String.format("Asked to rebuild type %s.%s but I don't know keyspace %s", targetKeyspace, targetName, targetKeyspace)); + cluster.submitSchemaRefresh(null, null, null); + return; + } + + if (udtDefs.containsKey(targetKeyspace)) + ksm.addUserTypes(udtDefs.get(targetKeyspace)); + } + } + + private void buildTableMetadata(KeyspaceMetadata ksm, List cfRows, Map> colsDefs, VersionNumber cassandraVersion) { + for (Row cfRow : cfRows) { + String cfName = cfRow.getString(TableMetadata.CF_NAME); + try { + Map cols = colsDefs == null ? null : colsDefs.get(cfName); + if (cols == null || cols.isEmpty()) { + if (cassandraVersion.getMajor() >= 2) { + // In C* >= 2.0, we should never have no columns metadata because at the very least we should + // have the metadata corresponding to the default CQL metadata. So if we don't have any columns, + // that can only mean that the table got creating concurrently with our schema queries, and the + // query for columns metadata reached the node before the table was persisted while the table + // metadata one reached it afterwards. 
We could make the query to the column metadata sequential + // with the table metadata instead of in parallel, but it's probably not worth making it slower + // all the time to avoid this race since 1) it's very very uncommon and 2) we can just ignore the + // incomplete table here for now and it'll get updated next time with no particular consequence + // (if the table creation was concurrent with our querying, we'll get a notifciation later and + // will reupdate the schema for it anyway). See JAVA-320 for why we need this. + continue; + } else { + // C* 1.2 don't persists default CQL metadata, so it's possible not to have columns (for thirft + // tables). But in that case TableMetadata.build() knows how to handle it. + cols = Collections.emptyMap(); + } + } + TableMetadata.build(ksm, cfRow, cols, cassandraVersion); + } catch (RuntimeException e) { + // See ControlConnection#refreshSchema for why we'd rather not probably this further + logger.error(String.format("Error parsing schema for table %s.%s: " + + "Cluster.getMetadata().getKeyspace(\"%s\").getTable(\"%s\") will be missing or incomplete", + ksm.getName(), cfName, ksm.getName(), cfName), e); + } + } + } + + synchronized void rebuildTokenMap(String partitioner, Map> allTokens) { + if (allTokens.isEmpty()) + return; + + Token.Factory factory = partitioner == null + ? (tokenMap == null ? null : tokenMap.factory) + : Token.getFactory(partitioner); + if (factory == null) + return; + + this.tokenMap = TokenMap.build(factory, allTokens, keyspaces.values()); + } + + Host add(InetSocketAddress address) { + Host newHost = new Host(address, cluster.convictionPolicyFactory, cluster); + Host previous = hosts.putIfAbsent(address, newHost); + return previous == null ? 
newHost : null; + } + + boolean remove(Host host) { + return hosts.remove(host.getSocketAddress()) != null; + } + + Host getHost(InetSocketAddress address) { + return hosts.get(address); + } + + // For internal use only + Collection allHosts() { + return hosts.values(); + } + + // Deal with case sensitivity for a given keyspace or table id + static String handleId(String id) { + // Shouldn't really happen for this method, but no reason to fail here + if (id == null) + return null; + + if (cqlId.matcher(id).matches()) + return id.toLowerCase(); + + // Check if it's enclosed in quotes. If it is, remove them + if (id.charAt(0) == '"' && id.charAt(id.length() - 1) == '"') + return id.substring(1, id.length() - 1); + + // otherwise, just return the id. + return id; + } + + // Escape a CQL3 identifier based on its value as read from the schema + // tables. Because it comes from Cassandra, we could just always quote it, + // but to get a nicer output we don't do it if it's not necessary. + static String escapeId(String ident) { + // we don't need to escape if it's lowercase and match non-quoted CQL3 ids. + return lowercaseId.matcher(ident).matches() ? ident : quote(ident); + } + + /** + * Quote a keyspace, table or column identifier to make it case sensitive. + *

+ * CQL identifiers, including keyspace, table and column ones, are case insensitive + * by default. Case sensitive identifiers can however be provided by enclosing + * the identifier in double quotes (see the + * CQL documentation + * for details). If you are using case sensitive identifiers, this method + * can be used to enclose such identifier in double quotes, making it case + * sensitive. + * + * @param id the keyspace or table identifier. + * @return {@code id} enclosed in double-quotes, for use in methods like + * {@link #getReplicas}, {@link #getKeyspace}, {@link KeyspaceMetadata#getTable} + * or even {@link Cluster#connect(String)}. + */ + public static String quote(String id) { + return '"' + id + '"'; + } + + /** + * Returns the token ranges that define data distribution in the ring. + *

+ * Note that this information is refreshed asynchronously by the control + * connection, when schema or ring topology changes. It might occasionally + * be stale. + * + * @return the token ranges. + */ + public Set getTokenRanges() { + TokenMap current = tokenMap; + return (current == null) ? Collections.emptySet() : current.tokenRanges; + } + + /** + * Returns the token ranges that are replicated on the given host, for the given + * keyspace. + *

+ * Note that this information is refreshed asynchronously by the control + * connection, when schema or ring topology changes. It might occasionally + * be stale (or even empty). + * + * @param keyspace the name of the keyspace to get token ranges for. + * @param host the host. + * @return the (immutable) set of token ranges for {@code host} as known + * by the driver. + */ + public Set getTokenRanges(String keyspace, Host host) { + keyspace = handleId(keyspace); + TokenMap current = tokenMap; + if (current == null) { + return Collections.emptySet(); + } else { + Map> dcRanges = current.hostsToRanges.get(keyspace); + if (dcRanges == null) { + return Collections.emptySet(); + } else { + Set ranges = dcRanges.get(host); + return (ranges == null) ? Collections.emptySet() : ranges; + } + } + } + + /** + * Returns the set of hosts that are replica for a given partition key. + *

+ * Note that this information is refreshed asynchronously by the control + * connection, when schema or ring topology changes. It might occasionally + * be stale (or even empty). + * + * @param keyspace the name of the keyspace to get replicas for. + * @param partitionKey the partition key for which to find the set of + * replica. + * @return the (immutable) set of replicas for {@code partitionKey} as known + * by the driver. + */ + public Set getReplicas(String keyspace, ByteBuffer partitionKey) { + keyspace = handleId(keyspace); + TokenMap current = tokenMap; + if (current == null) { + return Collections.emptySet(); + } else { + Set hosts = current.getReplicas(keyspace, current.factory.hash(partitionKey)); + return hosts == null ? Collections.emptySet() : hosts; + } + } + + /** + * Returns the set of hosts that are replica for a given token range. + *

+ * Note that this information is refreshed asynchronously by the control + * connection, when schema or ring topology changes. It might occasionally + * be stale (or even empty). + * + * @param keyspace the name of the keyspace to get replicas for. + * @param range the token range. + * @return the (immutable) set of replicas for {@code range} as known by the driver. + */ + public Set getReplicas(String keyspace, TokenRange range) { + keyspace = handleId(keyspace); + TokenMap current = tokenMap; + if (current == null) { + return Collections.emptySet(); + } else { + Set hosts = current.getReplicas(keyspace, range.getEnd()); + return hosts == null ? Collections.emptySet() : hosts; + } + } + + /** + * The Cassandra name for the cluster connect to. + * + * @return the Cassandra name for the cluster connect to. + */ + public String getClusterName() { + return clusterName; + } + + /** + * The partitioner in use as reported by the Cassandra nodes. + * + * @return the partitioner in use as reported by the Cassandra nodes. + */ + public String getPartitioner() { + return partitioner; + } + + /** + * Returns the known hosts of this cluster. + * + * @return A set will all the know host of this cluster. + */ + public Set getAllHosts() { + return new HashSet(allHosts()); + } + + /** + * Checks whether hosts that are currently up agree on the schema definition. + *

+ * This method performs a one-time check only, without any form of retry; therefore {@link Cluster.Builder#withMaxSchemaAgreementWaitSeconds(int)} + * does not apply in this case. + * + * @return {@code true} if all hosts agree on the schema; {@code false} if they don't agree, or if the check could not be performed + * (for example, if the control connection is down). + */ + public boolean checkSchemaAgreement() { + return cluster.controlConnection.checkSchemaAgreement(); + } + + /** + * Returns the metadata of a keyspace given its name. + * + * @param keyspace the name of the keyspace for which metadata should be + * returned. + * @return the metadata of the requested keyspace or {@code null} if {@code + * keyspace} is not a known keyspace. + */ + public KeyspaceMetadata getKeyspace(String keyspace) { + return keyspaces.get(handleId(keyspace)); + } + + /** + * Used when the keyspace name is unquoted and in the exact case we store it in + * (typically when we got it from an internal call, not from the user). + */ + KeyspaceMetadata getKeyspaceInternal(String keyspace) { + return keyspaces.get(keyspace); + } + + void removeKeyspace(String keyspace) { + keyspaces.remove(keyspace); + if (tokenMap != null) + tokenMap.tokenToHosts.remove(keyspace); + } + + /** + * Returns a list of all the defined keyspaces. + * + * @return a list of all the defined keyspaces. + */ + public List getKeyspaces() { + return new ArrayList(keyspaces.values()); + } + + /** + * Returns a {@code String} containing CQL queries representing the schema + * of this cluster. + * + * In other words, this method returns the queries that would allow to + * recreate the schema of this cluster. + * + * Note that the returned String is formatted to be human readable (for + * some definition of human readable at least). + * + * @return the CQL queries representing this cluster schema as a {code + * String}. 
+ */ + public String exportSchemaAsString() { + StringBuilder sb = new StringBuilder(); + + for (KeyspaceMetadata ksm : keyspaces.values()) + sb.append(ksm.exportAsString()).append('\n'); + + return sb.toString(); + } + + /** + * Builds a new {@link Token} from its string representation, according to the partitioner + * reported by the Cassandra nodes. + * + * @param tokenStr the string representation. + * @return the token. + */ + public Token newToken(String tokenStr) { + TokenMap current = tokenMap; + if (current == null) + throw new DriverInternalError("Token factory not set. This should only happen at initialization time"); + + return current.factory.fromString(tokenStr); + } + + /** + * Builds a new {@link TokenRange}. + * + * @param start the start token. + * @param end the end token. + * @return the range. + */ + public TokenRange newTokenRange(Token start, Token end) { + TokenMap current = tokenMap; + if (current == null) + throw new DriverInternalError("Token factory not set. This should only happen at initialization time"); + + return new TokenRange(start, end, current.factory); + } + + Token.Factory tokenFactory() { + TokenMap current = tokenMap; + return (current == null) ? 
null : current.factory; + } + + static class TokenMap { + + private final Token.Factory factory; + private final Map>> tokenToHosts; + private final Map>> hostsToRanges; + private final List ring; + private final Set tokenRanges; + final Set hosts; + + private TokenMap(Token.Factory factory, + Map> primaryToTokens, + Map>> tokenToHosts, + Map>> hostsToRanges, + List ring, Set tokenRanges, Set hosts) { + this.factory = factory; + this.tokenToHosts = tokenToHosts; + this.hostsToRanges = hostsToRanges; + this.ring = ring; + this.tokenRanges = tokenRanges; + this.hosts = hosts; + for (Map.Entry> entry : primaryToTokens.entrySet()) { + Host host = entry.getKey(); + host.setTokens(ImmutableSet.copyOf(entry.getValue())); + } + } + + public static TokenMap build(Token.Factory factory, Map> allTokens, Collection keyspaces) { + + Set hosts = allTokens.keySet(); + Map tokenToPrimary = new HashMap(); + Map> primaryToTokens = new HashMap>(); + Set allSorted = new TreeSet(); + + for (Map.Entry> entry : allTokens.entrySet()) { + Host host = entry.getKey(); + for (String tokenStr : entry.getValue()) { + try { + Token t = factory.fromString(tokenStr); + allSorted.add(t); + tokenToPrimary.put(t, host); + Set hostTokens = primaryToTokens.get(host); + if (hostTokens == null) { + hostTokens = new HashSet(); + primaryToTokens.put(host, hostTokens); + } + hostTokens.add(t); + } catch (IllegalArgumentException e) { + // If we failed parsing that token, skip it + } + } + } + + List ring = new ArrayList(allSorted); + Set tokenRanges = makeTokenRanges(ring, factory); + + Map>> tokenToHosts = new HashMap>>(); + Map>> hostsToRanges = new HashMap>>(); + for (KeyspaceMetadata keyspace : keyspaces) + { + ReplicationStrategy strategy = keyspace.replicationStrategy(); + Map> ksTokens = (strategy == null) + ? 
makeNonReplicatedMap(tokenToPrimary) + : strategy.computeTokenToReplicaMap(tokenToPrimary, ring); + + tokenToHosts.put(keyspace.getName(), ksTokens); + + Map> ksRanges; + if (ring.size() == 1) { + // We forced the single range to ]minToken,minToken], make sure to use that instead of relying on the host's token + ImmutableMap.Builder> builder = ImmutableMap.builder(); + for (Host host : allTokens.keySet()) + builder.put(host, tokenRanges); + ksRanges = builder.build(); + } else { + ksRanges = computeHostsToRangesMap(tokenRanges, ksTokens, hosts.size()); + } + hostsToRanges.put(keyspace.getName(), ksRanges); + } + return new TokenMap(factory, primaryToTokens, tokenToHosts, hostsToRanges, ring, tokenRanges, hosts); + } + + private Set getReplicas(String keyspace, Token token) { + + Map> keyspaceHosts = tokenToHosts.get(keyspace); + if (keyspaceHosts == null) + return Collections.emptySet(); + + // If the token happens to be one of the "primary" tokens, get result directly + Set hosts = keyspaceHosts.get(token); + if (hosts != null) + return hosts; + + // Otherwise, find closest "primary" token on the ring + int i = Collections.binarySearch(ring, token); + if (i < 0) { + i = -i - 1; + if (i >= ring.size()) + i = 0; + } + + return keyspaceHosts.get(ring.get(i)); + } + + private static Map> makeNonReplicatedMap(Map input) { + Map> output = new HashMap>(input.size()); + for (Map.Entry entry : input.entrySet()) + output.put(entry.getKey(), ImmutableSet.of(entry.getValue())); + return output; + } + + private static Set makeTokenRanges(List ring, Token.Factory factory) { + ImmutableSet.Builder builder = ImmutableSet.builder(); + // JAVA-684: if there is only one token, return the range ]minToken, minToken] + if(ring.size() == 1) { + builder.add(new TokenRange(factory.minToken(), factory.minToken(), factory)); + } else { + for (int i = 0; i < ring.size(); i++) { + Token start = ring.get(i); + Token end = ring.get((i + 1) % ring.size()); + builder.add(new TokenRange(start, 
end, factory)); + } + } + return builder.build(); + } + + private static Map> computeHostsToRangesMap(Set tokenRanges, Map> ksTokens, int hostCount) { + Map> builders = Maps.newHashMapWithExpectedSize(hostCount); + for (TokenRange range : tokenRanges) { + Set replicas = ksTokens.get(range.getEnd()); + for (Host host : replicas) { + ImmutableSet.Builder hostRanges = builders.get(host); + if (hostRanges == null) { + hostRanges = ImmutableSet.builder(); + builders.put(host, hostRanges); + } + hostRanges.add(range); + } + } + Map> ksRanges = Maps.newHashMapWithExpectedSize(hostCount); + for (Map.Entry> entry : builders.entrySet()) { + ksRanges.put(entry.getKey(), entry.getValue().build()); + } + return ksRanges; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Metrics.java b/driver-core/src/main/java/com/datastax/driver/core/Metrics.java new file mode 100644 index 00000000000..20558c4ef8c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Metrics.java @@ -0,0 +1,461 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.HashSet; +import java.util.Set; + +import com.codahale.metrics.*; + +import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; + +/** + * Metrics exposed by the driver. + *

+ * The metrics exposed by this class use the Metrics + * library and you should refer its documentation + * for details on how to handle the exposed metric objects. + *

+ * By default, metrics are exposed through JMX, which is very useful for + * development and browsing, but for production environments you may want to + * have a look at the reporters + * provided by the Metrics library which could be more efficient/adapted. + */ +public class Metrics { + + private final Cluster.Manager manager; + private final MetricRegistry registry = new MetricRegistry(); + private final JmxReporter jmxReporter; + private final Errors errors = new Errors(); + + private final Timer requests = registry.timer("requests"); + + private final Gauge knownHosts = registry.register("known-hosts", new Gauge() { + @Override + public Integer getValue() { + return manager.metadata.allHosts().size(); + } + }); + private final Gauge connectedTo = registry.register("connected-to", new Gauge() { + @Override + public Integer getValue() { + Set s = new HashSet(); + for (SessionManager session : manager.sessions) + s.addAll(session.pools.keySet()); + return s.size(); + } + }); + private final Gauge openConnections = registry.register("open-connections", new Gauge() { + @Override + public Integer getValue() { + int value = manager.controlConnection.isOpen() ? 
1 : 0; + for (SessionManager session : manager.sessions) + for (HostConnectionPool pool : session.pools.values()) + value += pool.opened(); + return value; + } + }); + private final Gauge trashedConnections = registry.register("trashed-connections", new Gauge() { + @Override + public Integer getValue() { + int value = 0; + for (SessionManager session : manager.sessions) + for (HostConnectionPool pool : session.pools.values()) + value += pool.trashed(); + return value; + } + }); + + private final Gauge executorQueueDepth = registry.register("executor-queue-depth", new Gauge() { + @Override + public Integer getValue() { + return manager.executorQueue.size(); + } + }); + + private final Gauge blockingExecutorQueueDepth = registry.register("blocking-executor-queue-depth", new Gauge() { + @Override + public Integer getValue() { + return manager.blockingExecutorQueue.size(); + } + }); + + private final Gauge reconnectionSchedulerQueueSize= registry.register("reconnection-scheduler-task-count", new Gauge() { + @Override + public Integer getValue() { + return manager.reconnectionExecutor.getQueue().size(); + } + }); + + private final Gauge taskSchedulerQueueSize = registry.register("task-scheduler-task-count", new Gauge() { + @Override + public Integer getValue() { + return manager.scheduledTasksExecutor.getQueue().size(); + } + }); + + Metrics(Cluster.Manager manager) { + this.manager = manager; + if (manager.configuration.getMetricsOptions().isJMXReportingEnabled()) { + this.jmxReporter = JmxReporter.forRegistry(registry).inDomain(manager.clusterName + "-metrics").build(); + this.jmxReporter.start(); + } else { + this.jmxReporter = null; + } + } + + /** + * Returns the registry containing all metrics. + *

+ * The metrics registry allows you to easily use the reporters that ship + * with Metrics + * or a custom written one. + *

+ * For instance, if {@code metrics} is {@code this} object, you could export the + * metrics to csv files using: + *

+     *     com.codahale.metrics.CsvReporter.forRegistry(metrics.getRegistry()).build(new File("measurements/")).start(1, TimeUnit.SECONDS);
+     * 
+ *

+ * If you already have a {@code MetricRegistry} in your application and wish to + * add the driver's metrics to it, the recommended approach is to use a listener: + *

+     *     // Your existing registry:
+     *     final com.codahale.metrics.MetricRegistry myRegistry = ...
+     *
+     *     cluster.getMetrics().getRegistry().addListener(new com.codahale.metrics.MetricRegistryListener() {
+     *         @Override
+     *         public void onGaugeAdded(String name, Gauge<?> gauge) {
+     *             if (myRegistry.getNames().contains(name)) {
+     *                 // name is already taken, maybe prefix with a namespace
+     *                 ...
+     *             } else {
+     *                 myRegistry.register(name, gauge);
+     *             }
+     *         }
+     *
+     *         ... // Implement other methods in a similar fashion
+     *     });
+     * 
+ * Since reporting is handled by your registry, you'll probably also want to disable + * JMX reporting with {@link Cluster.Builder#withoutJMXReporting()}. + * + * @return the registry containing all metrics. + */ + public MetricRegistry getRegistry() { + return registry; + } + + /** + * Returns metrics on the user requests performed on the Cluster. + *

+ * This metric exposes + *

    + *
  • the total number of requests.
  • + *
  • the requests rate (in requests per seconds), including 1, 5 and 15 minute rates.
  • + *
  • the mean, min and max latencies, as well as latency at a given percentile.
  • + *
+ * + * @return a {@code Timer} metric object exposing the rate and latency for + * user requests. + */ + public Timer getRequestsTimer() { + return requests; + } + + /** + * Returns an object grouping metrics related to the errors encountered. + * + * @return an object grouping metrics related to the errors encountered. + */ + public Errors getErrorMetrics() { + return errors; + } + + /** + * Returns the number of Cassandra hosts currently known by the driver (that is + * whether they are currently considered up or down). + * + * @return the number of Cassandra hosts currently known by the driver. + */ + public Gauge getKnownHosts() { + return knownHosts; + } + + /** + * Returns the number of Cassandra hosts the driver is currently connected to + * (that is have at least one connection opened to). + * + * @return the number of Cassandra hosts the driver is currently connected to. + */ + public Gauge getConnectedToHosts() { + return connectedTo; + } + + /** + * Returns the total number of currently opened connections to Cassandra hosts. + * + * @return The total number of currently opened connections to Cassandra hosts. + */ + public Gauge getOpenConnections() { + return openConnections; + } + + /** + * Returns the total number of currently "trashed" connections to Cassandra hosts. + *

+ * When the load to a host decreases, the driver will reclaim some connections in order to save + * resources. No requests are sent to these connections anymore, but they are kept open for an + * additional amount of time ({@link PoolingOptions#getIdleTimeoutSeconds()}), in case the load + * goes up again. This metric counts connections in that state. + * + * @return The total number of currently trashed connections to Cassandra hosts. + */ + public Gauge getTrashedConnections() { + return trashedConnections; + } + + /** + * @return The number of queued up tasks in the non-blocking executor (Cassandra Java Driver workers). + */ + public Gauge getExecutorQueueDepth() { + return executorQueueDepth; + } + + /** + * @return The number of queued up tasks in the blocking executor (Cassandra Java Driver blocking tasks worker). + */ + public Gauge getBlockingExecutorQueueDepth() { + return blockingExecutorQueueDepth; + } + + /** + * @return The size of the work queue for the reconnection scheduler (Reconnection). A queue size > 0 does not + * necessarily indicate a backlog as some tasks may not have been scheduled to execute yet. + */ + public Gauge getReconnectionSchedulerQueueSize() { + return reconnectionSchedulerQueueSize; + } + + /** + * @return The size of the work queue for the task scheduler (Scheduled Tasks). A queue size > 0 does not + * necessarily indicate a backlog as some tasks may not have been scheduled to execute yet. + */ + public Gauge getTaskSchedulerQueueSize() { + return taskSchedulerQueueSize; + } + + void shutdown() { + if (jmxReporter != null) + jmxReporter.stop(); + } + + /** + * Metrics on errors encountered. 
+ */ + public class Errors { + + private final Counter connectionErrors = registry.counter("connection-errors"); + + private final Counter writeTimeouts = registry.counter("write-timeouts"); + private final Counter readTimeouts = registry.counter("read-timeouts"); + private final Counter unavailables = registry.counter("unavailables"); + + private final Counter otherErrors = registry.counter("other-errors"); + + private final Counter retries = registry.counter("retries"); + private final Counter retriesOnWriteTimeout = registry.counter("retries-on-write-timeout"); + private final Counter retriesOnReadTimeout = registry.counter("retries-on-read-timeout"); + private final Counter retriesOnUnavailable = registry.counter("retries-on-unavailable"); + private final Counter ignores = registry.counter("ignores"); + private final Counter ignoresOnWriteTimeout = registry.counter("ignores-on-write-timeout"); + private final Counter ignoresOnReadTimeout = registry.counter("ignores-on-read-timeout"); + private final Counter ignoresOnUnavailable = registry.counter("ignores-on-unavailable"); + + private final Counter speculativeExecutions = registry.counter("speculative-executions"); + + /** + * Returns the number of connection to Cassandra nodes errors. + *

+ * This represents the number of times that a request to a Cassandra node + * has failed due to a connection problem. This thus also corresponds to + * how often the driver had to pick a fallback host for a request. + *

+ * You can expect a few connection errors when a Cassandra node fails + * (or is stopped) ,but if that number grows continuously you likely have + * a problem. + * + * @return the number of connection to Cassandra nodes errors. + */ + public Counter getConnectionErrors() { + return connectionErrors; + } + + /** + * Returns the number of write requests that returned a timeout (independently + * of the final decision taken by the {@link com.datastax.driver.core.policies.RetryPolicy}). + * + * @return the number of write timeout. + */ + public Counter getWriteTimeouts() { + return writeTimeouts; + } + + /** + * Returns the number of read requests that returned a timeout (independently + * of the final decision taken by the {@link com.datastax.driver.core.policies.RetryPolicy}). + * + * @return the number of read timeout. + */ + public Counter getReadTimeouts() { + return readTimeouts; + } + + /** + * Returns the number of requests that returned an unavailable exception + * (independently of the final decision taken by the + * {@link com.datastax.driver.core.policies.RetryPolicy}). + * + * @return the number of unavailable exceptions. + */ + public Counter getUnavailables() { + return unavailables; + } + + /** + * Returns the number of requests that returned errors not accounted for by + * another metric. This includes all types of invalid requests. + * + * @return the number of requests errors not accounted by another + * metric. + */ + public Counter getOthers() { + return otherErrors; + } + + /** + * Returns the number of times a request was retried due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}. + * + * @return the number of times a requests was retried due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}. + */ + public Counter getRetries() { + return retries; + } + + /** + * Returns the number of times a request was retried due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after a + * read timed out. 
+ * + * @return the number of times a requests was retried due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after a + * read timed out. + */ + public Counter getRetriesOnReadTimeout() { + return retriesOnReadTimeout; + } + + /** + * Returns the number of times a request was retried due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after a + * write timed out. + * + * @return the number of times a requests was retried due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after a + * write timed out. + */ + public Counter getRetriesOnWriteTimeout() { + return retriesOnWriteTimeout; + } + + /** + * Returns the number of times a request was retried due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after an + * unavailable exception. + * + * @return the number of times a requests was retried due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after an + * unavailable exception. + */ + public Counter getRetriesOnUnavailable() { + return retriesOnUnavailable; + } + + /** + * Returns the number of times a request was ignored + * due to the {@link com.datastax.driver.core.policies.RetryPolicy}, for + * example due to timeouts or unavailability. + * + * @return the number of times a request was ignored due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}. + */ + public Counter getIgnores() { + return ignores; + } + + /** + * Returns the number of times a request was ignored due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after a + * read timed out. + * + * @return the number of times a requests was ignored due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after a + * read timed out. + */ + public Counter getIgnoresOnReadTimeout() { + return ignoresOnReadTimeout; + } + + /** + * Returns the number of times a request was ignored due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after a + * write timed out. 
+ * + * @return the number of times a requests was ignored due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after a + * write timed out. + */ + public Counter getIgnoresOnWriteTimeout() { + return ignoresOnWriteTimeout; + } + + /** + * Returns the number of times a request was ignored due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after an + * unavailable exception. + * + * @return the number of times a requests was ignored due to the + * {@link com.datastax.driver.core.policies.RetryPolicy}, after an + * unavailable exception. + */ + public Counter getIgnoresOnUnavailable() { + return ignoresOnUnavailable; + } + + /** + * Returns the number of times a speculative execution was started + * because a previous execution did not complete within the delay + * specified by {@link SpeculativeExecutionPolicy}. + * + * @return the number of speculative executions. + */ + public Counter getSpeculativeExecutions() { + return speculativeExecutions; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/MetricsOptions.java b/driver-core/src/main/java/com/datastax/driver/core/MetricsOptions.java new file mode 100644 index 00000000000..aaf5cf19dba --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/MetricsOptions.java @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +/** + * {@link Metrics} options. + */ +public class MetricsOptions { + + private final boolean jmxEnabled; + + /** + * Creates a new {@code MetricsOptions} object with default values. + */ + public MetricsOptions() + { + this(true); + } + + /** + * Creates a new {@code MetricsOptions} object. + * + * @param jmxEnabled whether to enable JMX reporting or not. + */ + public MetricsOptions(boolean jmxEnabled) + { + this.jmxEnabled = jmxEnabled; + } + + /** + * Returns whether JMX reporting is enabled (the default). + * + * @return whether JMX reporting is enabled. + */ + public boolean isJMXReportingEnabled() + { + return jmxEnabled; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/NettyOptions.java b/driver-core/src/main/java/com/datastax/driver/core/NettyOptions.java new file mode 100644 index 00000000000..f57d6cc62fa --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/NettyOptions.java @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import static java.util.concurrent.TimeUnit.SECONDS; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.channel.ChannelOption; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; + +/** + * A set of hooks that allow clients to customize the driver's underlying Netty layer. + *

+ * Clients that need to hook into the driver's underlying Netty layer can + * subclass this class and provide the necessary customization by overriding + * its methods. + *

+ * Typically, clients would register this class with {@link Cluster#builder()}: + * + *

+ *     NettyOptions nettyOptions = ...
+ *     Cluster cluster = Cluster.builder()
+ *          .addContactPoint(...)
+ *          .withNettyOptions(nettyOptions)
+ *          .build();
+ * 
+ * + * Extending the NettyOptions API + *

+ * Contrary to other driver options, the options available in this class should + * be considered as advanced features and as such, they should only be + * modified by expert users. + *

+ * A misconfiguration introduced by the means of this API can have unexpected results + * and cause the driver to completely fail to connect. + *

+ * Moreover, since versions 2.0.9 and 2.1.4 (see JAVA-538), + * the driver is available in two different flavors: with a standard Maven dependency on Netty, + * or with a "shaded" (internalized) Netty dependency. + *

+ * Given that NettyOptions API exposes Netty classes ({@link SocketChannel}, etc.), + * it should only be extended by clients using the non-shaded + * version of driver. + *

+ * Extending this API with shaded Netty classes is not supported, + * and in particular for OSGi applications, it is likely that such a configuration would lead to + * compile and/or runtime errors. + * + * @jira_ticket JAVA-640 + * @since 2.0.10 + */ +public class NettyOptions { + + /** + * The default instance of {@link NettyOptions} to use. + */ + public static final NettyOptions DEFAULT_INSTANCE = new NettyOptions(); + + /** + * Return the {@link EventLoopGroup} instance to use. + *

+ * This hook is invoked only once at {@link Cluster} initialization; + * the returned instance will be kept in use throughout the cluster lifecycle. + *

+ * Typically, implementors would return a newly-created instance; + * it is however possible to re-use a shared instance, but in this + * case implementors should also override {@link #onClusterClose(EventLoopGroup)} + * to prevent the shared instance to be closed when the cluster is closed. + *

+ * The default implementation returns a new instance of {@link io.netty.channel.epoll.EpollEventLoopGroup} + * if {@link NettyUtil#isEpollAvailable() epoll is available}, + * or {@link NioEventLoopGroup} otherwise. + * + * @param threadFactory The {@link ThreadFactory} to use when creating a new {@link EventLoopGroup} instance; + * The driver will provide its own internal thread factory here. + * It is safe to ignore it and use another thread factory. + * @return the {@link EventLoopGroup} instance to use. + */ + public EventLoopGroup eventLoopGroup(ThreadFactory threadFactory) { + return NettyUtil.newEventLoopGroupInstance(threadFactory); + } + + /** + * Return the specific {@link SocketChannel} subclass to use. + *

+ * This hook is invoked only once at {@link Cluster} initialization; + * the returned instance will then be used each time the driver creates a new {@link Connection} + * and configures a new instance of {@link Bootstrap} for it. + *

+ * The default implementation returns {@link io.netty.channel.epoll.EpollSocketChannel} if {@link NettyUtil#isEpollAvailable() epoll is available}, + * or {@link NioSocketChannel} otherwise. + * + * @return The {@link SocketChannel} subclass to use. + */ + public Class channelClass() { + return NettyUtil.channelClass(); + } + + /** + * Hook invoked each time the driver creates a new {@link Connection} + * and configures a new instance of {@link Bootstrap} for it. + *

+ * This hook is guaranteed to be called after the driver has applied all + * {@link SocketOptions}s. + *

+ * This is a good place to add extra {@link io.netty.channel.ChannelHandler ChannelOption}s to the boostrap; e.g. + * plug a custom {@link io.netty.buffer.ByteBufAllocator ByteBufAllocator} implementation: + * + *

+     * ByteBufAllocator myCustomByteBufAllocator = ...
+     *
+     * public void afterBootstrapInitialized(Bootstrap bootstrap) {
+     *     bootstrap.option(ChannelOption.ALLOCATOR, myCustomByteBufAllocator);
+     * }
+     * 
+ * + * Note that the default implementation of this method configures a pooled {@code ByteBufAllocator} (Netty 4.0 + * defaults to unpooled). If you override this method to set unrelated options, make sure you call + * {@code super.afterBootstrapInitialized(bootstrap)}. + * + * @param bootstrap the {@link Bootstrap} being initialized. + */ + public void afterBootstrapInitialized(Bootstrap bootstrap) { + // In Netty 4.1.x, pooled will be the default, so this won't be necessary anymore + bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT); + } + + /** + * Hook invoked each time the driver creates a new {@link Connection} + * and initializes the {@link SocketChannel channel}. + *

+ * This hook is guaranteed to be called after the driver has registered + * all its internal channel handlers, and applied the configured {@link SSLOptions}, if any. + *

+ * This is a good place to add extra {@link io.netty.channel.ChannelHandler ChannelHandler}s + * to the channel's pipeline; e.g. to add a custom SSL handler to the beginning of the handler chain, + * do the following: + * + *

+     * ChannelPipeline pipeline = channel.pipeline();
+     * SSLEngine myCustomSSLEngine = ...
+     * SslHandler myCustomSSLHandler = new SslHandler(myCustomSSLEngine);
+     * pipeline.addFirst("ssl", myCustomSSLHandler);
+     * 
+ * + * Note: if you intend to provide your own SSL implementation, + * do not enable the driver's built-in {@link SSLOptions} at the same time. + * + * @param channel the {@link SocketChannel} instance, after being initialized by the driver. + * @throws Exception if this methods encounters any errors. + */ + public void afterChannelInitialized(SocketChannel channel) throws Exception { + //noop + } + + /** + * Hook invoked when the cluster is shutting down after a call to {@link Cluster#close()}. + *

+ * This is guaranteed to be called only after all connections have been individually + * closed, and their channels closed, and only once per {@link EventLoopGroup} instance. + *

+ * This gives the implementor a chance to close the {@link EventLoopGroup} properly, if required. + *

+ * The default implementation initiates a {@link EventLoopGroup#shutdownGracefully(long, long, TimeUnit) graceful shutdown} + * of the passed {@link EventLoopGroup} instance with no "quiet period" and a timeout of 15 seconds; + * then waits uninterruptibly for the shutdown to complete, or the timeout to occur, whichever happens first. + *

+ * Implementation note: if the {@link EventLoopGroup} instance is being shared, or used for other purposes than to + * coordinate Netty events for the current cluster, than it should not be shut down here; + * subclasses would have to override this method accordingly to take the appropriate action. + * + * @param eventLoopGroup the event loop group used by the cluster being closed + */ + public void onClusterClose(EventLoopGroup eventLoopGroup) { + // shutdownGracefully with default parameters employs a quiet period of 2 seconds + // where in pre-Netty4 versions, closing a cluster instance was very quick (milliseconds). + // Since we close the channels before shutting down the eventLoopGroup, + // it is safe to reduce the quiet period to 0 seconds + eventLoopGroup.shutdownGracefully(0, 15, SECONDS).syncUninterruptibly(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/NettyUtil.java b/driver-core/src/main/java/com/datastax/driver/core/NettyUtil.java new file mode 100644 index 00000000000..09a0f1973d4 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/NettyUtil.java @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.lang.reflect.Constructor; +import java.util.Locale; +import java.util.concurrent.ThreadFactory; + +import com.google.common.base.Throwables; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A set of utilities related to the underlying Netty layer. + */ +@SuppressWarnings("unchecked") +class NettyUtil { + + private static final boolean FORCE_NIO = SystemProperties.getBoolean("com.datastax.driver.FORCE_NIO", false); + + private static final Logger LOGGER = LoggerFactory.getLogger(NettyUtil.class); + + private static final boolean SHADED; + + private static final boolean USE_EPOLL; + + private static final Constructor EPOLL_EVENT_LOOP_GROUP_CONSTRUCTOR; + + private static final Class EPOLL_CHANNEL_CLASS; + + private static final Class[] EVENT_GROUP_ARGUMENTS = { int.class, ThreadFactory.class }; + + static { + boolean shaded; + try { + // prevent this string from being shaded + Class.forName(String.format("%s.%s.channel.Channel", "io", "netty")); + shaded = false; + } catch (ClassNotFoundException e) { + try { + Class.forName("com.datastax.shaded.netty.channel.Channel"); + shaded = true; + } catch (ClassNotFoundException e1) { + throw new AssertionError("Cannot locate Netty classes in the classpath:" + e1); + } + } + SHADED = shaded; + boolean useEpoll = false; + if (!SHADED) { + try { + Class epoll = Class.forName("io.netty.channel.epoll.Epoll"); + if (FORCE_NIO) { + LOGGER.info("Found Netty's native epoll transport in the classpath, " + + "but NIO was forced through the FORCE_NIO system property."); + } else if (!System.getProperty("os.name", "").toLowerCase(Locale.US).equals("linux")) { + LOGGER.warn("Found Netty's native epoll transport, but not running on linux-based operating " + + "system. 
Using NIO instead."); + } else if (!(Boolean)epoll.getMethod("isAvailable").invoke(null)) { + LOGGER.warn("Found Netty's native epoll transport in the classpath, but epoll is not available. " + + "Using NIO instead.", (Throwable)epoll.getMethod("unavailabilityCause").invoke(null)); + } else { + LOGGER.info("Found Netty's native epoll transport in the classpath, using it"); + useEpoll = true; + } + } catch (ClassNotFoundException e) { + LOGGER.info("Did not find Netty's native epoll transport in the classpath, defaulting to NIO."); + } catch (Exception e) { + LOGGER.warn("Unexpected error trying to find Netty's native epoll transport in the classpath, defaulting to NIO.", e); + } + } else { + LOGGER.info("Detected shaded Netty classes in the classpath; native epoll transport will not work properly, " + + "defaulting to NIO."); + } + USE_EPOLL = useEpoll; + Constructor constructor = null; + Class channelClass = null; + if (USE_EPOLL) { + try { + channelClass = (Class)Class.forName("io.netty.channel.epoll.EpollSocketChannel"); + Class epoolEventLoupGroupClass = Class.forName("io.netty.channel.epoll.EpollEventLoopGroup"); + constructor = (Constructor)epoolEventLoupGroupClass.getDeclaredConstructor(EVENT_GROUP_ARGUMENTS); + } catch (Exception e) { + throw new AssertionError("Netty's native epoll is in use but cannot locate Epoll classes, this should not happen: " + e); + } + } + EPOLL_EVENT_LOOP_GROUP_CONSTRUCTOR = constructor; + EPOLL_CHANNEL_CLASS = channelClass; + } + + /** + * + * @return true if the current driver bundle is using shaded Netty classes, false otherwise. + */ + + public static boolean isShaded() { + return SHADED; + } + + /** + * @return true if native epoll transport is available in the classpath, false otherwise. + */ + public static boolean isEpollAvailable() { + return USE_EPOLL; + } + + /** + * Return a new instance of {@link EventLoopGroup}. + *

+ * Returns an instance of {@link io.netty.channel.epoll.EpollEventLoopGroup} if {@link #isEpollAvailable() epoll is available}, + * or an instance of {@link NioEventLoopGroup} otherwise. + * + * @param factory the {@link ThreadFactory} instance to use to create the new instance of {@link EventLoopGroup} + * @return a new instance of {@link EventLoopGroup} + */ + public static EventLoopGroup newEventLoopGroupInstance(ThreadFactory factory) { + if (isEpollAvailable()) { + try { + return EPOLL_EVENT_LOOP_GROUP_CONSTRUCTOR.newInstance(0, factory); + } catch (Exception e) { + throw Throwables.propagate(e); // should not happen + } + } else { + return new NioEventLoopGroup(0, factory); + } + } + + /** + * Return the SocketChannel class to use. + *

+ * Returns an instance of {@link io.netty.channel.epoll.EpollSocketChannel} if {@link #isEpollAvailable() epoll is available}, + * or an instance of {@link NioSocketChannel} otherwise. + * + * @return the SocketChannel class to use. + */ + public static Class channelClass() { + if (isEpollAvailable()) { + return EPOLL_CHANNEL_CLASS; + } else { + return NioSocketChannel.class; + } + } + +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/OperationTimedOutException.java b/driver-core/src/main/java/com/datastax/driver/core/OperationTimedOutException.java new file mode 100644 index 00000000000..1767fac1ffb --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/OperationTimedOutException.java @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; + +/** + * Thrown on a client-side timeout, i.e. when the client didn't hear back from the server within + * {@link SocketOptions#getReadTimeoutMillis()}. 
+ */ +class OperationTimedOutException extends ConnectionException { + public OperationTimedOutException(InetSocketAddress address) { + super(address, "Operation timed out"); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/PagingState.java b/driver-core/src/main/java/com/datastax/driver/core/PagingState.java new file mode 100644 index 00000000000..cb914ef80e2 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/PagingState.java @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; + +import com.datastax.driver.core.exceptions.PagingStateException; +import com.datastax.driver.core.utils.Bytes; + +/** + * The paging state of a query. + * + * This object represents the next page to be fetched if the query is + * multi page. It can be saved and reused later on the same statement. + * + * The PagingState can be serialized and deserialized either as a String + * or as a byte array. 
+ * + * @see Statement#setPagingState(PagingState) + */ +public class PagingState { + + private byte[] pagingState; + private byte[] hash; + private ProtocolVersion protocolVersion; + + PagingState(ByteBuffer pagingState, Statement statement, ProtocolVersion protocolVersion) { + this.pagingState = Bytes.getArray(pagingState); + this.hash = hash(statement, protocolVersion); + this.protocolVersion = protocolVersion; + } + + // The serialized form of the paging state is: + // size of raw state|size of hash|raw state|hash|protocol version + // + // The protocol version might be absent, in which case it defaults to V2 (this is for backward + // compatibility with 2.0.10 where it is always absent). + private PagingState(byte[] complete) { + // Check the sizes in the beginning of the buffer, otherwise we cannot build the paging state object + ByteBuffer pagingStateBB = ByteBuffer.wrap(complete); + int pagingSize = pagingStateBB.getShort(); + int hashSize = pagingStateBB.getShort(); + if (pagingSize + hashSize != pagingStateBB.remaining() && pagingSize + hashSize + 2 != pagingStateBB.remaining()) { + throw new PagingStateException("Cannot deserialize paging state, invalid format. " + + "The serialized form was corrupted, or not initially generated from a PagingState object."); + } + this.pagingState = new byte[pagingSize]; + pagingStateBB.get(this.pagingState); + this.hash = new byte[hashSize]; + pagingStateBB.get(this.hash); + this.protocolVersion = (pagingStateBB.remaining() > 0) + ? 
ProtocolVersion.fromInt(pagingStateBB.getShort()) + : ProtocolVersion.V2; + } + + private byte[] hash(Statement statement, ProtocolVersion protocolVersion) { + byte[] digest; + ByteBuffer[] values; + MessageDigest md; + assert !(statement instanceof BatchStatement); + try { + md = MessageDigest.getInstance("MD5"); + if (statement instanceof BoundStatement) { + BoundStatement bs = ((BoundStatement)statement); + md.update(bs.preparedStatement().getQueryString().getBytes()); + values = bs.wrapper.values; + } else { + //it is a RegularStatement since Batch statements are not allowed + RegularStatement rs = (RegularStatement)statement; + md.update(rs.getQueryString().getBytes()); + values = rs.getValues(protocolVersion); + } + if (values != null) { + for (ByteBuffer value : values) { + md.update(value.duplicate()); + } + } + md.update(this.pagingState); + digest = md.digest(); + + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("MD5 doesn't seem to be available on this JVM", e); + } + return digest; + } + + boolean matches(Statement statement) { + byte[] toTest = hash(statement, protocolVersion); + return Arrays.equals(toTest, this.hash); + } + + private ByteBuffer generateCompleteOutput() { + ByteBuffer res = ByteBuffer.allocate(pagingState.length + hash.length + 6); + + res.putShort((short)pagingState.length); + res.putShort((short)hash.length); + + res.put(pagingState); + res.put(hash); + + res.putShort((short)protocolVersion.toInt()); + + res.rewind(); + + return res; + } + + ByteBuffer getRawState() { + return ByteBuffer.wrap(this.pagingState); + } + + @Override + public String toString() { + return Bytes.toRawHexString(generateCompleteOutput()); + } + + /** + * Create a PagingState object from a string previously generated with {@link #toString()}. + * + * @param string the string value. + * @return the PagingState object created. + * + * @throws PagingStateException if the string does not have the correct format. 
+ */ + public static PagingState fromString(String string) { + try { + byte[] complete = Bytes.fromRawHexString(string, 0); + return new PagingState(complete); + } catch (Exception e) { + throw new PagingStateException("Cannot deserialize paging state, invalid format. " + + "The serialized form was corrupted, or not initially generated from a PagingState object.", e); + } + } + + /** + * Return a representation of the paging state object as a byte array. + * + * @return the paging state as a byte array. + */ + public byte[] toBytes() { + return generateCompleteOutput().array(); + } + + /** + * Create a PagingState object from a byte array previously generated with {@link #toBytes()}. + * + * @param pagingState The byte array representation. + * @return the PagingState object created. + * + * @throws PagingStateException if the byte array does not have the correct format. + */ + public static PagingState fromBytes(byte[] pagingState) { + return new PagingState(pagingState); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ParseUtils.java b/driver-core/src/main/java/com/datastax/driver/core/ParseUtils.java new file mode 100644 index 00000000000..471775d2346 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ParseUtils.java @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +/** + * Simple utility method used to help parsing CQL values (mainly UDT and collection ones). + */ +abstract class ParseUtils { + + private ParseUtils() {} + + /** + * Returns the index of the first character in toParse from idx that is not a "space". + * + * @param toParse the string to skip space on. + * @param idx the index to start skipping space from. + * @return the index of the first character in toParse from idx that is not a "space. + */ + public static int skipSpaces(String toParse, int idx) { + while (isBlank(toParse.charAt(idx)) && idx < toParse.length()) + ++idx; + return idx; + } + + /** + * Assuming that idx points to the beginning of a CQL value in toParse, returns the + * index of the first character after this value. + * + * @param toParse the string to skip a value form. + * @param idx the index to start parsing a value from. + * @return the index ending the CQL value starting at {@code idx}. + * + * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL + * value. 
+ */ + public static int skipCQLValue(String toParse, int idx) { + if (idx >= toParse.length()) + throw new IllegalArgumentException(); + + if (isBlank(toParse.charAt(idx))) + throw new IllegalArgumentException(); + + int cbrackets = 0; + int sbrackets = 0; + int parens = 0; + boolean inString = false; + + do { + char c = toParse.charAt(idx); + if (inString) { + if (c == '\'') { + if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\'') { + ++idx; // this is an escaped quote, skip it + } else { + inString = false; + if (cbrackets == 0 && sbrackets == 0 && parens == 0) + return idx + 1; + } + } + // Skip any other character + } else if (c == '\'') { + inString = true; + } else if (c == '{') { + ++cbrackets; + } else if (c == '[') { + ++sbrackets; + } else if (c == '(') { + ++parens; + } else if (c == '}') { + if (cbrackets == 0) + return idx; + + --cbrackets; + if (cbrackets == 0 && sbrackets == 0 && parens == 0) + return idx + 1; + } else if (c == ']') { + if (sbrackets == 0) + return idx; + + --sbrackets; + if (cbrackets == 0 && sbrackets == 0 && parens == 0) + return idx + 1; + } else if (c == ')') { + if (parens == 0) + return idx; + + --parens; + if (cbrackets == 0 && sbrackets == 0 && parens == 0) + return idx + 1; + } else if (isBlank(c) || !isIdentifierChar(c)) { + if (cbrackets == 0 && sbrackets == 0 && parens == 0) + return idx; + } + } while (++idx < toParse.length()); + + if (inString || cbrackets != 0 || sbrackets != 0 || parens != 0) + throw new IllegalArgumentException(); + return idx; + } + + /** + * Assuming that idx points to the beginning of a CQL identifier in toParse, returns the + * index of the first character after this identifier. + * + * @param toParse the string to skip an identifier from. + * @param idx the index to start parsing an identifier from. + * @return the index ending the CQL identifier starting at {@code idx}. + * + * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL + * identifier. 
+ */ + public static int skipCQLId(String toParse, int idx) { + if (idx >= toParse.length()) + throw new IllegalArgumentException(); + + char c = toParse.charAt(idx); + if (isIdentifierChar(c)) { + while (idx < toParse.length() && isIdentifierChar(toParse.charAt(idx))) + idx++; + return idx; + } + + if (c != '"') + throw new IllegalArgumentException(); + + while (++idx < toParse.length()) { + c = toParse.charAt(idx); + if (c != '"') + continue; + + if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\"') + ++idx; // this is an escaped double quote, skip it + else + return idx + 1; + } + throw new IllegalArgumentException(); + } + + // [0..9a..zA..Z-+._&] + public static boolean isIdentifierChar(int c) { + return (c >= '0' && c <= '9') + || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') + || c == '-' || c == '+' || c == '.' || c == '_' || c == '&'; + } + + // [ \t\n] + public static boolean isBlank(int c) { + return c == ' ' || c == '\t' || c == '\n'; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/PerHostPercentileTracker.java b/driver-core/src/main/java/com/datastax/driver/core/PerHostPercentileTracker.java new file mode 100644 index 00000000000..3f5a22c9d7c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/PerHostPercentileTracker.java @@ -0,0 +1,317 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.util.Set; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +import com.google.common.annotations.Beta; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.MapMaker; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; +import org.HdrHistogram.Histogram; +import org.HdrHistogram.Recorder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.datastax.driver.core.exceptions.*; + +/** + * A {@link LatencyTracker} that records latencies for each host over a sliding time interval, and exposes an + * API to retrieve the latency at a given percentile. + *

+ * To use this class, build an instance with {@link #builderWithHighestTrackableLatencyMillis(long)} and register + * it with your {@link com.datastax.driver.core.Cluster} instance: + *

+ * PerHostPercentileTracker tracker = PerHostPercentileTracker
+ *     .builderWithHighestTrackableLatencyMillis(15000)
+ *     .build();
+ *
+ * cluster.register(tracker);
+ * ...
+ * tracker.getLatencyAtPercentile(host1, 99.0);
+ * 
+ *

+ * This class uses HdrHistogram to record latencies: + * for each host, there is a "live" histogram where current latencies are recorded, and a "cached", read-only histogram + * that is used when clients call {@link #getLatencyAtPercentile(Host, double)}. Each time the cached histogram becomes + * older than the interval, the two histograms are switched. Note that statistics will not be available during the first + * interval at cluster startup, since we don't have a cached histogram yet. + *

+ * Note that this class is currently marked "beta": it hasn't been extensively tested yet, and the API is still subject + * to change. + */ +@Beta +public class PerHostPercentileTracker implements LatencyTracker { + private static final Logger logger = LoggerFactory.getLogger(PerHostPercentileTracker.class); + + private final ConcurrentMap recorders; + private final ConcurrentMap cachedHistograms; + private final long highestTrackableLatencyMillis; + private final int numberOfSignificantValueDigits; + private final int minRecordedValues; + private final long intervalMs; + + private PerHostPercentileTracker(long highestTrackableLatencyMillis, int numberOfSignificantValueDigits, + int numberOfHosts, + int minRecordedValues, + long intervalMs) { + this.highestTrackableLatencyMillis = highestTrackableLatencyMillis; + this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; + this.minRecordedValues = minRecordedValues; + this.intervalMs = intervalMs; + this.recorders = new MapMaker().initialCapacity(numberOfHosts).makeMap(); + this.cachedHistograms = new MapMaker().initialCapacity(numberOfHosts).makeMap(); + } + + /** + * Returns a builder to create a new instance. + * + * @param highestTrackableLatencyMillis the highest expected latency. If a higher value is reported, it will be ignored and a + * warning will be logged. A good rule of thumb is to set it slightly higher than + * {@link SocketOptions#getReadTimeoutMillis()}. + * @return the builder. + */ + public static Builder builderWithHighestTrackableLatencyMillis(long highestTrackableLatencyMillis) { + return new Builder(highestTrackableLatencyMillis); + } + + /** + * Helper class to builder {@code PerHostPercentileTracker} instances with a fluent interface. 
+ */ + public static class Builder { + private final long highestTrackableLatencyMillis; + private int numberOfSignificantValueDigits = 3; + private int minRecordedValues = 1000; + private int numberOfHosts = 16; + private long intervalMs = MINUTES.toMillis(5); + + Builder(long highestTrackableLatencyMillis) { + this.highestTrackableLatencyMillis = highestTrackableLatencyMillis; + } + + /** + * Sets the number of significant decimal digits to which histograms will maintain value + * resolution and separation. This must be an integer between 0 and 5. + *

+ * If not set explicitly, this value defaults to 3. + * + * @param numberOfSignificantValueDigits the new value. + * @return this builder. + * + * @see the HdrHistogram Javadocs + * for a more detailed explanation on how this parameter affects the resolution of recorded samples. + */ + public Builder withNumberOfSignificantValueDigits(int numberOfSignificantValueDigits) { + this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; + return this; + } + + /** + * Sets the minimum number of values that must be recorded for a host before we consider + * the sample size significant. + *

+ * If this count is not reached during a given interval, {@link #getLatencyAtPercentile(Host, double)} + * will return a negative value, indicating that statistics are not available. In particular, this is true + * during the first interval. + *

+ * If not set explicitly, this value default to 1000. + * + * @param minRecordedValues the new value. + * @return this builder. + */ + public Builder withMinRecordedValues(int minRecordedValues) { + this.minRecordedValues = minRecordedValues; + return this; + } + + /** + * Sets the number of distinct hosts that the driver will ever connect to. + *

+ * This parameter is only used to pre-size internal maps in order to avoid unnecessary rehashing. + *

+ * If not set explicitly, this value defaults to 16. + * + * @param numberOfHosts the new value. + * @return this builder. + */ + public Builder withNumberOfHosts(int numberOfHosts) { + this.numberOfHosts = numberOfHosts; + return this; + } + + /** + * Sets the time interval over which samples are recorded. + *

+ * For each host, there is a "live" histogram where current latencies are recorded, and a "cached", read-only histogram + * that is used when clients call {@link #getLatencyAtPercentile(Host, double)}. Each time the cached histogram becomes + * older than the interval, the two histograms are switched. Note that statistics will not be available during the first + * interval at cluster startup, since we don't have a cached histogram yet. + *

+ * If not set explicitly, this value defaults to 5 minutes. + * + * @param interval the new interval. + * @param unit the unit that the interval is expressed in. + * @return this builder. + */ + public Builder withInterval(long interval, TimeUnit unit) { + this.intervalMs = MILLISECONDS.convert(interval, unit); + return this; + } + + /** + * Builds the {@code PerHostPercentileTracker} instance configured with this builder. + * + * @return the instance. + */ + public PerHostPercentileTracker build() { + return new PerHostPercentileTracker(highestTrackableLatencyMillis, numberOfSignificantValueDigits, numberOfHosts, minRecordedValues, intervalMs); + } + } + + @Override + public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { + if (!shouldConsiderNewLatency(statement, exception)) + return; + + long latencyMs = NANOSECONDS.toMillis(newLatencyNanos); + try { + getRecorder(host).recordValue(latencyMs); + } catch (ArrayIndexOutOfBoundsException e) { + logger.warn("Got request with latency of {} ms, which exceeds the configured maximum trackable value {}", + latencyMs, highestTrackableLatencyMillis); + } + } + + /** + * Returns the request latency for a host at a given percentile. + * + * @param host the host. + * @param percentile the percentile (for example, {@code 99.0} for the 99th percentile). + * @return the latency (in milliseconds) at the given percentile, or a negative value if it's not available yet. 
+ */ + public long getLatencyAtPercentile(Host host, double percentile) { + checkArgument(percentile >= 0.0 && percentile < 100, + "percentile must be between 0.0 and 100 (was %f)"); + Histogram histogram = getLastIntervalHistogram(host); + if (histogram == null || histogram.getTotalCount() < minRecordedValues) + return -1; + + return histogram.getValueAtPercentile(percentile); + } + + private Recorder getRecorder(Host host) { + Recorder recorder = recorders.get(host); + if (recorder == null) { + recorder = new Recorder(highestTrackableLatencyMillis, numberOfSignificantValueDigits); + Recorder old = recorders.putIfAbsent(host, recorder); + if (old != null) { + // We got beaten at creating the recorder, use the actual instance and discard ours + recorder = old; + } else { + // Also set an empty cache entry to remember the time we started recording: + cachedHistograms.putIfAbsent(host, CachedHistogram.empty()); + } + } + return recorder; + } + + /** @return null if no histogram is available yet (no entries recorded, or not for long enough) */ + private Histogram getLastIntervalHistogram(Host host) { + try { + while (true) { + CachedHistogram entry = cachedHistograms.get(host); + if (entry == null) + return null; + + long age = System.currentTimeMillis() - entry.timestamp; + if (age < intervalMs) { // current histogram is recent enough + return entry.histogram.get(); + } else { // need to refresh + Recorder recorder = recorders.get(host); + // intervalMs should be much larger than the time it takes to replace a histogram, so this future should never block + Histogram staleHistogram = entry.histogram.get(0, MILLISECONDS); + SettableFuture future = SettableFuture.create(); + CachedHistogram newEntry = new CachedHistogram(future); + if (cachedHistograms.replace(host, entry, newEntry)) { + // Only get the new histogram if we successfully replaced the cache entry. + // This ensures that only one thread will do it. 
+ Histogram newHistogram = recorder.getIntervalHistogram(staleHistogram); + future.set(newHistogram); + return newHistogram; + } + // If we couldn't replace the entry it means we raced, so loop to try again + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } catch (ExecutionException e) { + throw new DriverInternalError("Unexpected error", e.getCause()); + } catch (TimeoutException e) { + throw new DriverInternalError("Unexpected timeout while getting histogram", e); + } + } + + static class CachedHistogram { + final ListenableFuture histogram; + final long timestamp; + + CachedHistogram(ListenableFuture histogram) { + this.histogram = histogram; + this.timestamp = System.currentTimeMillis(); + } + + static CachedHistogram empty() { + return new CachedHistogram(Futures.immediateFuture(null)); + } + } + + // TODO this was copy/pasted from LatencyAwarePolicy, maybe it could be refactored as a shared method + private boolean shouldConsiderNewLatency(Statement statement, Exception exception) { + // query was successful: always consider + if (exception == null) + return true; + // filter out "fast" errors + return !EXCLUDED_EXCEPTIONS.contains(exception.getClass()); + } + + /** + * A set of DriverException subclasses that we should prevent from updating the host's score. + * The intent behind it is to filter out "fast" errors: when a host replies with such errors, + * it usually does so very quickly, because it did not involve any actual + * coordination work. Such errors are not good indicators of the host's responsiveness, + * and tend to make the host's score look better than it actually is. 
+ */ + private static final Set> EXCLUDED_EXCEPTIONS = ImmutableSet.>of( + UnavailableException.class, // this is done via the snitch and is usually very fast + OverloadedException.class, + BootstrappingException.class, + UnpreparedException.class, + QueryValidationException.class // query validation also happens at early stages in the coordinator + ); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/PlainTextAuthProvider.java b/driver-core/src/main/java/com/datastax/driver/core/PlainTextAuthProvider.java new file mode 100644 index 00000000000..a404f1ba355 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/PlainTextAuthProvider.java @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import javax.security.sasl.SaslException; +import java.net.InetSocketAddress; +import java.util.Map; + +import com.google.common.base.Charsets; +import com.google.common.collect.ImmutableMap; + +/** + * A simple {@code AuthProvider} implementation. + *

+ * This provider allows to programmatically define authentication + * information that will then apply to all hosts. The + * PlainTextAuthenticator instances it returns support SASL + * authentication using the PLAIN mechanism for version 2 of the + * CQL native protocol. + */ +public class PlainTextAuthProvider implements AuthProvider { + + private final String username; + private final String password; + + /** + * Creates a new simple authentication information provider with the + * supplied credentials. + * @param username to use for authentication requests + * @param password to use for authentication requests + */ + public PlainTextAuthProvider(String username, String password) { + this.username = username; + this.password = password; + } + + /** + * Uses the supplied credentials and the SASL PLAIN mechanism to login + * to the server. + * + * @param host the Cassandra host with which we want to authenticate + * @return an Authenticator instance which can be used to perform + * authentication negotiations on behalf of the client + */ + public Authenticator newAuthenticator(InetSocketAddress host) { + return new PlainTextAuthenticator(username, password); + } + + /** + * Simple implementation of {@link Authenticator} which can + * perform authentication against Cassandra servers configured + * with PasswordAuthenticator. 
+ */ + private static class PlainTextAuthenticator extends ProtocolV1Authenticator implements Authenticator { + + private final byte[] username; + private final byte[] password; + + public PlainTextAuthenticator(String username, String password) { + this.username = username.getBytes(Charsets.UTF_8); + this.password = password.getBytes(Charsets.UTF_8); + } + + @Override + public byte[] initialResponse() { + byte[] initialToken = new byte[username.length + password.length + 2]; + initialToken[0] = 0; + System.arraycopy(username, 0, initialToken, 1, username.length); + initialToken[username.length + 1] = 0; + System.arraycopy(password, 0, initialToken, username.length + 2, password.length); + return initialToken; + } + + @Override + public byte[] evaluateChallenge(byte[] challenge) { + return null; + } + + @Override + public void onAuthenticationSuccess(byte[] token) { + // no-op, the server should send nothing anyway + } + + Map getCredentials() { + return ImmutableMap.of("username", new String(username, Charsets.UTF_8), + "password", new String(password, Charsets.UTF_8)); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java new file mode 100644 index 00000000000..d29face06fa --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java @@ -0,0 +1,572 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.Map; +import java.util.concurrent.Executor; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.MoreExecutors; + +import static com.datastax.driver.core.HostDistance.LOCAL; +import static com.datastax.driver.core.HostDistance.REMOTE; + +/** + * Options related to connection pooling. + *

+ * The driver uses connections in an asynchronous manner, meaning that + * multiple requests can be submitted on the same connection at the same + * time. Therefore only a relatively small number of connections is needed. + * For each host, the driver uses a connection pool that may have a variable + * size (it will automatically adjust to the current load). + *

+ * With {@code ProtocolVersion#V2} or below, there are at most 128 simultaneous + * requests per connection, so the pool defaults to a variable size. You will + * typically raise the maximum capacity by adding more connections with + * {@link #setMaxRequestsPerConnection(HostDistance, int)}. + *

+ * With {@code ProtocolVersion#V3} or above, there are up to 32768 requests per + * connection, and the pool defaults to a fixed size of 1. You will typically + * raise the maximum capacity by allowing more simultaneous requests per connection + * ({@link #setMaxRequestsPerConnection(HostDistance, int)}). + *

+ * All parameters can be separately set for {@code LOCAL} and + * {@code REMOTE} hosts ({@link HostDistance}). For {@code IGNORED} hosts, + * no connections are created so these settings cannot be changed. + */ +public class PoolingOptions { + + static final int UNSET = Integer.MIN_VALUE; + + private static final Map> DEFAULTS = ImmutableMap.>of( + ProtocolVersion.V2, ImmutableMap.builder() + .put("corePoolLocal", 2) + .put("maxPoolLocal", 8) + .put("corePoolRemote", 1) + .put("maxPoolRemote", 2) + .put("newConnectionThresholdLocal", 100) + .put("newConnectionThresholdRemote", 100) + .put("maxRequestsPerConnectionLocal", 128) + .put("maxRequestsPerConnectionRemote", 128) + .build(), + + ProtocolVersion.V3, ImmutableMap.builder() + .put("corePoolLocal", 1) + .put("maxPoolLocal", 1) + .put("corePoolRemote", 1) + .put("maxPoolRemote", 1) + .put("newConnectionThresholdLocal", 800) + .put("newConnectionThresholdRemote", 200) + .put("maxRequestsPerConnectionLocal", 1024) + .put("maxRequestsPerConnectionRemote", 256) + .build() + ); + + private static final int DEFAULT_IDLE_TIMEOUT_SECONDS = 120; + private static final int DEFAULT_POOL_TIMEOUT_MILLIS = 5000; + private static final int DEFAULT_HEARTBEAT_INTERVAL_SECONDS = 30; + + private static final Executor DEFAULT_INITIALIZATION_EXECUTOR = MoreExecutors.sameThreadExecutor(); + + private volatile Cluster.Manager manager; + private volatile ProtocolVersion protocolVersion; + + // The defaults for these fields depend on the protocol version, which is only known after control connection initialization. + // Yet if the user set them before initialization, we want to keep their values. So we use -1 to mean "uninitialized". 
+ private final int[] coreConnections = new int[]{ UNSET, UNSET, 0 }; + private final int[] maxConnections = new int[]{ UNSET, UNSET, 0 }; + private final int[] newConnectionThreshold = new int[]{ UNSET, UNSET, 0 }; + private volatile int maxRequestsPerConnectionLocal = UNSET; + private volatile int maxRequestsPerConnectionRemote = UNSET; + + private volatile int idleTimeoutSeconds = DEFAULT_IDLE_TIMEOUT_SECONDS; + private volatile int poolTimeoutMillis = DEFAULT_POOL_TIMEOUT_MILLIS; + private volatile int heartbeatIntervalSeconds = DEFAULT_HEARTBEAT_INTERVAL_SECONDS; + + private volatile Executor initializationExecutor = DEFAULT_INITIALIZATION_EXECUTOR; + + public PoolingOptions() { + } + + void register(Cluster.Manager manager) { + this.manager = manager; + } + + /** + * Returns the core number of connections per host. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the core number of connections per host at distance {@code distance}. + */ + public int getCoreConnectionsPerHost(HostDistance distance) { + return coreConnections[distance.ordinal()]; + } + + /** + * Sets the core number of connections per host. + *

+ * For the provided {@code distance}, this corresponds to the number of + * connections initially created and kept open to each host of that + * distance. + *

+ * The default value is: + *

    + *
  • with {@code ProtocolVersion#V2} or below: 2 for {@code LOCAL} hosts and 1 for {@code REMOTE} hosts.
  • + *
  • with {@code ProtocolVersion#V3} or above: 1 for all hosts.
  • + *
+ * + * @param distance the {@code HostDistance} for which to set this threshold. + * @param newCoreConnections the value to set + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, + * or if {@code newCoreConnections} is greater than the maximum value for this distance. + * + * @see #setConnectionsPerHost(HostDistance, int, int) + */ + public synchronized PoolingOptions setCoreConnectionsPerHost(HostDistance distance, int newCoreConnections) { + if (distance == HostDistance.IGNORED) + throw new IllegalArgumentException("Cannot set core connections per host for " + distance + " hosts"); + + if (maxConnections[distance.ordinal()] != UNSET) + checkConnectionsPerHostOrder(newCoreConnections, maxConnections[distance.ordinal()], distance); + + int oldCore = coreConnections[distance.ordinal()]; + coreConnections[distance.ordinal()] = newCoreConnections; + if (oldCore < newCoreConnections && manager != null) + manager.ensurePoolsSizing(); + return this; + } + + /** + * Returns the maximum number of connections per host. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the maximum number of connections per host at distance {@code distance}. + */ + public int getMaxConnectionsPerHost(HostDistance distance) { + return maxConnections[distance.ordinal()]; + } + + /** + * Sets the maximum number of connections per host. + *

+ * For the provided {@code distance}, this corresponds to the maximum + * number of connections that can be created per host at that distance. + *

+ * The default value is: + *

    + *
  • with {@code ProtocolVersion#V2} or below: 8 for {@code LOCAL} hosts and 2 for {@code REMOTE} hosts.
  • + *
  • with {@code ProtocolVersion#V3} or above: 1 for all hosts.
  • + *
+ * + * @param distance the {@code HostDistance} for which to set this threshold. + * @param newMaxConnections the value to set + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, + * or if {@code newMaxConnections} is less than the core value for this distance. + * + * @see #setConnectionsPerHost(HostDistance, int, int) + */ + public synchronized PoolingOptions setMaxConnectionsPerHost(HostDistance distance, int newMaxConnections) { + if (distance == HostDistance.IGNORED) + throw new IllegalArgumentException("Cannot set max connections per host for " + distance + " hosts"); + + if (coreConnections[distance.ordinal()] != UNSET) + checkConnectionsPerHostOrder(coreConnections[distance.ordinal()], newMaxConnections, distance); + + maxConnections[distance.ordinal()] = newMaxConnections; + return this; + } + + /** + * Sets the core and maximum number of connections per host in one call. + *

+ * This is a convenience method that is equivalent to calling {@link #setCoreConnectionsPerHost(HostDistance, int)} + * and {@link #setMaxConnectionsPerHost(HostDistance, int)}. + * + * @param distance the {@code HostDistance} for which to set these threshold. + * @param core the core number of connections. + * @param max the max number of connections. + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, + * or if {@code core} > {@code max}. + */ + public synchronized PoolingOptions setConnectionsPerHost(HostDistance distance, int core, int max) { + if (distance == HostDistance.IGNORED) + throw new IllegalArgumentException("Cannot set connections per host for " + distance + " hosts"); + + checkConnectionsPerHostOrder(core, max, distance); + coreConnections[distance.ordinal()] = core; + maxConnections[distance.ordinal()] = max; + return this; + } + + /** + * Returns the threshold that triggers the creation of a new connection to a host. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the configured threshold, or the default one if none have been set. + * + * @see #setNewConnectionThreshold(HostDistance, int) + */ + public int getNewConnectionThreshold(HostDistance distance) { + return newConnectionThreshold[distance.ordinal()]; + } + + /** + * Sets the threshold that triggers the creation of a new connection to a host. + *

+ * A new connection gets created if: + *

    + *
  • N connections are open
  • + *
  • N < {@link #getMaxConnectionsPerHost(HostDistance)}
  • + *
  • the number of active requests is more than + * (N - 1) * {@link #getMaxRequestsPerConnection(HostDistance)} + {@link #getNewConnectionThreshold(HostDistance)} + *
  • + *
+ * In other words, if all but the last connection are full, and the last connection is above this threshold. + *

+ * The default value is: + *

    + *
  • with {@code ProtocolVersion#V2} or below: 100 for all hosts.
  • + *
  • with {@code ProtocolVersion#V3} or above: 800 for {@code LOCAL} hosts and 200 for {@code REMOTE} hosts.
  • + *
+ * + * @param distance the {@code HostDistance} for which to configure this threshold. + * @param newValue the value to set (between 0 and 128). + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, or if {@code maxSimultaneousRequests} + * is not in range, or if {@code newValue} is less than the minimum value for this distance. + */ + public synchronized PoolingOptions setNewConnectionThreshold(HostDistance distance, int newValue) { + if (distance == HostDistance.IGNORED) + throw new IllegalArgumentException("Cannot set new connection threshold for " + distance + " hosts"); + + checkRequestsPerConnectionRange(newValue, "New connection threshold", distance); + newConnectionThreshold[distance.ordinal()] = newValue; + return this; + } + + /** + * Returns the maximum number of requests per connection. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the maximum number of requests per connection at distance {@code distance}. + * + * @see #setMaxRequestsPerConnection(HostDistance, int) + */ + public int getMaxRequestsPerConnection(HostDistance distance) { + switch (distance) { + case LOCAL: + return maxRequestsPerConnectionLocal; + case REMOTE: + return maxRequestsPerConnectionRemote; + default: + return 0; + } + } + + /** + * Sets the maximum number of requests per connection. + *

+ * The default value is: + *

    + *
  • with {@code ProtocolVersion#V2} or below: 128 for all hosts (there should not be any reason to change this).
  • + *
  • with {@code ProtocolVersion#V3} or above: 1024 for {@code LOCAL} hosts and 256 for {@code REMOTE} hosts. + * These values were chosen so that the default V2 and V3 configuration generate the same load on a Cassandra cluster. + * Protocol V3 can go much higher (up to 32768), so if your number of clients is low, don't hesitate to experiment with + * higher values. If you have more than one connection per host, consider also adjusting + * {@link #setNewConnectionThreshold(HostDistance, int)}. + *
  • + *
+ * + * @param distance the {@code HostDistance} for which to set this threshold. + * @param newMaxRequests the value to set. + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, + * or if {@code newMaxConnections} is not within the allowed range. + */ + public PoolingOptions setMaxRequestsPerConnection(HostDistance distance, int newMaxRequests) { + checkRequestsPerConnectionRange(newMaxRequests, "Max requests per connection", distance); + + switch (distance) { + case LOCAL: + maxRequestsPerConnectionLocal = newMaxRequests; + break; + case REMOTE: + maxRequestsPerConnectionRemote = newMaxRequests; + break; + default: + throw new IllegalArgumentException("Cannot set max requests per host for " + distance + " hosts"); + } + return this; + } + + /** + * Returns the timeout before an idle connection is removed. + * + * @return the timeout. + */ + public int getIdleTimeoutSeconds() { + return idleTimeoutSeconds; + } + + /** + * Sets the timeout before an idle connection is removed. + *

+ * The order of magnitude should be a few minutes (the default is 120 seconds). The + * timeout that triggers the removal has a granularity of 10 seconds. + * + * @param idleTimeoutSeconds the new timeout in seconds. + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if the timeout is negative. + */ + public PoolingOptions setIdleTimeoutSeconds(int idleTimeoutSeconds) { + if (idleTimeoutSeconds < 0) + throw new IllegalArgumentException("Idle timeout must be positive"); + this.idleTimeoutSeconds = idleTimeoutSeconds; + return this; + } + + /** + * Returns the timeout when trying to acquire a connection from a host's pool. + * + * @return the timeout. + */ + public int getPoolTimeoutMillis() { + return poolTimeoutMillis; + } + + /** + * Sets the timeout when trying to acquire a connection from a host's pool. + *

+ * If no connection is available within that time, the driver will try the + * next host from the query plan. + *

+ * The default is 5 seconds. If this option is set to zero, the driver won't wait at all. + * + * @param poolTimeoutMillis the new value in milliseconds. + * @return this {@code PoolingOptions} + * + * @throws IllegalArgumentException if the timeout is negative. + */ + public PoolingOptions setPoolTimeoutMillis(int poolTimeoutMillis) { + if (poolTimeoutMillis < 0) + throw new IllegalArgumentException("Pool timeout must be positive"); + this.poolTimeoutMillis = poolTimeoutMillis; + return this; + } + + /** + * Returns the heart beat interval, after which a message is sent on an idle connection to make sure it's still alive. + * @return the interval. + */ + public int getHeartbeatIntervalSeconds() { + return heartbeatIntervalSeconds; + } + + /** + * Sets the heart beat interval, after which a message is sent on an idle connection to make sure it's still alive. + *

+ * This is an application-level keep-alive, provided for convenience since adjusting the TCP keep-alive might not be + * practical in all environments. + *

+ * This option should be set higher than {@link SocketOptions#getReadTimeoutMillis()}. + *

+ * The default value for this option is 30 seconds. + * + * @param heartbeatIntervalSeconds the new value in seconds. If set to 0, it will disable the feature. + * @return this {@code PoolingOptions} + * + * @throws IllegalArgumentException if the interval is negative. + */ + public PoolingOptions setHeartbeatIntervalSeconds(int heartbeatIntervalSeconds) { + if (heartbeatIntervalSeconds < 0) + throw new IllegalArgumentException("Heartbeat interval must be positive"); + + this.heartbeatIntervalSeconds = heartbeatIntervalSeconds; + return this; + } + + /** + * Returns the executor to use for connection initialization. + * + * @return the executor. + * @see #setInitializationExecutor(java.util.concurrent.Executor) + */ + public Executor getInitializationExecutor() { + return initializationExecutor; + } + + /** + * Sets the executor to use for connection initialization. + *

+ * Connections are open in a completely asynchronous manner. Since initializing the transport + * requires separate CQL queries, the futures representing the completion of these queries are + * transformed and chained. This executor is where these transformations happen. + *

+ * This is an advanced option, which should be rarely needed in practice. It defaults to + * Guava's {@code MoreExecutors.sameThreadExecutor()}, which results in running the transformations + * on the network I/O threads; this is fine if the transformations are fast and not I/O bound + * (which is the case by default). + * One reason why you might want to provide a custom executor is if you use authentication with + * a custom {@link com.datastax.driver.core.Authenticator} implementation that performs blocking + * calls. + * + * @param initializationExecutor the executor to use + * @return this {@code PoolingOptions} + * + * @throws java.lang.NullPointerException if the executor is null + */ + public PoolingOptions setInitializationExecutor(Executor initializationExecutor) { + Preconditions.checkNotNull(initializationExecutor); + this.initializationExecutor = initializationExecutor; + return this; + } + + synchronized void setProtocolVersion(ProtocolVersion protocolVersion) { + this.protocolVersion = protocolVersion; + + ProtocolVersion referenceVersion = (protocolVersion.compareTo(ProtocolVersion.V2) <= 0) + ? 
ProtocolVersion.V2 + : ProtocolVersion.V3; + Map defaults = DEFAULTS.get(referenceVersion); + + if (coreConnections[LOCAL.ordinal()] == UNSET) + coreConnections[LOCAL.ordinal()] = defaults.get("corePoolLocal"); + if (maxConnections[LOCAL.ordinal()] == UNSET) + maxConnections[LOCAL.ordinal()] = defaults.get("maxPoolLocal"); + checkConnectionsPerHostOrder(coreConnections[LOCAL.ordinal()], maxConnections[LOCAL.ordinal()], LOCAL); + + if (coreConnections[REMOTE.ordinal()] == UNSET) + coreConnections[REMOTE.ordinal()] = defaults.get("corePoolRemote"); + if (maxConnections[REMOTE.ordinal()] == UNSET) + maxConnections[REMOTE.ordinal()] = defaults.get("maxPoolRemote"); + checkConnectionsPerHostOrder(coreConnections[REMOTE.ordinal()], maxConnections[REMOTE.ordinal()], REMOTE); + + if (newConnectionThreshold[LOCAL.ordinal()] == UNSET) + newConnectionThreshold[LOCAL.ordinal()] = defaults.get("newConnectionThresholdLocal"); + checkRequestsPerConnectionRange(newConnectionThreshold[LOCAL.ordinal()], "New connection threshold", LOCAL); + + if (newConnectionThreshold[REMOTE.ordinal()] == UNSET) + newConnectionThreshold[REMOTE.ordinal()] = defaults.get("newConnectionThresholdRemote"); + checkRequestsPerConnectionRange(newConnectionThreshold[REMOTE.ordinal()], "New connection threshold", REMOTE); + + if (maxRequestsPerConnectionLocal == UNSET) + maxRequestsPerConnectionLocal = defaults.get("maxRequestsPerConnectionLocal"); + checkRequestsPerConnectionRange(maxRequestsPerConnectionLocal, "Max requests per connection", LOCAL); + + if (maxRequestsPerConnectionRemote == UNSET) + maxRequestsPerConnectionRemote = defaults.get("maxRequestsPerConnectionRemote"); + checkRequestsPerConnectionRange(maxRequestsPerConnectionRemote, "Max requests per connection", REMOTE); + } + + /** + * Requests the driver to re-evaluate the {@link HostDistance} (through the configured + * {@link com.datastax.driver.core.policies.LoadBalancingPolicy#distance}) for every known + * hosts and to drop/add 
connections to each hosts according to the computed distance. + */ + public void refreshConnectedHosts() { + manager.refreshConnectedHosts(); + } + + /** + * Requests the driver to re-evaluate the {@link HostDistance} for a given node. + * + * @param host the host to refresh. + * + * @see #refreshConnectedHosts() + */ + public void refreshConnectedHost(Host host) { + manager.refreshConnectedHost(host); + } + + private void checkRequestsPerConnectionRange(int value, String description, HostDistance distance) { + // If we don't know the protocol version yet, use the highest possible upper bound, this will get checked again when possible + int max = (protocolVersion == null || protocolVersion.compareTo(ProtocolVersion.V3) >= 0) + ? StreamIdGenerator.MAX_STREAM_PER_CONNECTION_V3 + : StreamIdGenerator.MAX_STREAM_PER_CONNECTION_V2; + + if (value < 0 || value > max) + throw new IllegalArgumentException(String.format("%s for %s hosts must be in the range (0, %d)", + description, distance, max)); + } + + private static void checkConnectionsPerHostOrder(int core, int max, HostDistance distance) { + if (core > max) + throw new IllegalArgumentException(String.format("Core connections for %s hosts must be less than max (%d > %d)", + distance, core, max)); + } + + /** + * @deprecated this option isn't used anymore with the current pool resizing algorithm. + */ + @Deprecated + public int getMinSimultaneousRequestsPerConnectionThreshold(HostDistance distance) { + return 0; + } + + /** + * @deprecated this option isn't used anymore with the current pool resizing algorithm. + */ + @Deprecated + public synchronized PoolingOptions setMinSimultaneousRequestsPerConnectionThreshold(HostDistance distance, int newMinSimultaneousRequests) { + return this; + } + + /** + * @deprecated this method has been renamed to {@link #getNewConnectionThreshold(HostDistance)}. This method calls the new one internally, + * but you should use the new one in your code. 
+ */ + @Deprecated + public int getMaxSimultaneousRequestsPerConnectionThreshold(HostDistance distance) { + return getNewConnectionThreshold(distance); + } + + /** + * @deprecated this method has been renamed to {@link #setNewConnectionThreshold(HostDistance, int)}. This method calls the new one + * internally, but you should use the new one in your code. + */ + @Deprecated + public PoolingOptions setMaxSimultaneousRequestsPerConnectionThreshold(HostDistance distance, int newMaxSimultaneousRequests) { + return this.setNewConnectionThreshold(distance, newMaxSimultaneousRequests); + } + + /** + * @deprecated this method has been renamed to {@link #getMaxRequestsPerConnection(HostDistance)}. This method calls the new one internally, + * but you should use the new one in your code. + */ + @Deprecated + public int getMaxSimultaneousRequestsPerHostThreshold(HostDistance distance) { + return this.getMaxRequestsPerConnection(distance); + } + + /** + * @deprecated this method has been renamed to {@link #setMaxRequestsPerConnection(HostDistance, int)}. This method calls the new one internally, + * but you should use the new one in your code. + */ + @Deprecated + public PoolingOptions setMaxSimultaneousRequestsPerHostThreshold(HostDistance distance, int newMaxRequests) { + return this.setMaxRequestsPerConnection(distance, newMaxRequests); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedId.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedId.java new file mode 100644 index 00000000000..d580ba3f86c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedId.java @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * Identifies a PreparedStatement. + */ +public class PreparedId +{ + // This class is mostly here to group PreparedStatement data that are need for + // execution but that we don't want to expose publicly (see JAVA-195) + final MD5Digest id; + + final ColumnDefinitions metadata; + final ColumnDefinitions resultSetMetadata; + + final int[] routingKeyIndexes; + final ProtocolVersion protocolVersion; + + PreparedId(MD5Digest id, ColumnDefinitions metadata, ColumnDefinitions resultSetMetadata, int[] routingKeyIndexes, ProtocolVersion protocolVersion) + { + this.id = id; + this.metadata = metadata; + this.resultSetMetadata = resultSetMetadata; + this.routingKeyIndexes = routingKeyIndexes; + this.protocolVersion = protocolVersion; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java new file mode 100644 index 00000000000..cf6acf8a5be --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -0,0 +1,271 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; + +import com.datastax.driver.core.exceptions.InvalidTypeException; +import com.datastax.driver.core.policies.RetryPolicy; + +/** + * Represents a prepared statement, a query with bound variables that has been + * prepared (pre-parsed) by the database. + *

+ * A prepared statement can be executed once concrete values have been provided + * for the bound variables. A prepared statement and the values for its + * bound variables constitute a BoundStatement and can be executed (by + * {@link Session#execute}). + *

+ * A {@code PreparedStatement} object allows you to define specific defaults + * for the different properties of a {@link Statement} (Consistency level, tracing, ...), + * in which case those properties will be inherited as default by every + * BoundedStatement created from the {PreparedStatement}. The default for those + * {@code PreparedStatement} properties is the same that in {@link Statement} if the + * PreparedStatement is created by {@link Session#prepare(String)} but will inherit + * of the properties of the {@link RegularStatement} used for the preparation if + * {@link Session#prepare(RegularStatement)} is used. + */ +public interface PreparedStatement { + + /** + * Returns metadata on the bounded variables of this prepared statement. + * + * @return the variables bounded in this prepared statement. + */ + public ColumnDefinitions getVariables(); + + /** + * Creates a new BoundStatement object and bind its variables to the + * provided values. + *

+ * While the number of {@code values} cannot be greater than the number of bound + * variables, the number of {@code values} may be fewer than the number of bound + * variables. In that case, the remaining variables will have to be bound + * to values by another mean because the resulting {@code BoundStatement} + * being executable. + *

+ * This method is a convenience for {@code new BoundStatement(this).bind(...)}. + * + * @param values the values to bind to the variables of the newly created + * BoundStatement. + * @return the newly created {@code BoundStatement} with its variables + * bound to {@code values}. + * + * @throws IllegalArgumentException if more {@code values} are provided + * than there is of bound variables in this statement. + * @throws InvalidTypeException if any of the provided value is not of + * correct type to be bound to the corresponding bind variable. + * @throws NullPointerException if one of {@code values} is a collection + * (List, Set or Map) containing a null value. Nulls are not supported in + * collections by CQL. + * + * @see BoundStatement#bind + */ + public BoundStatement bind(Object... values); + + /** + * Creates a new BoundStatement object for this prepared statement. + *

+ * This method do not bind any values to any of the prepared variables. Said + * values need to be bound on the resulting statement using BoundStatement's + * setters methods ({@link BoundStatement#setInt}, {@link BoundStatement#setLong}, ...). + * + * @return the newly created {@code BoundStatement}. + */ + public BoundStatement bind(); + + /** + * Sets the routing key for this prepared statement. + *

+ * While you can provide a fixed routing key for all executions of this prepared + * statement with this method, it is not mandatory to provide + * one through this method. This method should only be used + * if the partition key of the prepared query is not part of the prepared + * variables (that is if the partition key is fixed). + *

+ * Note that if the partition key is part of the prepared variables, the + * routing key will be automatically computed once those variables are bound. + *

+ * If the partition key is neither fixed nor part of the prepared variables (e.g. + * a composite partition key where only some of the components are bound), the + * routing key can also be set on each bound statement. + * + * @param routingKey the raw (binary) value to use as routing key. + * @return this {@code PreparedStatement} object. + * + * @see Statement#getRoutingKey + * @see BoundStatement#getRoutingKey + */ + public PreparedStatement setRoutingKey(ByteBuffer routingKey); + + /** + * Sets the routing key for this query. + *

+ * See {@link #setRoutingKey(ByteBuffer)} for more information. This + * method is a variant for when the query partition key is composite and + * the routing key must be built from multiple values. + * + * @param routingKeyComponents the raw (binary) values to compose to obtain + * the routing key. + * @return this {@code PreparedStatement} object. + * + * @see Statement#getRoutingKey + */ + public PreparedStatement setRoutingKey(ByteBuffer... routingKeyComponents); + + /** + * Returns the routing key set for this query. + * + * @return the routing key for this query or {@code null} if none has been + * explicitly set on this PreparedStatement. + */ + public ByteBuffer getRoutingKey(); + + /** + * Sets a default consistency level for all bound statements + * created from this prepared statement. + *

+ * If no consistency level is set through this method, the bound statement + * created from this object will use the default consistency level (ONE). + *

+ * Changing the default consistency level is not retroactive, it only + * applies to BoundStatement created after the change. + * + * @param consistency the default consistency level to set. + * @return this {@code PreparedStatement} object. + */ + public PreparedStatement setConsistencyLevel(ConsistencyLevel consistency); + + /** + * Returns the default consistency level set through {@link #setConsistencyLevel}. + * + * @return the default consistency level. Returns {@code null} if no + * consistency level has been set through this object {@code setConsistencyLevel} + * method. + */ + public ConsistencyLevel getConsistencyLevel(); + + /** + * Sets a default serial consistency level for all bound statements + * created from this prepared statement. + *

+ * If no serial consistency level is set through this method, the bound statement + * created from this object will use the default serial consistency level (SERIAL). + *

+ * Changing the default serial consistency level is not retroactive, it only + * applies to BoundStatement created after the change. + * + * @param serialConsistency the default serial consistency level to set. + * @return this {@code PreparedStatement} object. + * + * @throws IllegalArgumentException if {@code serialConsistency} is not one of + * {@code ConsistencyLevel.SERIAL} or {@code ConsistencyLevel.LOCAL_SERIAL}. + */ + public PreparedStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency); + + /** + * Returns the default serial consistency level set through {@link #setSerialConsistencyLevel}. + * + * @return the default serial consistency level. Returns {@code null} if no + * consistency level has been set through this object {@code setSerialConsistencyLevel} + * method. + */ + public ConsistencyLevel getSerialConsistencyLevel(); + + /** + * Returns the string of the query that was prepared to yield this {@code + * PreparedStatement}. + *

+ * Note that a CQL3 query may be implicitly applied to the current keyspace + * (that is, if the keyspace is not explicitly qualified in the query + * itself). For prepared queries, the current keyspace used is the one at + * the time of the preparation, not the one at execution time. The current + * keyspace at the time of the preparation can be retrieved through + * {@link #getQueryKeyspace}. + * + * @return the query that was prepared to yield this + * {@code PreparedStatement}. + */ + public String getQueryString(); + + /** + * Returns the keyspace at the time that this prepared statement was prepared, + * (that is the one on which this statement applies unless it specified a + * keyspace explicitly). + * + * @return the keyspace at the time that this statement was prepared or + * {@code null} if no keyspace was set when the query was prepared (which + * is possible since keyspaces can be explicitly qualified in queries and + * so may not require a current keyspace to be set). + */ + public String getQueryKeyspace(); + + /** + * Convenience method to enables tracing for all bound statements created + * from this prepared statement. + * + * @return this {@code Query} object. + */ + public PreparedStatement enableTracing(); + + /** + * Convenience method to disable tracing for all bound statements created + * from this prepared statement. + * + * @return this {@code PreparedStatement} object. + */ + public PreparedStatement disableTracing(); + + /** + * Returns whether tracing is enabled for this prepared statement, i.e. if + * BoundStatement created from it will use tracing by default. + * + * @return {@code true} if this prepared statement has tracing enabled, + * {@code false} otherwise. + */ + public boolean isTracing(); + + /** + * Convenience method to set a default retry policy for the {@code BoundStatement} + * created from this prepared statement. + *

+ * Note that this method is completely optional. By default, the retry policy + * used is the one returned {@link com.datastax.driver.core.policies.Policies#getRetryPolicy} + * in the cluster configuration. This method is only useful if you want + * to override this default policy for the {@code BoundStatement} created from + * this {@code PreparedStatement}. + * to punctually override the default policy for this request. + * + * @param policy the retry policy to use for this prepared statement. + * @return this {@code PreparedStatement} object. + */ + public PreparedStatement setRetryPolicy(RetryPolicy policy); + + /** + * Returns the retry policy sets for this prepared statement, if any. + * + * @return the retry policy sets specifically for this prepared statement or + * {@code null} if none have been set. + */ + public RetryPolicy getRetryPolicy(); + + /** + * Returns the prepared Id for this statement. + * + * @return the PreparedId corresponding to this statement. + */ + public PreparedId getPreparedId(); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java new file mode 100644 index 00000000000..b404603832b --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; + +import io.netty.buffer.ByteBuf; + +import static com.datastax.driver.core.SchemaElement.KEYSPACE; +import static com.datastax.driver.core.SchemaElement.TABLE; + +class ProtocolEvent { + + public enum Type { TOPOLOGY_CHANGE, STATUS_CHANGE, SCHEMA_CHANGE } + + public final Type type; + + private ProtocolEvent(Type type) { + this.type = type; + } + + public static ProtocolEvent deserialize(ByteBuf bb, ProtocolVersion version) { + switch (CBUtil.readEnumValue(Type.class, bb)) { + case TOPOLOGY_CHANGE: + return TopologyChange.deserializeEvent(bb); + case STATUS_CHANGE: + return StatusChange.deserializeEvent(bb); + case SCHEMA_CHANGE: + return SchemaChange.deserializeEvent(bb, version); + } + throw new AssertionError(); + } + + public static class TopologyChange extends ProtocolEvent { + public enum Change { NEW_NODE, REMOVED_NODE, MOVED_NODE } + + public final Change change; + public final InetSocketAddress node; + + private TopologyChange(Change change, InetSocketAddress node) { + super(Type.TOPOLOGY_CHANGE); + this.change = change; + this.node = node; + } + + // Assumes the type has already been deserialized + private static TopologyChange deserializeEvent(ByteBuf bb) { + Change change = CBUtil.readEnumValue(Change.class, bb); + InetSocketAddress node = CBUtil.readInet(bb); + return new TopologyChange(change, node); + } + + @Override + public String toString() { + return change + " " + node; + } + } + + public static class StatusChange extends ProtocolEvent { + + public enum Status { UP, DOWN } + + public final Status status; + public final InetSocketAddress node; + + private StatusChange(Status status, InetSocketAddress node) { + super(Type.STATUS_CHANGE); + this.status = status; + this.node = node; + } + + // Assumes the type has already been deserialized + private static StatusChange deserializeEvent(ByteBuf bb) { + Status status = CBUtil.readEnumValue(Status.class, bb); + 
InetSocketAddress node = CBUtil.readInet(bb); + return new StatusChange(status, node); + } + + @Override + public String toString() { + return status + " " + node; + } + } + + public static class SchemaChange extends ProtocolEvent { + + public enum Change { CREATED, UPDATED, DROPPED } + + public final Change change; + public final SchemaElement targetType; + public final String targetKeyspace; + public final String targetName; + + public SchemaChange(Change change, SchemaElement targetType, String targetKeyspace, String targetName) { + super(Type.SCHEMA_CHANGE); + this.change = change; + this.targetType = targetType; + this.targetKeyspace = targetKeyspace; + this.targetName = targetName; + } + + // Assumes the type has already been deserialized + private static SchemaChange deserializeEvent(ByteBuf bb, ProtocolVersion version) { + Change change; + SchemaElement target; + String keyspace, name; + switch (version) { + case V1: + case V2: + change = CBUtil.readEnumValue(Change.class, bb); + keyspace = CBUtil.readString(bb); + name = CBUtil.readString(bb); + target = name.isEmpty() ? KEYSPACE : TABLE; + return new SchemaChange(change, target, keyspace, name); + case V3: + change = CBUtil.readEnumValue(Change.class, bb); + target = CBUtil.readEnumValue(SchemaElement.class, bb); + keyspace = CBUtil.readString(bb); + name = (target == KEYSPACE) ? "" : CBUtil.readString(bb); + return new SchemaChange(change, target, keyspace, name); + default: + throw version.unsupported(); + } + } + + @Override + public String toString() { + return change.toString() + ' ' + targetType + ' ' + targetKeyspace + (targetName.isEmpty() ? "" : '.' 
+ targetName); + } + } + +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java new file mode 100644 index 00000000000..c10a2b739e3 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Options of the Cassandra native binary protocol. + */ +public class ProtocolOptions { + + /** + * Compression supported by the Cassandra binary protocol. 
+ */ + public enum Compression { + /** No compression */ + NONE("") { + @Override + FrameCompressor compressor() { + return null; + } + }, + /** Snappy compression */ + SNAPPY("snappy") { + @Override + FrameCompressor compressor() { + return FrameCompressor.SnappyCompressor.instance; + } + }, + /** LZ4 compression */ + LZ4("lz4") { + @Override + FrameCompressor compressor() { + return FrameCompressor.LZ4Compressor.instance; + } + }; + + final String protocolName; + + private Compression(String protocolName) { + this.protocolName = protocolName; + } + + abstract FrameCompressor compressor(); + + static Compression fromString(String str) { + for (Compression c : values()) { + if (c.protocolName.equalsIgnoreCase(str)) + return c; + } + return null; + } + + @Override + public String toString() { + return protocolName; + } + }; + + /** + * The default port for Cassandra native binary protocol: 9042. + */ + public static final int DEFAULT_PORT = 9042; + + /** + * The default value for {@link #getMaxSchemaAgreementWaitSeconds()}: 10. + */ + public static final int DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS = 10; + + /** + * The newest version of the protocol that this version of the driver support. + * + * @deprecated This is provided for backward compatibility; use {@link ProtocolVersion#NEWEST_SUPPORTED} instead. + */ + @Deprecated + public static final int NEWEST_SUPPORTED_PROTOCOL_VERSION = ProtocolVersion.NEWEST_SUPPORTED.toInt(); + + private volatile Cluster.Manager manager; + + private final int port; + final ProtocolVersion initialProtocolVersion; // What the user asked us. Will be null by default. + + @VisibleForTesting + volatile int maxSchemaAgreementWaitSeconds; + + private final SSLOptions sslOptions; // null if no SSL + private final AuthProvider authProvider; + + private volatile Compression compression = Compression.NONE; + + /** + * Creates a new {@code ProtocolOptions} instance using the {@code DEFAULT_PORT} + * (and without SSL). 
+ */ + public ProtocolOptions() { + this(DEFAULT_PORT); + } + + /** + * Creates a new {@code ProtocolOptions} instance using the provided port + * (without SSL nor authentication). + *

+ * This is a shortcut for {@code new ProtocolOptions(port, null, AuthProvider.NONE)}. + * + * @param port the port to use for the binary protocol. + */ + public ProtocolOptions(int port) { + this(port, null, DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS, null, AuthProvider.NONE); + } + + /** + * Creates a new {@code ProtocolOptions} instance using the provided port + * and SSL context. + * + * @param port the port to use for the binary protocol. + * @param protocolVersion the protocol version to use. This can be {@code null}, in which case the + * version used will be the biggest version supported by the first node the driver connects to. + * See {@link Cluster.Builder#withProtocolVersion} for more details. + * @param sslOptions the SSL options to use. Use {@code null} if SSL is not + * to be used. + * @param authProvider the {@code AuthProvider} to use for authentication against + * the Cassandra nodes. + */ + public ProtocolOptions(int port, ProtocolVersion protocolVersion, int maxSchemaAgreementWaitSeconds, SSLOptions sslOptions, AuthProvider authProvider) { + this.port = port; + this.initialProtocolVersion = protocolVersion; + this.maxSchemaAgreementWaitSeconds = maxSchemaAgreementWaitSeconds; + this.sslOptions = sslOptions; + this.authProvider = authProvider; + } + + /** + * @throws IllegalArgumentException if {@code protocolVersion} does not correspond to any known version. + * + * @deprecated This is provided for backward compatibility; use {@link #ProtocolOptions(int, ProtocolVersion, int, SSLOptions, AuthProvider))} instead. + */ + @Deprecated + public ProtocolOptions(int port, int protocolVersion, SSLOptions sslOptions, AuthProvider authProvider) { + this(port, ProtocolVersion.fromInt(protocolVersion), DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS, sslOptions, authProvider); + } + + void register(Cluster.Manager manager) { + this.manager = manager; + } + + /** + * Returns the port used to connect to the Cassandra hosts. 
+ * + * @return the port used to connect to the Cassandra hosts. + */ + public int getPort() { + return port; + } + + /** + * The protocol version used by the Cluster instance. + * + * @return the protocol version in use. This might return {@code null} if a particular + * version hasn't been forced by the user (using say {Cluster.Builder#withProtocolVersion}) + * and this Cluster instance has not yet connected to any node (but as soon as the + * Cluster instance is connected, this is guaranteed to return a non-null value). Note that + * nodes that do not support this protocol version will be ignored. + */ + public ProtocolVersion getProtocolVersionEnum() { + return manager.connectionFactory.protocolVersion; + } + + /** + * The protocol version used by the Cluster instance, as a number. + * + * @deprecated This is provided for backward compatibility, use {@link #getProtocolVersionEnum()} instead. + */ + @Deprecated + public int getProtocolVersion() { + return getProtocolVersionEnum().toInt(); + } + + /** + * Returns the compression used by the protocol. + *

+ * By default, compression is not used. + * + * @return the compression used. + */ + public Compression getCompression() { + return compression; + } + + /** + * Sets the compression to use. + *

+ * Note that while this setting can be changed at any time, it will + * only apply to newly created connections. + * + * @param compression the compression algorithm to use (or {@code + * Compression.NONE} to disable compression). + * @return this {@code ProtocolOptions} object. + * + * @throws IllegalStateException if the compression requested is not + * available. Most compression algorithms require that the relevant be + * present in the classpath. If not, the compression will be + * unavailable. + */ + public ProtocolOptions setCompression(Compression compression) { + if (compression != Compression.NONE && compression.compressor() == null) + throw new IllegalStateException("The requested compression is not available (some compression require a JAR to be found in the classpath)"); + + this.compression = compression; + return this; + } + + /** + * Returns the maximum time to wait for schema agreement before returning from a DDL query. + * + * @return the time. + */ + public int getMaxSchemaAgreementWaitSeconds() { + return maxSchemaAgreementWaitSeconds; + } + + /** + * The {@code SSLOptions} used by this cluster. + * + * @return the {@code SSLOptions} used by this cluster (set at the cluster creation time) + * or {@code null} if SSL is not in use. + */ + public SSLOptions getSSLOptions() { + return sslOptions; + } + + /** + * The {@code AuthProvider} used by this cluster. + * + * @return the {@code AuthProvided} used by this cluster (set at the cluster creation + * time). If no authentication mechanism is in use (the default), {@code AuthProvided.NONE} + * will be returned. 
+ */ + public AuthProvider getAuthProvider() { + return authProvider; + } + +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolV1Authenticator.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolV1Authenticator.java new file mode 100644 index 00000000000..094a0090765 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolV1Authenticator.java @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.Map; + +// Not an interface because we don't want to expose it. We only support password authentication +// for the protocol V1 similarly to what the driver 1.x branch do. +abstract class ProtocolV1Authenticator { + abstract Map getCredentials(); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java new file mode 100644 index 00000000000..28ec91ad548 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.Map; + +import com.google.common.collect.ImmutableMap.Builder; +import com.google.common.collect.ImmutableMap; + +import com.datastax.driver.core.exceptions.DriverInternalError; + +/** + * Versions of the native protocol supported by the driver. + */ +public enum ProtocolVersion { + + V1("1.2.0", 1), + V2("2.0.0", 2), + V3("2.1.0", 3), + ; + + /** + * The most recent protocol version supported by the driver. + */ + public static final ProtocolVersion NEWEST_SUPPORTED = V3; + + private final VersionNumber minCassandraVersion; + private final int asInt; + + private ProtocolVersion(String minCassandraVersion, int asInt) { + this.minCassandraVersion = VersionNumber.parse(minCassandraVersion); + this.asInt = asInt; + } + + boolean isSupportedBy(Host host) { + return host.getCassandraVersion() == null || + isSupportedBy(host.getCassandraVersion()); + } + + VersionNumber minCassandraVersion() { + return minCassandraVersion; + } + + private boolean isSupportedBy(VersionNumber cassandraVersion) { + return minCassandraVersion.compareTo(cassandraVersion.nextStable()) <= 0; + } + + DriverInternalError unsupported() { + return new DriverInternalError("Unsupported protocol version " + this); + } + + /** + * Get the int representation of a protocol version. + * This is for use internally by Cassandra to ensure that the + * correct version is specified when serializing and deserializing + * using classes from the imported driver library. 
+ * @return the protocol version as an int + */ + public int toInt() { + return asInt; + } + + private static final Map INT_TO_VERSION; + static { + Builder builder = ImmutableMap.builder(); + for (ProtocolVersion version : values()) { + builder.put(version.asInt, version); + } + INT_TO_VERSION = builder.build(); + } + + /** + * Returns the value matching an integer version. + * + * @param i the version as an integer. + * @return the matching enum value. + * + * @throws IllegalArgumentException if the argument doesn't match any known version. + */ + public static ProtocolVersion fromInt(int i) { + ProtocolVersion version = INT_TO_VERSION.get(i); + if (version == null) + throw new IllegalArgumentException("No protocol version matching integer version " + i); + return version; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryLogger.java b/driver-core/src/main/java/com/datastax/driver/core/QueryLogger.java new file mode 100644 index 00000000000..0fcf2a78040 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryLogger.java @@ -0,0 +1,812 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.util.List; + +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +import com.google.common.annotations.Beta; +import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A configurable {@link LatencyTracker} that logs all executed statements. + *

+ * Typically, client applications would instantiate one single query logger (using its {@link Builder}), + * configure it and register it on the relevant {@link Cluster} instance, e.g.: + * + *

+ * Cluster cluster = ...
+ * QueryLogger queryLogger = QueryLogger.builder(cluster)
+ *     .withConstantThreshold(...)
+ *     .withMaxQueryStringLength(...)
+ *     .build();
+ * cluster.register(queryLogger);
+ * 
+ * + * Refer to the {@link Builder} documentation for more information on + * configuration settings for the query logger. + *

+ * Once registered, the query logger will log every {@link RegularStatement}, {@link BoundStatement} or {@link BatchStatement} + * executed by the driver; + * note that it will never log other types of statement, null statements nor any special statement used internally by the driver. + *

+ * There is one log for each request to a Cassandra node; because the driver sometimes retries the same statement on multiple nodes, + * a single statement execution (for example, a single call to {@link Session#execute(Statement)}) can produce multiple logs on + * different nodes. + *

+ * For more flexibility, the query logger uses 3 different {@link Logger} instances: + * + *

    + *
  1. {@link #NORMAL_LOGGER}: used to log normal queries, i.e., queries that completed successfully + * within a configurable threshold in milliseconds.
  2. + *
  3. {@link #SLOW_LOGGER}: used to log slow queries, i.e., queries that completed successfully + * but that took longer than a configurable threshold in milliseconds to complete.
  4. + *
  5. {@link #ERROR_LOGGER}: used to log unsuccessful queries, i.e., + * queries that did not complete normally and threw an exception. + * Note that this logger will also print the full stack trace of the reported exception.
  6. + *
+ * + *

+ * The appropriate logger is chosen according to the following algorithm: + *

    + *
  1. if an exception has been thrown: use {@link #ERROR_LOGGER};
  2. + *
  3. otherwise, if the reported latency is greater than the configured threshold in milliseconds: use {@link #SLOW_LOGGER};
  4. + *
  5. otherwise, use {@link #NORMAL_LOGGER}.
  6. + *
+ * + *

+ * All loggers are activated by setting their levels to {@code DEBUG} or {@code TRACE} (including {@link #ERROR_LOGGER}). + * If the level is set to {@code TRACE} and the statement being logged is a {@link BoundStatement}, + * then the query parameters (if any) will be logged as well (names and actual values). + * + *

+ * Constant thresholds vs. Dynamic thresholds + *

+ * Currently the QueryLogger can track slow queries in two different ways: + * using a constant threshold in milliseconds (which is the default behavior), + * or using a dynamic threshold based on per-host percentiles computed by + * {@link PerHostPercentileTracker}. + *

+ * Note that the dynamic threshold version is currently provided as a beta preview: it hasn't been extensively + * tested yet, and the API is still subject to change. To use it, you must first obtain and register + * an instance of {@link PerHostPercentileTracker}, then create your QueryLogger as follows: + * + *

+ * Cluster cluster = ...
+ * // create an instance of PerHostPercentileTracker and register it
+ * PerHostPercentileTracker tracker = ...;
+ * cluster.register(tracker);
+ * // create an instance of QueryLogger and register it
+ * QueryLogger queryLogger = QueryLogger.builder(cluster)
+ *     .withDynamicThreshold(tracker, ...)
+ *     .withMaxQueryStringLength(...)
+ *     .build();
+ * cluster.register(queryLogger);
+ * 
+ + *

+ * This class is thread-safe. + * + * @since 2.0.10 + */ +public abstract class QueryLogger implements LatencyTracker { + + /** + * The default latency threshold in milliseconds beyond which queries are considered 'slow' + * and logged as such by the driver. + */ + public static final long DEFAULT_SLOW_QUERY_THRESHOLD_MS = 5000; + + /** + * The default latency percentile beyond which queries are considered 'slow' + * and logged as such by the driver. + */ + public static final double DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE = 99.0; + + /** + * The default maximum length of a CQL query string that can be logged verbatim + * by the driver. Query strings longer than this value will be truncated + * when logged. + */ + public static final int DEFAULT_MAX_QUERY_STRING_LENGTH = 500; + + /** + * The default maximum length of a query parameter value that can be logged verbatim + * by the driver. Parameter values longer than this value will be truncated + * when logged. + */ + public static final int DEFAULT_MAX_PARAMETER_VALUE_LENGTH = 50; + + /** + * The default maximum number of query parameters that can be logged + * by the driver. Queries with a number of parameters higher than this value + * will not have all their parameters logged. + */ + public static final int DEFAULT_MAX_LOGGED_PARAMETERS = 50; + + // Loggers + + /** + * The logger used to log normal queries, i.e., queries that completed successfully + * within a configurable threshold in milliseconds. + *

+ * This logger is activated by setting its level to {@code DEBUG} or {@code TRACE}. + * Additionally, if the level is set to {@code TRACE} and the statement being logged is a {@link BoundStatement}, + * then the query parameters (if any) will be logged as well (names and actual values). + *

+ * The name of this logger is {@code com.datastax.driver.core.QueryLogger.NORMAL}. + */ + public static final Logger NORMAL_LOGGER = LoggerFactory.getLogger("com.datastax.driver.core.QueryLogger.NORMAL"); + + /** + * The logger used to log slow queries, i.e., queries that completed successfully + * but whose execution time exceeded a configurable threshold in milliseconds. + *

+ * This logger is activated by setting its level to {@code DEBUG} or {@code TRACE}. + * Additionally, if the level is set to {@code TRACE} and the statement being logged is a {@link BoundStatement}, + * then the query parameters (if any) will be logged as well (names and actual values). + *

+ * The name of this logger is {@code com.datastax.driver.core.QueryLogger.SLOW}. + */ + public static final Logger SLOW_LOGGER = LoggerFactory.getLogger("com.datastax.driver.core.QueryLogger.SLOW"); + + /** + * The logger used to log unsuccessful queries, i.e., queries that did not completed normally and threw an exception. + *

+ * This logger is activated by setting its level to {@code DEBUG} or {@code TRACE}. + * Additionally, if the level is set to {@code TRACE} and the statement being logged is a {@link BoundStatement}, + * then the query parameters (if any) will be logged as well (names and actual values). + * Note that this logger will also print the full stack trace of the reported exception. + *

+ * The name of this logger is {@code com.datastax.driver.core.QueryLogger.ERROR}. + */ + public static final Logger ERROR_LOGGER = LoggerFactory.getLogger("com.datastax.driver.core.QueryLogger.ERROR"); + + // Message templates + + private static final String NORMAL_TEMPLATE = "[%s] [%s] Query completed normally, took %s ms: %s"; + + private static final String SLOW_TEMPLATE_MILLIS = "[%s] [%s] Query too slow, took %s ms: %s"; + + private static final String SLOW_TEMPLATE_PERCENTILE = "[%s] [%s] Query too slow, took %s ms (%s percentile = %s ms): %s"; + + private static final String ERROR_TEMPLATE = "[%s] [%s] Query error after %s ms: %s"; + + @VisibleForTesting + static final String TRUNCATED_OUTPUT = "... [truncated output]"; + + @VisibleForTesting + static final String FURTHER_PARAMS_OMITTED = " [further parameters omitted]"; + + protected final Cluster cluster; + + private volatile ProtocolVersion protocolVersion; + + protected volatile int maxQueryStringLength; + + protected volatile int maxParameterValueLength; + + protected volatile int maxLoggedParameters; + + /** + * Private constructor. Instances of QueryLogger should be obtained via the {@link #builder(Cluster)} method. + */ + private QueryLogger(Cluster cluster, int maxQueryStringLength, int maxParameterValueLength, int maxLoggedParameters) { + this.cluster = cluster; + this.maxQueryStringLength = maxQueryStringLength; + this.maxParameterValueLength = maxParameterValueLength; + this.maxLoggedParameters = maxLoggedParameters; + } + + /** + * Creates a new {@link QueryLogger.Builder} instance. + *

+ * This is a convenience method for {@code new QueryLogger.Builder()}. + * + * @param cluster the {@link Cluster} this QueryLogger will be attached to. + * @return the new QueryLogger builder. + * @throws NullPointerException if {@code cluster} is {@code null}. + */ + public static QueryLogger.Builder builder(Cluster cluster) { + if(cluster == null) throw new NullPointerException("QueryLogger.Builder: cluster parameter cannot be null"); + return new QueryLogger.Builder(cluster); + } + + /** + * A QueryLogger that uses a constant threshold in milliseconds + * to track slow queries. + * This implementation is the default and should be preferred to {@link DynamicThresholdQueryLogger} + * which is still in beta state. + */ + public static class ConstantThresholdQueryLogger extends QueryLogger { + + private volatile long slowQueryLatencyThresholdMillis; + + private ConstantThresholdQueryLogger(Cluster cluster, int maxQueryStringLength, int maxParameterValueLength, int maxLoggedParameters, long slowQueryLatencyThresholdMillis) { + super(cluster, maxQueryStringLength, maxParameterValueLength, maxLoggedParameters); + this.setSlowQueryLatencyThresholdMillis(slowQueryLatencyThresholdMillis); + } + + /** + * Return the threshold in milliseconds beyond which queries are considered 'slow' + * and logged as such by the driver. + * The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_MS}. + * + * @return The threshold in milliseconds beyond which queries are considered 'slow' + * and logged as such by the driver. + */ + public long getSlowQueryLatencyThresholdMillis() { + return slowQueryLatencyThresholdMillis; + } + + /** + * Set the threshold in milliseconds beyond which queries are considered 'slow' + * and logged as such by the driver. + * + * @param slowQueryLatencyThresholdMillis Slow queries threshold in milliseconds. + * It must be strictly positive. + * @throws IllegalArgumentException if {@code slowQueryLatencyThresholdMillis <= 0}. 
+ */ + public void setSlowQueryLatencyThresholdMillis(long slowQueryLatencyThresholdMillis) { + if (slowQueryLatencyThresholdMillis <= 0) + throw new IllegalArgumentException("Invalid slowQueryLatencyThresholdMillis, should be > 0, got " + slowQueryLatencyThresholdMillis); + this.slowQueryLatencyThresholdMillis = slowQueryLatencyThresholdMillis; + } + + @Override + protected void maybeLogNormalOrSlowQuery(Host host, Statement statement, long latencyMs) { + if (latencyMs > slowQueryLatencyThresholdMillis) { + maybeLogSlowQuery(host, statement, latencyMs); + } else { + maybeLogNormalQuery(host, statement, latencyMs); + } + } + + protected void maybeLogSlowQuery(Host host, Statement statement, long latencyMs) { + if (SLOW_LOGGER.isDebugEnabled()) { + String message = String.format(SLOW_TEMPLATE_MILLIS, cluster.getClusterName(), host, latencyMs, statementAsString(statement)); + logQuery(statement, null, SLOW_LOGGER, message); + } + } + } + + /** + * A QueryLogger that uses a dynamic threshold in milliseconds + * to track slow queries. + *

+ * Dynamic thresholds are based on per-host latency percentiles, as computed + * by {@link PerHostPercentileTracker}. + *

+ * This class is currently provided as a beta preview: it hasn't been extensively tested yet, and the API is still subject + * to change. + */ + @Beta + public static class DynamicThresholdQueryLogger extends QueryLogger { + + private volatile double slowQueryLatencyThresholdPercentile; + + private volatile PerHostPercentileTracker perHostPercentileLatencyTracker; + + private DynamicThresholdQueryLogger(Cluster cluster, int maxQueryStringLength, int maxParameterValueLength, int maxLoggedParameters, double slowQueryLatencyThresholdPercentile, PerHostPercentileTracker perHostPercentileLatencyTracker) { + super(cluster, maxQueryStringLength, maxParameterValueLength, maxLoggedParameters); + this.setSlowQueryLatencyThresholdPercentile(slowQueryLatencyThresholdPercentile); + this.setPerHostPercentileLatencyTracker(perHostPercentileLatencyTracker); + } + + /** + * Return the {@link PerHostPercentileTracker} instance to use for recording per-host latency histograms. + * Cannot be {@code null}. + * + * @return the {@link PerHostPercentileTracker} instance to use. + */ + public PerHostPercentileTracker getPerHostPercentileLatencyTracker() { + return perHostPercentileLatencyTracker; + } + + /** + * Set the {@link PerHostPercentileTracker} instance to use for recording per-host latency histograms. + * Cannot be {@code null}. + * + * @param perHostPercentileLatencyTracker the {@link PerHostPercentileTracker} instance to use. + * @throws IllegalArgumentException if {@code perHostPercentileLatencyTracker == null}. + */ + public void setPerHostPercentileLatencyTracker(PerHostPercentileTracker perHostPercentileLatencyTracker) { + if (perHostPercentileLatencyTracker == null) + throw new IllegalArgumentException("perHostPercentileLatencyTracker cannot be null"); + this.perHostPercentileLatencyTracker = perHostPercentileLatencyTracker; + } + + /** + * Return the threshold percentile beyond which queries are considered 'slow' + * and logged as such by the driver. 
+ * The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE}. + * + * @return threshold percentile beyond which queries are considered 'slow' + * and logged as such by the driver. + */ + public double getSlowQueryLatencyThresholdPercentile() { + return slowQueryLatencyThresholdPercentile; + } + + /** + * Set the threshold percentile beyond which queries are considered 'slow' + * and logged as such by the driver. + * + * @param slowQueryLatencyThresholdPercentile Slow queries threshold percentile. + * It must be comprised between 0 inclusive and 100 exclusive. + * @throws IllegalArgumentException if {@code slowQueryLatencyThresholdPercentile < 0 || slowQueryLatencyThresholdPercentile >= 100}. + */ + public void setSlowQueryLatencyThresholdPercentile(double slowQueryLatencyThresholdPercentile) { + if (slowQueryLatencyThresholdPercentile < 0.0 || slowQueryLatencyThresholdPercentile >= 100.0) + throw new IllegalArgumentException("Invalid slowQueryLatencyThresholdPercentile, should be >= 0 and < 100, got " + slowQueryLatencyThresholdPercentile); + this.slowQueryLatencyThresholdPercentile = slowQueryLatencyThresholdPercentile; + } + + @Override + protected void maybeLogNormalOrSlowQuery(Host host, Statement statement, long latencyMs) { + long threshold = perHostPercentileLatencyTracker.getLatencyAtPercentile(host, slowQueryLatencyThresholdPercentile); + if (threshold >= 0 && latencyMs > threshold) { + maybeLogSlowQuery(host, statement, latencyMs, threshold); + } else { + maybeLogNormalQuery(host, statement, latencyMs); + } + } + + protected void maybeLogSlowQuery(Host host, Statement statement, long latencyMs, long threshold) { + if (SLOW_LOGGER.isDebugEnabled()) { + String message = String.format(SLOW_TEMPLATE_PERCENTILE, cluster.getClusterName(), host, latencyMs, slowQueryLatencyThresholdPercentile, threshold, statementAsString(statement)); + logQuery(statement, null, SLOW_LOGGER, message); + } + } + } + + /** + * Helper class to build {@link QueryLogger} 
instances with a fluent API. + */ + public static class Builder { + + private final Cluster cluster; + + private int maxQueryStringLength = DEFAULT_MAX_QUERY_STRING_LENGTH; + + private int maxParameterValueLength = DEFAULT_MAX_PARAMETER_VALUE_LENGTH; + + private int maxLoggedParameters = DEFAULT_MAX_LOGGED_PARAMETERS; + + private long slowQueryLatencyThresholdMillis = DEFAULT_SLOW_QUERY_THRESHOLD_MS; + + private double slowQueryLatencyThresholdPercentile = DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE; + + private PerHostPercentileTracker perHostPercentileLatencyTracker; + + private boolean constantThreshold = true; + + public Builder(Cluster cluster) { + this.cluster = cluster; + } + + /** + * Enables slow query latency tracking based on constant thresholds. + *

+ * Note: You should either use {@link #withConstantThreshold(long) constant thresholds} + * or {@link #withDynamicThreshold(PerHostPercentileTracker, double) dynamic thresholds}, + * not both. + * + * @param slowQueryLatencyThresholdMillis The threshold in milliseconds beyond which queries are considered 'slow' + * and logged as such by the driver. + * The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_MS} + * @return this {@link Builder} instance (for method chaining). + */ + public Builder withConstantThreshold(long slowQueryLatencyThresholdMillis) { + this.slowQueryLatencyThresholdMillis = slowQueryLatencyThresholdMillis; + constantThreshold = true; + return this; + } + + /** + * Enables slow query latency tracking based on dynamic thresholds. + *

+ * Dynamic thresholds are based on per-host latency percentiles, as computed + * by {@link PerHostPercentileTracker}. + *

+ * Note: You should either use {@link #withConstantThreshold(long) constant thresholds} + * or {@link #withDynamicThreshold(PerHostPercentileTracker, double) dynamic thresholds}, + * not both. + *

+ * This feature is currently provided as a beta preview: it hasn't been extensively tested yet, and the API is still subject + * to change. + * + * @param perHostPercentileLatencyTracker the {@link PerHostPercentileTracker} instance to use for recording per-host latency histograms. + * Cannot be {@code null}. + * @param slowQueryLatencyThresholdPercentile Slow queries threshold percentile. + * It must be comprised between 0 inclusive and 100 exclusive. + * The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE} + * @return this {@link Builder} instance (for method chaining). + */ + @Beta + public Builder withDynamicThreshold(PerHostPercentileTracker perHostPercentileLatencyTracker, double slowQueryLatencyThresholdPercentile) { + this.perHostPercentileLatencyTracker = perHostPercentileLatencyTracker; + this.slowQueryLatencyThresholdPercentile = slowQueryLatencyThresholdPercentile; + constantThreshold = false; + return this; + } + + /** + * Set the maximum length of a CQL query string that can be logged verbatim + * by the driver. Query strings longer than this value will be truncated + * when logged. + * + * @param maxQueryStringLength The maximum length of a CQL query string + * that can be logged verbatim by the driver. + * It must be strictly positive or {@code -1}, + * in which case the query is never truncated + * (use with care). + * The default value is {@link #DEFAULT_MAX_QUERY_STRING_LENGTH}. + * @return this {@link Builder} instance (for method chaining). + */ + public Builder withMaxQueryStringLength(int maxQueryStringLength) { + this.maxQueryStringLength = maxQueryStringLength; + return this; + } + + /** + * Set the maximum length of a query parameter value that can be logged verbatim + * by the driver. Parameter values longer than this value will be truncated + * when logged. + * + * @param maxParameterValueLength The maximum length of a query parameter value + * that can be logged verbatim by the driver. 
+ * It must be strictly positive or {@code -1}, + * in which case the parameter value is never truncated + * (use with care). + * The default value is {@link #DEFAULT_MAX_PARAMETER_VALUE_LENGTH}. + * @return this {@link Builder} instance (for method chaining). + */ + public Builder withMaxParameterValueLength(int maxParameterValueLength) { + this.maxParameterValueLength = maxParameterValueLength; + return this; + } + + /** + * Set the maximum number of query parameters that can be logged + * by the driver. Queries with a number of parameters higher than this value + * will not have all their parameters logged. + * + * @param maxLoggedParameters The maximum number of query parameters that can be logged + * by the driver. It must be strictly positive or {@code -1}, + * in which case all parameters will be logged, regardless of their number + * (use with care). + * The default value is {@link #DEFAULT_MAX_LOGGED_PARAMETERS}. + * @return this {@link Builder} instance (for method chaining). + */ + public Builder withMaxLoggedParameters(int maxLoggedParameters) { + this.maxLoggedParameters = maxLoggedParameters; + return this; + } + + /** + * Build the {@link QueryLogger} instance. + * @return the {@link QueryLogger} instance. + * @throws IllegalArgumentException if the builder is unable to build a valid instance due to incorrect settings. + */ + public QueryLogger build() { + if(constantThreshold) { + return new ConstantThresholdQueryLogger(cluster, maxQueryStringLength, maxParameterValueLength, maxLoggedParameters, slowQueryLatencyThresholdMillis); + } else { + return new DynamicThresholdQueryLogger(cluster, maxQueryStringLength, maxParameterValueLength, maxLoggedParameters, slowQueryLatencyThresholdPercentile, perHostPercentileLatencyTracker); + } + } + + } + + // Getters and Setters + + /** + * Return the maximum length of a CQL query string that can be logged verbatim + * by the driver. Query strings longer than this value will be truncated + * when logged. 
+ * The default value is {@link #DEFAULT_MAX_QUERY_STRING_LENGTH}. + * + * @return The maximum length of a CQL query string that can be logged verbatim + * by the driver. + */ + public int getMaxQueryStringLength() { + return maxQueryStringLength; + } + + /** + * Set the maximum length of a CQL query string that can be logged verbatim + * by the driver. Query strings longer than this value will be truncated + * when logged. + * + * @param maxQueryStringLength The maximum length of a CQL query string + * that can be logged verbatim by the driver. + * It must be strictly positive or {@code -1}, + * in which case the query is never truncated + * (use with care). + * @throws IllegalArgumentException if {@code maxQueryStringLength <= 0 && maxQueryStringLength != -1}. + */ + public void setMaxQueryStringLength(int maxQueryStringLength) { + if (maxQueryStringLength <= 0 && maxQueryStringLength != -1) + throw new IllegalArgumentException("Invalid maxQueryStringLength, should be > 0 or -1, got " + maxQueryStringLength); + this.maxQueryStringLength = maxQueryStringLength; + } + + /** + * Return the maximum length of a query parameter value that can be logged verbatim + * by the driver. Parameter values longer than this value will be truncated + * when logged. + * The default value is {@link #DEFAULT_MAX_PARAMETER_VALUE_LENGTH}. + * + * @return The maximum length of a query parameter value that can be logged verbatim + * by the driver. + */ + public int getMaxParameterValueLength() { + return maxParameterValueLength; + } + + /** + * Set the maximum length of a query parameter value that can be logged verbatim + * by the driver. Parameter values longer than this value will be truncated + * when logged. + * + * @param maxParameterValueLength The maximum length of a query parameter value + * that can be logged verbatim by the driver. + * It must be strictly positive or {@code -1}, + * in which case the parameter value is never truncated + * (use with care). 
+ * @throws IllegalArgumentException if {@code maxParameterValueLength <= 0 && maxParameterValueLength != -1}. + */ + public void setMaxParameterValueLength(int maxParameterValueLength) { + if (maxParameterValueLength <= 0 && maxParameterValueLength != -1) + throw new IllegalArgumentException("Invalid maxParameterValueLength, should be > 0 or -1, got " + maxParameterValueLength); + this.maxParameterValueLength = maxParameterValueLength; + } + + /** + * Return the maximum number of query parameters that can be logged + * by the driver. Queries with a number of parameters higher than this value + * will not have all their parameters logged. + * The default value is {@link #DEFAULT_MAX_LOGGED_PARAMETERS}. + * + * @return The maximum number of query parameters that can be logged + * by the driver. + */ + public int getMaxLoggedParameters() { + return maxLoggedParameters; + } + + /** + * Set the maximum number of query parameters that can be logged + * by the driver. Queries with a number of parameters higher than this value + * will not have all their parameters logged. + * + * @param maxLoggedParameters the maximum number of query parameters that can be logged + * by the driver. It must be strictly positive or {@code -1}, + * in which case all parameters will be logged, regardless of their number + * (use with care). + * @throws IllegalArgumentException if {@code maxLoggedParameters <= 0 && maxLoggedParameters != -1}. 
+ */ + public void setMaxLoggedParameters(int maxLoggedParameters) { + if (maxLoggedParameters <= 0 && maxLoggedParameters != -1) + throw new IllegalArgumentException("Invalid maxLoggedParameters, should be > 0 or -1, got " + maxLoggedParameters); + this.maxLoggedParameters = maxLoggedParameters; + } + + /** + * {@inheritDoc} + */ + @Override + public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { + long latencyMs = NANOSECONDS.toMillis(newLatencyNanos); + if (exception == null) { + maybeLogNormalOrSlowQuery(host, statement, latencyMs); + } else { + maybeLogErrorQuery(host, statement, exception, latencyMs); + } + } + + protected abstract void maybeLogNormalOrSlowQuery(Host host, Statement statement, long latencyMs); + + protected void maybeLogNormalQuery(Host host, Statement statement, long latencyMs) { + if (NORMAL_LOGGER.isDebugEnabled()) { + String message = String.format(NORMAL_TEMPLATE, cluster.getClusterName(), host, latencyMs, statementAsString(statement)); + logQuery(statement, null, NORMAL_LOGGER, message); + } + } + + protected void maybeLogErrorQuery(Host host, Statement statement, Exception exception, long latencyMs) { + if (ERROR_LOGGER.isDebugEnabled()) { + String message = String.format(ERROR_TEMPLATE, cluster.getClusterName(), host, latencyMs, statementAsString(statement)); + logQuery(statement, exception, ERROR_LOGGER, message); + } + } + + protected void logQuery(Statement statement, Exception exception, Logger logger, String message) { + boolean showParameterValues = logger.isTraceEnabled(); + if (showParameterValues) { + StringBuilder params = new StringBuilder(); + if (statement instanceof BoundStatement) { + appendParameters((BoundStatement)statement, params, maxLoggedParameters); + } else if (statement instanceof BatchStatement) { + BatchStatement batchStatement = (BatchStatement)statement; + int remaining = maxLoggedParameters; + for (Statement inner : batchStatement.getStatements()) { + if (inner 
instanceof BoundStatement) { + remaining = appendParameters((BoundStatement)inner, params, remaining); + } + } + } + if (params.length() > 0) + params.append("]"); + logger.trace(message + params, exception); + } else { + logger.debug(message, exception); + } + } + + protected String statementAsString(Statement statement) { + StringBuilder sb = new StringBuilder(); + if (statement instanceof BatchStatement) { + BatchStatement bs = (BatchStatement)statement; + int statements = bs.getStatements().size(); + int boundValues = countBoundValues(bs); + sb.append("[" + statements + " statements, " + boundValues + " bound values] "); + } else if (statement instanceof BoundStatement) { + int boundValues = ((BoundStatement)statement).wrapper.values.length; + sb.append("[" + boundValues + " bound values] "); + } + + append(statement, sb, maxQueryStringLength); + return sb.toString(); + } + + protected int countBoundValues(BatchStatement bs) { + int count = 0; + for (Statement s : bs.getStatements()) { + if (s instanceof BoundStatement) + count += ((BoundStatement)s).wrapper.values.length; + } + return count; + } + + protected int appendParameters(BoundStatement statement, StringBuilder buffer, int remaining) { + if (remaining == 0) + return 0; + ColumnDefinitions metadata = statement.preparedStatement().getVariables(); + int numberOfParameters = metadata.size(); + if (numberOfParameters > 0) { + List definitions = metadata.asList(); + int numberOfLoggedParameters; + if (remaining == -1) { + numberOfLoggedParameters = numberOfParameters; + } else { + numberOfLoggedParameters = remaining > numberOfParameters ? 
numberOfParameters : remaining; + remaining -= numberOfLoggedParameters; + } + for (int i = 0; i < numberOfLoggedParameters; i++) { + if (buffer.length() == 0) + buffer.append(" ["); + else + buffer.append(", "); + buffer.append(String.format("%s:%s", metadata.getName(i), parameterValueAsString(definitions.get(i), statement.wrapper.values[i]))); + } + if (numberOfLoggedParameters < numberOfParameters) { + buffer.append(FURTHER_PARAMS_OMITTED); + } + } + return remaining; + } + + protected String parameterValueAsString(ColumnDefinitions.Definition definition, ByteBuffer raw) { + String valueStr; + if (raw == null || raw.remaining() == 0) { + valueStr = "NULL"; + } else { + DataType type = definition.getType(); + int maxParameterValueLength = this.maxParameterValueLength; + if (type.equals(DataType.blob()) && maxParameterValueLength != -1) { + // prevent large blobs from being converted to strings + int maxBufferLength = Math.max(2, (maxParameterValueLength - 2) / 2); + boolean bufferTooLarge = raw.remaining() > maxBufferLength; + if (bufferTooLarge) { + raw = (ByteBuffer)raw.duplicate().limit(maxBufferLength); + } + Object value = type.deserialize(raw, protocolVersion()); + valueStr = type.format(value); + if (bufferTooLarge) { + valueStr = valueStr + TRUNCATED_OUTPUT; + } + } else { + Object value = type.deserialize(raw, protocolVersion()); + valueStr = type.format(value); + if (maxParameterValueLength != -1 && valueStr.length() > maxParameterValueLength) { + valueStr = valueStr.substring(0, maxParameterValueLength) + TRUNCATED_OUTPUT; + } + } + } + return valueStr; + } + + private ProtocolVersion protocolVersion() { + // Since the QueryLogger can be registered before the Cluster was initialized, we can't retrieve + // it at construction time. Cache it field at first use (a volatile field is good enough since we + // don't need mutual exclusion). 
+ if (protocolVersion == null) { + protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersionEnum(); + // At least one connection was established when QueryLogger is invoked + assert protocolVersion != null : "protocol version should be defined"; + } + return protocolVersion; + } + + protected int append(Statement statement, StringBuilder buffer, int remaining) { + if (statement instanceof StatementWrapper) + statement = ((StatementWrapper)statement).getWrappedStatement(); + + if (statement instanceof RegularStatement) { + remaining = append(((RegularStatement)statement).getQueryString().trim(), buffer, remaining); + } else if (statement instanceof BoundStatement) { + remaining = append(((BoundStatement)statement).preparedStatement().getQueryString().trim(), buffer, remaining); + } else if (statement instanceof BatchStatement) { + BatchStatement batchStatement = (BatchStatement)statement; + remaining = append("BEGIN", buffer, remaining); + switch (batchStatement.batchType) { + case UNLOGGED: + append(" UNLOGGED", buffer, remaining); + break; + case COUNTER: + append(" COUNTER", buffer, remaining); + break; + } + remaining = append(" BATCH", buffer, remaining); + for (Statement stmt : batchStatement.getStatements()) { + remaining = append(" ", buffer, remaining); + remaining = append(stmt, buffer, remaining); + } + remaining = append(" APPLY BATCH", buffer, remaining); + } else { + // Unknown types of statement + // Call toString() as a last resort + remaining = append(statement.toString(), buffer, remaining); + } + if (buffer.charAt(buffer.length() - 1) != ';') { + remaining = append(";", buffer, remaining); + } + return remaining; + } + + protected int append(CharSequence str, StringBuilder buffer, int remaining) { + if (remaining == -2) { + // capacity exceeded + } else if (remaining == -1) { + // unlimited capacity + buffer.append(str); + } else if (str.length() > remaining) { + buffer.append(str, 0, remaining).append(TRUNCATED_OUTPUT); 
+ remaining = -2; + } else { + buffer.append(str); + remaining -= str.length(); + } + return remaining; + } + +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java new file mode 100644 index 00000000000..2a160927b1d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.datastax.driver.core.exceptions.UnsupportedFeatureException; + +/** + * Options related to defaults for individual queries. + */ +public class QueryOptions { + + /** + * The default consistency level for queries: {@link ConsistencyLevel#ONE}. + */ + public static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.ONE; + + /** + * The default serial consistency level for conditional updates: {@link ConsistencyLevel#SERIAL}. + */ + public static final ConsistencyLevel DEFAULT_SERIAL_CONSISTENCY_LEVEL = ConsistencyLevel.SERIAL; + + /** + * The default fetch size for SELECT queries: 5000. + */ + public static final int DEFAULT_FETCH_SIZE = 5000; + + /** + * The default value for {@link #getDefaultIdempotence()}: {@code false}. 
+ */ + public static final boolean DEFAULT_IDEMPOTENCE = false; + + private volatile ConsistencyLevel consistency = DEFAULT_CONSISTENCY_LEVEL; + private volatile ConsistencyLevel serialConsistency = DEFAULT_SERIAL_CONSISTENCY_LEVEL; + private volatile int fetchSize = DEFAULT_FETCH_SIZE; + private volatile boolean defaultIdempotence = DEFAULT_IDEMPOTENCE; + private volatile Cluster.Manager manager; + + /** + * Creates a new {@link QueryOptions} instance using the {@link #DEFAULT_CONSISTENCY_LEVEL}, + * {@link #DEFAULT_SERIAL_CONSISTENCY_LEVEL} and {@link #DEFAULT_FETCH_SIZE}. + */ + public QueryOptions() {} + + void register(Cluster.Manager manager) { + this.manager = manager; + } + + /** + * Sets the default consistency level to use for queries. + *

+ * The consistency level set through this method will be use for queries + * that don't explicitly have a consistency level, i.e. when {@link Statement#getConsistencyLevel} + * returns {@code null}. + * + * @param consistencyLevel the new consistency level to set as default. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setConsistencyLevel(ConsistencyLevel consistencyLevel) { + this.consistency = consistencyLevel; + return this; + } + + /** + * The default consistency level used by queries. + * + * @return the default consistency level used by queries. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } + + /** + * Sets the default serial consistency level to use for queries. + *

+ * The serial consistency level set through this method will be use for queries + * that don't explicitly have a serial consistency level, i.e. when {@link Statement#getSerialConsistencyLevel} + * returns {@code null}. + * + * @param serialConsistencyLevel the new serial consistency level to set as default. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setSerialConsistencyLevel(ConsistencyLevel serialConsistencyLevel) { + this.serialConsistency = serialConsistencyLevel; + return this; + } + + /** + * The default serial consistency level used by queries. + * + * @return the default serial consistency level used by queries. + */ + public ConsistencyLevel getSerialConsistencyLevel() { + return serialConsistency; + } + + /** + * Sets the default fetch size to use for SELECT queries. + *

+ * The fetch size set through this method will be used for queries + * that don't explicitly have a fetch size, i.e. when {@link Statement#getFetchSize} + * is less or equal to 0. + * + * @param fetchSize the new fetch size to set as default. It must be + * strictly positive but you can use {@code Integer.MAX_VALUE} to disable + * paging. + * @return this {@code QueryOptions} instance. + * + * @throws IllegalArgumentException if {@code fetchSize <= 0}. + * @throws UnsupportedFeatureException if version 1 of the native protocol is in + * use and {@code fetchSize != Integer.MAX_VALUE} as paging is not supported by + * version 1 of the protocol. See {@link Cluster.Builder#withProtocolVersion} + * for more details on protocol versions. + */ + public QueryOptions setFetchSize(int fetchSize) { + if (fetchSize <= 0) + throw new IllegalArgumentException("Invalid fetchSize, should be > 0, got " + fetchSize); + + ProtocolVersion version = manager == null ? null : manager.protocolVersion(); + if (fetchSize != Integer.MAX_VALUE && version == ProtocolVersion.V1) + throw new UnsupportedFeatureException(version, "Paging is not supported"); + + this.fetchSize = fetchSize; + return this; + } + + /** + * The default fetch size used by queries. + * + * @return the default fetch size used by queries. + */ + public int getFetchSize() { + return fetchSize; + } + + /** + * Sets the default idempotence for queries. + *

+ * This will be used for statements for which {@link com.datastax.driver.core.Statement#isIdempotent()} + * returns {@code null}. + * + * @param defaultIdempotence the new value to set as default idempotence. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setDefaultIdempotence(boolean defaultIdempotence) { + this.defaultIdempotence = defaultIdempotence; + return this; + } + + /** + * The default idempotence for queries. + *

+ * It defaults to {@link #DEFAULT_IDEMPOTENCE}. + * + * @return the default idempotence for queries. + */ + public boolean getDefaultIdempotence() { + return defaultIdempotence; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java new file mode 100644 index 00000000000..2d7bb3a45eb --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java @@ -0,0 +1,311 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetAddress; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import com.google.common.util.concurrent.Uninterruptibles; + +import com.datastax.driver.core.exceptions.TraceRetrievalException; + +/** + * The Cassandra trace for a query. + *

+ * A trace is generated by Cassandra when query tracing is enabled for the + * query. The trace itself is stored in Cassandra in the {@code sessions} and + * {@code events} tables in the {@code system_traces} keyspace and can be + * retrieved manually using the trace identifier (the one returned by + * {@link #getTraceId}). + *

+ * This class provides facilities to fetch the traces from Cassandra. Please + * note that the writing of the trace is done asynchronously in Cassandra. So + * accessing the trace too soon after the query may result in the trace being + * incomplete. + */ +public class QueryTrace { + private static final String SELECT_SESSIONS_FORMAT = "SELECT * FROM system_traces.sessions WHERE session_id = %s"; + private static final String SELECT_EVENTS_FORMAT = "SELECT * FROM system_traces.events WHERE session_id = %s"; + + private static final int MAX_TRIES = 5; + private static final long BASE_SLEEP_BETWEEN_TRIES_IN_MS = 3; + + private final UUID traceId; + + private volatile String requestType; + // We use the duration to figure out if the trace is complete, because + // that's the last event that is written (and it is written asynchronously + // so it's possible that a fetch gets all the trace except the duration). + private volatile int duration = Integer.MIN_VALUE; + private volatile InetAddress coordinator; + private volatile Map parameters; + private volatile long startedAt; + private volatile List events; + + private final SessionManager session; + private final Lock fetchLock = new ReentrantLock(); + + QueryTrace(UUID traceId, SessionManager session) { + this.traceId = traceId; + this.session = session; + } + + /** + * Returns the identifier of this trace. + *

+ * Note that contrary to the other methods in this class, this + * does not entail fetching query trace details from Cassandra. + * + * @return the identifier of this trace. + */ + public UUID getTraceId() { + return traceId; + } + + /** + * Returns the type of request. + * + * @return the type of request or {@code null} if the request + * type is not yet available. + * + * @throws TraceRetrievalException if the trace details cannot be retrieve + * from Cassandra successfully. + */ + public String getRequestType() { + maybeFetchTrace(); + return requestType; + } + + /** + * Returns the server-side duration of the query in microseconds. + * + * @return the (server side) duration of the query in microseconds. This + * method will return {@code Integer.MIN_VALUE} if the duration is not yet + * available. + * + * @throws TraceRetrievalException if the trace details cannot be retrieve + * from Cassandra successfully. + */ + public int getDurationMicros() { + maybeFetchTrace(); + return duration; + } + + /** + * Returns the coordinator host of the query. + * + * @return the coordinator host of the query or {@code null} + * if the coordinator is not yet available. + * + * @throws TraceRetrievalException if the trace details cannot be retrieve + * from Cassandra successfully. + */ + public InetAddress getCoordinator() { + maybeFetchTrace(); + return coordinator; + } + + /** + * Returns the parameters attached to this trace. + * + * @return the parameters attached to this trace. or + * {@code null} if the coordinator is not yet available. + * + * @throws TraceRetrievalException if the trace details cannot be retrieve + * from Cassandra successfully. + */ + public Map getParameters() { + maybeFetchTrace(); + return parameters; + } + + /** + * Returns the server-side timestamp of the start of this query. + * + * @return the server side timestamp of the start of this query or + * 0 if the start timestamp is not available. 
+ * + * @throws TraceRetrievalException if the trace details cannot be retrieved + * from Cassandra successfully. + */ + public long getStartedAt() { + maybeFetchTrace(); + return startedAt; + } + + /** + * Returns the events contained in this trace. + *

+ * Query tracing is asynchronous in Cassandra. Hence, it + * is possible for the list returned to be missing some events for some of + * the replicas involved in the query if the query trace is requested just + * after the return of the query it is a trace of (the only guarantee being + * that the list will contain the events pertaining to the coordinator of + * the query). + * + * @return the events contained in this trace. + * + * @throws TraceRetrievalException if the trace details cannot be retrieved + * from Cassandra successfully. + */ + public List getEvents() { + maybeFetchTrace(); + return events; + } + + @Override + public String toString() { + maybeFetchTrace(); + return String.format("%s [%s] - %dµs", requestType, traceId, duration); + } + + private void maybeFetchTrace() { + if (duration != Integer.MIN_VALUE) + return; + + fetchLock.lock(); + try { + doFetchTrace(); + } finally { + fetchLock.unlock(); + } + } + + private void doFetchTrace() { + int tries = 0; + try { + // We cannot guarantee the trace is complete. But we can at least wait until we have all the information + // the coordinator logs in the trace. Since the duration is the last thing the coordinator logs, that's + // what we check to know if the trace is "complete" (again, it may not contain the log of replicas). 
+ while (duration == Integer.MIN_VALUE && tries <= MAX_TRIES) { + ++tries; + + ResultSetFuture sessionsFuture = session.executeQuery(new Requests.Query(String.format(SELECT_SESSIONS_FORMAT, traceId)), Statement.DEFAULT); + ResultSetFuture eventsFuture = session.executeQuery(new Requests.Query(String.format(SELECT_EVENTS_FORMAT, traceId)), Statement.DEFAULT); + + Row sessRow = sessionsFuture.get().one(); + if (sessRow != null && !sessRow.isNull("duration")) { + + requestType = sessRow.getString("request"); + coordinator = sessRow.getInet("coordinator"); + if (!sessRow.isNull("parameters")) + parameters = Collections.unmodifiableMap(sessRow.getMap("parameters", String.class, String.class)); + startedAt = sessRow.getDate("started_at").getTime(); + + events = new ArrayList(); + for (Row evRow : eventsFuture.get()) { + events.add(new Event(evRow.getString("activity"), + evRow.getUUID("event_id").timestamp(), + evRow.getInet("source"), + evRow.getInt("source_elapsed"), + evRow.getString("thread"))); + } + events = Collections.unmodifiableList(events); + + // Set the duration last as it's our test to know if the trace is complete + duration = sessRow.getInt("duration"); + } else { + // The trace is not ready. Give it a few milliseconds before trying again. + // Notes: granted, sleeping uninterruptibly is bad, but having all method propagate + // InterruptedException bothers me. + Uninterruptibles.sleepUninterruptibly(tries * BASE_SLEEP_BETWEEN_TRIES_IN_MS, TimeUnit.MILLISECONDS); + } + } + } catch (Exception e) { + throw new TraceRetrievalException("Unexpected exception while fetching query trace", e); + } + + if (tries > MAX_TRIES) + throw new TraceRetrievalException(String.format("Unable to retrieve complete query trace for id %s after %d tries", traceId, MAX_TRIES)); + } + + /** + * A trace event. + *

+ * A query trace is composed of a list of trace events. + */ + public static class Event { + private final String name; + private final long timestamp; + private final InetAddress source; + private final int sourceElapsed; + private final String threadName; + + private Event(String name, long timestamp, InetAddress source, int sourceElapsed, String threadName) { + this.name = name; + // Convert the UUID timestamp to an epoch timestamp; I stole this seemingly random value from cqlsh, hopefully it's correct. + this.timestamp = (timestamp - 0x01b21dd213814000L) / 10000; + this.source = source; + this.sourceElapsed = sourceElapsed; + this.threadName = threadName; + } + + /** + * The event description, that is which activity this event correspond to. + * + * @return the event description. + */ + public String getDescription() { + return name; + } + + /** + * Returns the server side timestamp of the event. + * + * @return the server side timestamp of the event. + */ + public long getTimestamp() { + return timestamp; + } + + /** + * Returns the address of the host having generated this event. + * + * @return the address of the host having generated this event. + */ + public InetAddress getSource() { + return source; + } + + /** + * Returns the number of microseconds elapsed on the source when this event + * occurred since when the source started handling the query. + * + * @return the elapsed time on the source host when that event happened + * in microseconds. + */ + public int getSourceElapsedMicros() { + return sourceElapsed; + } + + /** + * Returns the name of the thread on which this event occurred. + * + * @return the name of the thread on which this event occurred. 
+ */ + public String getThreadName() { + return threadName; + } + + @Override + public String toString() { + return String.format("%s on %s[%s] at %s", name, source, threadName, new Date(timestamp)); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java b/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java new file mode 100644 index 00000000000..c891ef8a0d8 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; + +/** + * A regular (non-prepared and non batched) CQL statement. + *

+ * This class represents a query string along with query options (and optionally + * binary values, see {@code getValues}). It can be extended but {@link SimpleStatement} + * is provided as a simple implementation to build a {@code RegularStatement} directly + * from its query string. + */ +public abstract class RegularStatement extends Statement { + + /** + * Creates a new RegularStatement. + */ + protected RegularStatement() {} + + /** + * Returns the query string for this statement. + * + * @return a valid CQL query string. + */ + public abstract String getQueryString(); + + /** + * The values to use for this statement. + *

+ * Note: Values for a RegularStatement (i.e. if this method does not return + * {@code null}) are not supported with the native protocol version 1: you + * will get an {@link UnsupportedProtocolVersionException} when submitting + * one if version 1 of the protocol is in use (i.e. if you've force version + * 1 through {@link Cluster.Builder#withProtocolVersion} or you use + * Cassandra 1.2). + * + * @param protocolVersion the protocol version in which the returned values + * must be serialized for. + * @return the values to use for this statement or {@code null} if there is + * no such values. + * + * @see SimpleStatement#SimpleStatement(String, Object...) + */ + public abstract ByteBuffer[] getValues(ProtocolVersion protocolVersion); + + /** + * The values to use for this statement, for the given numeric protocol version. + * + * @throws IllegalArgumentException if {@code protocolVersion} does not correspond to any known version. + * + * @deprecated This method is provided for backward compatibility. Use + * {@link #getValues(ProtocolVersion)} instead. + */ + @Deprecated + public ByteBuffer[] getValues(int protocolVersion) { + return getValues(ProtocolVersion.fromInt(protocolVersion)); + } + + /** + * @deprecated This method is provided for binary compatibility only. It is no longer supported, will be removed, + * and simply throws {@link UnsupportedOperationException}. Use {@link #getValues(ProtocolVersion)} instead. + */ + @Deprecated + public ByteBuffer[] getValues() { + throw new UnsupportedOperationException("Method no longer supported; use getValues(ProtocolVersion)"); + } + + /** + * Whether or not this statement has values, that is if {@code getValues} + * will return {@code null} or not. + * + * @return {@code false} if {@link #getValues} returns {@code null}, {@code true} + * otherwise. 
+ */ + public abstract boolean hasValues(); + + @Override + public String toString() { + return getQueryString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ReplicationStategy.java b/driver-core/src/main/java/com/datastax/driver/core/ReplicationStategy.java new file mode 100644 index 00000000000..c1227685fe2 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ReplicationStategy.java @@ -0,0 +1,201 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.*; + +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/* + * Computes the token->list association, given the token ring and token->primary token map. + * + * Note: it's not an interface mainly because we don't want to expose it. + */ +abstract class ReplicationStrategy { + + static ReplicationStrategy create(Map replicationOptions) { + + String strategyClass = replicationOptions.get("class"); + if (strategyClass == null) + return null; + + try { + if (strategyClass.contains("SimpleStrategy")) { + String repFactorString = replicationOptions.get("replication_factor"); + return repFactorString == null ? 
null : new SimpleStrategy(Integer.parseInt(repFactorString)); + } else if (strategyClass.contains("NetworkTopologyStrategy")) { + Map dcRfs = new HashMap(); + for (Map.Entry entry : replicationOptions.entrySet()) + { + if (entry.getKey().equals("class")) + continue; + + dcRfs.put(entry.getKey(), Integer.parseInt(entry.getValue())); + } + return new NetworkTopologyStrategy(dcRfs); + } else { + // We might want to support oldNetworkTopologyStrategy, though not sure anyone still using that + return null; + } + } catch (NumberFormatException e) { + // Cassandra wouldn't let that pass in the first place so this really should never happen + return null; + } + } + + abstract Map> computeTokenToReplicaMap(Map tokenToPrimary, List ring); + + private static Token getTokenWrapping(int i, List ring) { + return ring.get(i % ring.size()); + } + + static class SimpleStrategy extends ReplicationStrategy { + + private final int replicationFactor; + + private SimpleStrategy(int replicationFactor) { + this.replicationFactor = replicationFactor; + } + + Map> computeTokenToReplicaMap(Map tokenToPrimary, List ring) { + + int rf = Math.min(replicationFactor, ring.size()); + + Map> replicaMap = new HashMap>(tokenToPrimary.size()); + for (int i = 0; i < ring.size(); i++) { + // Consecutive sections of the ring can assigned to the same host + Set replicas = new LinkedHashSet(); + for (int j = 0; j < ring.size() && replicas.size() < rf; j++) + replicas.add(tokenToPrimary.get(getTokenWrapping(i+j, ring))); + replicaMap.put(ring.get(i), ImmutableSet.copyOf(replicas)); + } + return replicaMap; + } + } + + static class NetworkTopologyStrategy extends ReplicationStrategy { + private static final Logger logger = LoggerFactory.getLogger(NetworkTopologyStrategy.class); + + private final Map replicationFactors; + + private NetworkTopologyStrategy(Map replicationFactors) { + this.replicationFactors = replicationFactors; + } + + Map> computeTokenToReplicaMap(Map tokenToPrimary, List ring) { + + // This 
is essentially a copy of org.apache.cassandra.locator.NetworkTopologyStrategy + Map> racks = getRacksInDcs(tokenToPrimary.values()); + Map> replicaMap = new HashMap>(tokenToPrimary.size()); + + Set warnedDcs = Sets.newHashSetWithExpectedSize(replicationFactors.size()); + + for (int i = 0; i < ring.size(); i++) { + Map> allDcReplicas = new HashMap>(); + Map> seenRacks = new HashMap>(); + Map> skippedDcEndpoints = new HashMap>(); + for (String dc : replicationFactors.keySet()) { + allDcReplicas.put(dc, new HashSet()); + seenRacks.put(dc, new HashSet()); + skippedDcEndpoints.put(dc, new LinkedHashSet()); // preserve order + } + + // Preserve order - primary replica will be first + Set replicas = new LinkedHashSet(); + for (int j = 0; j < ring.size() && !allDone(allDcReplicas); j++) { + Host h = tokenToPrimary.get(getTokenWrapping(i + j, ring)); + String dc = h.getDatacenter(); + if (dc == null || !allDcReplicas.containsKey(dc)) + continue; + + Integer rf = replicationFactors.get(dc); + Set dcReplicas = allDcReplicas.get(dc); + if (rf == null || dcReplicas.size() >= rf) + continue; + + String rack = h.getRack(); + // Check if we already visited all racks in dc + if (rack == null || seenRacks.get(dc).size() == racks.get(dc).size()) { + replicas.add(h); + dcReplicas.add(h); + } else { + // Is this a new rack? + if (seenRacks.get(dc).contains(rack)) { + skippedDcEndpoints.get(dc).add(h); + } else { + replicas.add(h); + dcReplicas.add(h); + seenRacks.get(dc).add(rack); + // If we've run out of distinct racks, add the nodes skipped so far + if (seenRacks.get(dc).size() == racks.get(dc).size()) { + Iterator skippedIt = skippedDcEndpoints.get(dc).iterator(); + while (skippedIt.hasNext() && dcReplicas.size() < rf) { + Host nextSkipped = skippedIt.next(); + replicas.add(nextSkipped); + dcReplicas.add(nextSkipped); + } + } + } + } + } + + // If we haven't found enough replicas after a whole trip around the ring, this probably + // means that the replication factors are broken. 
+ // Warn the user because that leads to quadratic performance of this method (JAVA-702). + for (Map.Entry> entry : allDcReplicas.entrySet()) { + String dcName = entry.getKey(); + int expectedFactor = replicationFactors.get(dcName); + int achievedFactor = entry.getValue().size(); + if (achievedFactor < expectedFactor && !warnedDcs.contains(dcName)) { + logger.warn("Error while computing token map for datacenter {}: " + + "could not achieve replication factor {} (found {} replicas only), " + + "check your keyspace replication settings. " + + "Note that this can affect the performance of the driver.", + dcName, expectedFactor, achievedFactor); + // only warn once per DC + warnedDcs.add(dcName); + } + } + + replicaMap.put(ring.get(i), ImmutableSet.copyOf(replicas)); + } + return replicaMap; + } + + private boolean allDone(Map> map) { + for (Map.Entry> entry : map.entrySet()) + if (entry.getValue().size() < replicationFactors.get(entry.getKey())) + return false; + return true; + } + + private Map> getRacksInDcs(Iterable hosts) { + Map> result = new HashMap>(); + for (Host host : hosts) { + Set racks = result.get(host.getDatacenter()); + if (racks == null) { + racks = new HashSet(); + result.put(host.getDatacenter(), racks); + } + racks.add(host.getRack()); + } + return result; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/RequestHandler.java b/driver-core/src/main/java/com/datastax/driver/core/RequestHandler.java new file mode 100644 index 00000000000..28bf35bfb11 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/RequestHandler.java @@ -0,0 +1,813 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import com.codahale.metrics.Timer; +import com.google.common.collect.Sets; +import io.netty.util.HashedWheelTimer; +import io.netty.util.Timeout; +import io.netty.util.TimerTask; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.policies.RetryPolicy; +import com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type; +import com.datastax.driver.core.policies.SpeculativeExecutionPolicy.SpeculativeExecutionPlan; + +/** + * Handles a request to cassandra, dealing with host failover and retries on + * unavailable/timeout. 
+ */ +class RequestHandler { + private static final Logger logger = LoggerFactory.getLogger(RequestHandler.class); + + final String id; + + private final SessionManager manager; + private final Callback callback; + + private final QueryPlan queryPlan; + private final SpeculativeExecutionPlan speculativeExecutionPlan; + private final boolean allowSpeculativeExecutions; + private final Set runningExecutions = Sets.newCopyOnWriteArraySet(); + private final Set scheduledExecutions = Sets.newCopyOnWriteArraySet(); + private final Statement statement; + private final HashedWheelTimer scheduler; + + private volatile List triedHosts; + + private volatile Map errors; + + private final Timer.Context timerContext; + private final long startTime; + + private final AtomicBoolean isDone = new AtomicBoolean(); + private AtomicInteger executionCount = new AtomicInteger(); + + public RequestHandler(SessionManager manager, Callback callback, Statement statement) { + this.id = Long.toString(System.identityHashCode(this)); + if(logger.isTraceEnabled()) + logger.trace("[{}] {}", id, statement); + this.manager = manager; + this.callback = callback; + this.scheduler = manager.cluster.manager.connectionFactory.timer; + + callback.register(this); + + this.queryPlan = new QueryPlan(manager.loadBalancingPolicy().newQueryPlan(manager.poolsState.keyspace, statement)); + this.speculativeExecutionPlan = manager.speculativeRetryPolicy().newPlan(manager.poolsState.keyspace, statement); + this.allowSpeculativeExecutions = statement != Statement.DEFAULT + && statement.isIdempotentWithDefault(manager.configuration().getQueryOptions()); + this.statement = statement; + + this.timerContext = metricsEnabled() + ? 
metrics().getRequestsTimer().time() + : null; + this.startTime = System.nanoTime(); + } + + void sendRequest() { + startNewExecution(); + } + + // Called when the corresponding ResultSetFuture is cancelled by the client + void cancel() { + if (!isDone.compareAndSet(false, true)) + return; + + cancelPendingExecutions(null); + } + + private void startNewExecution() { + if (isDone.get()) + return; + + Message.Request request = callback.request(); + int position = executionCount.incrementAndGet(); + // Clone the request after the first execution, since we set the streamId on it later and we + // don't want to share that across executions. + if (position > 1) + request = request.copy(); + + SpeculativeExecution execution = new SpeculativeExecution(request, position); + runningExecutions.add(execution); + execution.sendRequest(); + } + + private void scheduleExecution(long delayMillis) { + if (isDone.get() || delayMillis <= 0) + return; + if(logger.isTraceEnabled()) + logger.trace("[{}] Schedule next speculative execution in {} ms", id, delayMillis); + scheduledExecutions.add(scheduler.newTimeout(newExecutionTask, delayMillis, TimeUnit.MILLISECONDS)); + } + + private final TimerTask newExecutionTask = new TimerTask() { + @Override + public void run(final Timeout timeout) throws Exception { + scheduledExecutions.remove(timeout); + if (!isDone.get()) + // We're on the timer thread so reschedule to another executor + manager.executor().execute(new Runnable() { + @Override + public void run() { + metrics().getErrorMetrics().getSpeculativeExecutions().inc(); + startNewExecution(); + } + }); + } + }; + + private void cancelPendingExecutions(SpeculativeExecution ignore) { + for (SpeculativeExecution execution : runningExecutions) + if (execution != ignore) // not vital but this produces nicer logs + execution.cancel(); + for (Timeout execution : scheduledExecutions) + execution.cancel(); + } + + private void logError(InetSocketAddress address, Throwable exception) { + 
logger.debug("Error querying {}, trying next host (error is: {})", address, exception.toString()); + if (errors == null) + errors = new ConcurrentHashMap(); + errors.put(address, exception); + } + + private void setFinalResult(SpeculativeExecution execution, Connection connection, Message.Response response) { + if (!isDone.compareAndSet(false, true)) { + if(logger.isTraceEnabled()) + logger.trace("[{}] Got beaten to setting the result", execution.id); + return; + } + + if(logger.isTraceEnabled()) + logger.trace("[{}] Setting final result", execution.id); + + cancelPendingExecutions(execution); + + try { + if (timerContext != null) + timerContext.stop(); + + ExecutionInfo info = execution.current.defaultExecutionInfo; + if (triedHosts != null) { + triedHosts.add(execution.current); + info = new ExecutionInfo(triedHosts); + } + if (execution.retryConsistencyLevel != null) + info = info.withAchievedConsistency(execution.retryConsistencyLevel); + callback.onSet(connection, response, info, statement, System.nanoTime() - startTime); + } catch (Exception e) { + callback.onException(connection, + new DriverInternalError("Unexpected exception while setting final result from " + response, e), + System.nanoTime() - startTime, /*unused*/0); + } + } + + private void setFinalException(SpeculativeExecution execution, Connection connection, Exception exception) { + if (!isDone.compareAndSet(false, true)) { + if(logger.isTraceEnabled()) + logger.trace("[{}] Got beaten to setting final exception", execution.id); + return; + } + + if(logger.isTraceEnabled()) + logger.trace("[{}] Setting final exception", execution.id); + + cancelPendingExecutions(execution); + + try { + if (timerContext != null) + timerContext.stop(); + } finally { + callback.onException(connection, exception, System.nanoTime() - startTime, /*unused*/0); + } + } + + // Triggered when an execution reaches the end of the query plan. + // This is only a failure if there are no other running executions. 
+ private void reportNoMoreHosts(SpeculativeExecution execution) { + runningExecutions.remove(execution); + if (runningExecutions.isEmpty()) + setFinalException(execution, null, new NoHostAvailableException( + errors == null ? Collections.emptyMap() : errors)); + } + + private boolean metricsEnabled() { + return manager.configuration().getMetricsOptions() != null; + } + + private Metrics metrics() { + return manager.cluster.manager.metrics; + } + + interface Callback extends Connection.ResponseCallback { + void onSet(Connection connection, Message.Response response, ExecutionInfo info, Statement statement, long latency); + void register(RequestHandler handler); + } + + /** + * An execution of the query against the cluster. + * There is at least one instance per RequestHandler, and possibly more (depending on the SpeculativeExecutionPolicy). + * Each instance may retry on the same host, or on other hosts as defined by the RetryPolicy. + * All instances run concurrently and share the same query plan. 
+ * There are three ways a SpeculativeExecution can stop: + * - it completes the query (with either a success or a fatal error), and reports the result to the RequestHandler + * - it gets cancelled, either because another execution completed the query, or because the RequestHandler was cancelled + * - it reaches the end of the query plan and informs the RequestHandler, which will decide what to do + */ + class SpeculativeExecution implements Connection.ResponseCallback { + final String id; + private final Message.Request request; + private volatile Host current; + private volatile ConsistencyLevel retryConsistencyLevel; + private final AtomicReference queryStateRef; + private final AtomicBoolean nextExecutionScheduled = new AtomicBoolean(); + + // This represents the number of times a retry has been triggered by the RetryPolicy (this is different from + // queryStateRef.get().retryCount, because some retries don't involve the policy, for example after an + // OVERLOADED error). + // This is incremented by one writer at a time, so volatile is good enough. + private volatile int retriesByPolicy; + + private volatile Connection.ResponseHandler connectionHandler; + + SpeculativeExecution(Message.Request request, int position) { + this.id = RequestHandler.this.id + "-" + position; + this.request = request; + this.queryStateRef = new AtomicReference(QueryState.INITIAL); + if(logger.isTraceEnabled()) + logger.trace("[{}] Starting", id); + } + + void sendRequest() { + try { + Host host; + while (!isDone.get() && (host = queryPlan.next()) != null && !queryStateRef.get().isCancelled()) { + if(logger.isTraceEnabled()) + logger.trace("[{}] Querying node {}", id, host); + if (query(host)) + return; + } + reportNoMoreHosts(this); + } catch (Exception e) { + // Shouldn't happen really, but if ever the loadbalancing policy returned iterator throws, we don't want to block. 
+ setFinalException(null, new DriverInternalError("An unexpected error happened while sending requests", e)); + } + } + + private boolean query(final Host host) { + HostConnectionPool currentPool = manager.pools.get(host); + if (currentPool == null || currentPool.isClosed()) + return false; + + if (allowSpeculativeExecutions && nextExecutionScheduled.compareAndSet(false, true)) + scheduleExecution(speculativeExecutionPlan.nextExecution(host)); + + Connection connection = null; + try { + connection = currentPool.borrowConnection(manager.configuration().getPoolingOptions().getPoolTimeoutMillis(), TimeUnit.MILLISECONDS); + if (current != null) { + if (triedHosts == null) + triedHosts = new CopyOnWriteArrayList(); + triedHosts.add(current); + } + current = host; + write(connection, this); + return true; + } catch (ConnectionException e) { + // If we have any problem with the connection, move to the next node. + if (metricsEnabled()) + metrics().getErrorMetrics().getConnectionErrors().inc(); + if (connection != null) + connection.release(); + logError(host.getSocketAddress(), e); + return false; + } catch (BusyConnectionException e) { + // The pool shouldn't have give us a busy connection unless we've maxed up the pool, so move on to the next host. + connection.release(); + logError(host.getSocketAddress(), e); + return false; + } catch (TimeoutException e) { + // We timeout, log it but move to the next node. 
+ logError(host.getSocketAddress(), new DriverException("Timeout while trying to acquire available connection (you may want to increase the driver number of per-host connections)")); + return false; + } catch (RuntimeException e) { + if (connection != null) + connection.release(); + logger.error("Unexpected error while querying " + host.getAddress(), e); + logError(host.getSocketAddress(), e); + return false; + } + } + + private void write(Connection connection, Connection.ResponseCallback responseCallback) throws ConnectionException, BusyConnectionException { + // Make sure cancel() does not see a stale connectionHandler if it sees the new query state + // before connection.write has completed + connectionHandler = null; + + // Ensure query state is "in progress" (can be already if connection.write failed on a previous node and we're retrying) + while (true) { + QueryState previous = queryStateRef.get(); + if (previous.isCancelled()) { + connection.release(); + return; + } + if (previous.inProgress || queryStateRef.compareAndSet(previous, previous.startNext())) + break; + } + + connectionHandler = connection.write(responseCallback, false); + // Only start the timeout when we're sure connectionHandler is set. This avoids an edge case where onTimeout() was triggered + // *before* the call to connection.write had returned. + connectionHandler.startTimeout(); + + // Note that we could have already received the response here (so onSet() / onException() would have been called). This is + // why we only test for CANCELLED_WHILE_IN_PROGRESS below. + + // If cancel() was called after we set the state to "in progress", but before connection.write had completed, it might have + // missed the new value of connectionHandler. So make sure that cancelHandler() gets called here (we might call it twice, + // but it knows how to deal with it). 
+ if (queryStateRef.get() == QueryState.CANCELLED_WHILE_IN_PROGRESS) + connectionHandler.cancelHandler(); + } + + private void retry(final boolean retryCurrent, ConsistencyLevel newConsistencyLevel) { + final Host h = current; + this.retryConsistencyLevel = newConsistencyLevel; + + // We should not retry on the current thread as this will be an IO thread. + manager.executor().execute(new Runnable() { + @Override + public void run() { + if (queryStateRef.get().isCancelled()) + return; + try { + if (retryCurrent) { + if (query(h)) + return; + } + sendRequest(); + } catch (Exception e) { + setFinalException(null, new DriverInternalError("Unexpected exception while retrying query", e)); + } + } + }); + } + + void cancel() { + // Atomically set a special QueryState, that will cause any further operation to abort. + // We want to remember whether a request was in progress when we did this, so there are two cancel states. + while (true) { + QueryState previous = queryStateRef.get(); + if (previous.isCancelled()) { + return; + } else if (previous.inProgress && queryStateRef.compareAndSet(previous, QueryState.CANCELLED_WHILE_IN_PROGRESS)) { + if(logger.isTraceEnabled()) + logger.trace("[{}] Cancelled while in progress", id); + // The connectionHandler should be non-null, but we might miss the update if we're racing with write(). + // If it's still null, this will be handled by re-checking queryStateRef at the end of write(). 
+ if (connectionHandler != null) + connectionHandler.cancelHandler(); + return; + } else if (!previous.inProgress && queryStateRef.compareAndSet(previous, QueryState.CANCELLED_WHILE_COMPLETE)) { + if(logger.isTraceEnabled()) + logger.trace("[{}] Cancelled while complete", id); + return; + } + } + } + + @Override + public Message.Request request() { + if (retryConsistencyLevel != null && retryConsistencyLevel != request.consistency()) + return request.copy(retryConsistencyLevel); + else + return request; + } + + @Override + public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { + QueryState queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) || + !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug("onSet triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, queryState, queryStateRef.get()); + return; + } + + Host queriedHost = current; + Exception exceptionToReport = null; + try { + switch (response.type) { + case RESULT: + connection.release(); + setFinalResult(connection, response); + break; + case ERROR: + Responses.Error err = (Responses.Error)response; + exceptionToReport = err.asException(connection.address); + RetryPolicy.RetryDecision retry = null; + RetryPolicy retryPolicy = statement.getRetryPolicy() == null + ? 
manager.configuration().getPolicies().getRetryPolicy() + : statement.getRetryPolicy(); + switch (err.code) { + case READ_TIMEOUT: + connection.release(); + assert err.infos instanceof ReadTimeoutException; + if (metricsEnabled()) + metrics().getErrorMetrics().getReadTimeouts().inc(); + + ReadTimeoutException rte = (ReadTimeoutException)err.infos; + retry = retryPolicy.onReadTimeout(statement, + rte.getConsistencyLevel(), + rte.getRequiredAcknowledgements(), + rte.getReceivedAcknowledgements(), + rte.wasDataRetrieved(), + retriesByPolicy); + + if (metricsEnabled()) { + if (retry.getType() == Type.RETRY) + metrics().getErrorMetrics().getRetriesOnReadTimeout().inc(); + if (retry.getType() == Type.IGNORE) + metrics().getErrorMetrics().getIgnoresOnReadTimeout().inc(); + } + break; + case WRITE_TIMEOUT: + connection.release(); + assert err.infos instanceof WriteTimeoutException; + if (metricsEnabled()) + metrics().getErrorMetrics().getWriteTimeouts().inc(); + + WriteTimeoutException wte = (WriteTimeoutException)err.infos; + retry = retryPolicy.onWriteTimeout(statement, + wte.getConsistencyLevel(), + wte.getWriteType(), + wte.getRequiredAcknowledgements(), + wte.getReceivedAcknowledgements(), + retriesByPolicy); + + if (metricsEnabled()) { + if (retry.getType() == Type.RETRY) + metrics().getErrorMetrics().getRetriesOnWriteTimeout().inc(); + if (retry.getType() == Type.IGNORE) + metrics().getErrorMetrics().getIgnoresOnWriteTimeout().inc(); + } + break; + case UNAVAILABLE: + connection.release(); + assert err.infos instanceof UnavailableException; + if (metricsEnabled()) + metrics().getErrorMetrics().getUnavailables().inc(); + + UnavailableException ue = (UnavailableException)err.infos; + retry = retryPolicy.onUnavailable(statement, + ue.getConsistencyLevel(), + ue.getRequiredReplicas(), + ue.getAliveReplicas(), + retriesByPolicy); + + if (metricsEnabled()) { + if (retry.getType() == Type.RETRY) + metrics().getErrorMetrics().getRetriesOnUnavailable().inc(); + if 
(retry.getType() == Type.IGNORE) + metrics().getErrorMetrics().getIgnoresOnUnavailable().inc(); + } + break; + case OVERLOADED: + connection.release(); + // Try another node + logger.warn("Host {} is overloaded, trying next host.", connection.address); + DriverException overloaded = new DriverException("Host overloaded"); + logError(connection.address, overloaded); + if (metricsEnabled()) + metrics().getErrorMetrics().getOthers().inc(); + retry(false, null); + return; + case SERVER_ERROR: + connection.release(); + // Defunct connection and try another node + logger.warn("{} replied with server error ({}), trying next host.", connection.address, err.message); + DriverException exception = new DriverException("Host replied with server error: " + err.message); + logError(connection.address, exception); + connection.defunct(exception); + if (metricsEnabled()) + metrics().getErrorMetrics().getOthers().inc(); + retry(false, null); + return; + case IS_BOOTSTRAPPING: + connection.release(); + // Try another node + logger.error("Query sent to {} but it is bootstrapping. 
This shouldn't happen but trying next host.", connection.address); + DriverException bootstrapping = new DriverException("Host is bootstrapping"); + logError(connection.address, bootstrapping); + if (metricsEnabled()) + metrics().getErrorMetrics().getOthers().inc(); + retry(false, null); + return; + case UNPREPARED: + // Do not release connection yet, because we might reuse it to send the PREPARE message (see write() call below) + assert err.infos instanceof MD5Digest; + MD5Digest id = (MD5Digest)err.infos; + PreparedStatement toPrepare = manager.cluster.manager.preparedQueries.get(id); + if (toPrepare == null) { + // This shouldn't happen + connection.release(); + String msg = String.format("Tried to execute unknown prepared query %s", id); + logger.error(msg); + setFinalException(connection, new DriverInternalError(msg)); + return; + } + + String currentKeyspace = connection.keyspace(); + String prepareKeyspace = toPrepare.getQueryKeyspace(); + if (prepareKeyspace != null && (currentKeyspace == null || !currentKeyspace.equals(prepareKeyspace))) { + // This shouldn't happen in normal use, because a user shouldn't try to execute + // a prepared statement with the wrong keyspace set. + // Fail fast (we can't change the keyspace to reprepare, because we're using a pooled connection + // that's shared with other requests). + connection.release(); + throw new IllegalStateException(String.format("Statement was prepared on keyspace %s, can't execute it on %s (%s)", + toPrepare.getQueryKeyspace(), connection.keyspace(), toPrepare.getQueryString())); + } + + logger.info("Query {} is not prepared on {}, preparing before retrying executing. 
" + + "Seeing this message a few times is fine, but seeing it a lot may be source of performance problems", + toPrepare.getQueryString(), connection.address); + + write(connection, prepareAndRetry(toPrepare.getQueryString())); + // we're done for now, the prepareAndRetry callback will handle the rest + return; + default: + connection.release(); + if (metricsEnabled()) + metrics().getErrorMetrics().getOthers().inc(); + break; + } + + if (retry == null) + setFinalResult(connection, response); + else { + switch (retry.getType()) { + case RETRY: + ++retriesByPolicy; + if (logger.isDebugEnabled()) + logger.debug("Doing retry {} for query {} at consistency {}", retriesByPolicy, statement, retry.getRetryConsistencyLevel()); + if (metricsEnabled()) + metrics().getErrorMetrics().getRetries().inc(); + if (!retry.isRetryCurrent()) + logError(connection.address, exceptionToReport); + retry(retry.isRetryCurrent(), retry.getRetryConsistencyLevel()); + break; + case RETHROW: + setFinalResult(connection, response); + break; + case IGNORE: + if (metricsEnabled()) + metrics().getErrorMetrics().getIgnores().inc(); + setFinalResult(connection, new Responses.Result.Void()); + break; + } + } + break; + default: + connection.release(); + setFinalResult(connection, response); + break; + } + } catch (Exception e) { + exceptionToReport = e; + setFinalException(connection, e); + } finally { + if (queriedHost != null && statement != Statement.DEFAULT) + manager.cluster.manager.reportLatency(queriedHost, statement, exceptionToReport, latency); + } + } + + private Connection.ResponseCallback prepareAndRetry(final String toPrepare) { + return new Connection.ResponseCallback() { + + @Override + public Message.Request request() { + return new Requests.Prepare(toPrepare); + } + + @Override + public int retryCount() { + return SpeculativeExecution.this.retryCount(); + } + + @Override + public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { + QueryState 
queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) || + !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug("onSet triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, queryState, queryStateRef.get()); + return; + } + + connection.release(); + + // TODO should we check the response ? + switch (response.type) { + case RESULT: + if (((Responses.Result)response).kind == Responses.Result.Kind.PREPARED) { + logger.debug("Scheduling retry now that query is prepared"); + retry(true, null); + } else { + logError(connection.address, new DriverException("Got unexpected response to prepare message: " + response)); + retry(false, null); + } + break; + case ERROR: + logError(connection.address, new DriverException("Error preparing query, got " + response)); + if (metricsEnabled()) + metrics().getErrorMetrics().getOthers().inc(); + retry(false, null); + break; + default: + // Something's wrong, so we return but we let setFinalResult propagate the exception + SpeculativeExecution.this.setFinalResult(connection, response); + break; + } + } + + @Override + public void onException(Connection connection, Exception exception, long latency, int retryCount) { + SpeculativeExecution.this.onException(connection, exception, latency, retryCount); + } + + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + QueryState queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) || + !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug("onTimeout triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, queryState, queryStateRef.get()); + return false; + } + logError(connection.address, new DriverException("Timeout waiting for response to prepare message")); + retry(false, null); 
+ return true; + } + }; + } + + @Override + public void onException(Connection connection, Exception exception, long latency, int retryCount) { + QueryState queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) || + !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug("onException triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, queryState, queryStateRef.get()); + return; + } + + Host queriedHost = current; + try { + connection.release(); + + if (exception instanceof ConnectionException) { + if (metricsEnabled()) + metrics().getErrorMetrics().getConnectionErrors().inc(); + ConnectionException ce = (ConnectionException)exception; + logError(ce.address, ce); + retry(false, null); + return; + } + setFinalException(connection, exception); + } catch (Exception e) { + // This shouldn't happen, but if it does, we want to signal the callback, not let it hang indefinitely + setFinalException(null, new DriverInternalError("An unexpected error happened while handling exception " + exception, e)); + } finally { + if (queriedHost != null && statement != Statement.DEFAULT) + manager.cluster.manager.reportLatency(queriedHost, statement, exception, latency); + } + } + + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + QueryState queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) || + !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug("onTimeout triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, queryState, queryStateRef.get()); + return false; + } + + Host queriedHost = current; + OperationTimedOutException timeoutException = new OperationTimedOutException(connection.address); + try { + logError(connection.address, timeoutException); + 
retry(false, null); + } catch (Exception e) { + // This shouldn't happen, but if it does, we want to signal the callback, not let it hang indefinitely + setFinalException(null, new DriverInternalError("An unexpected error happened while handling timeout", e)); + } finally { + if (queriedHost != null && statement != Statement.DEFAULT) + manager.cluster.manager.reportLatency(queriedHost, statement, timeoutException, latency); + } + return true; + } + + @Override + public int retryCount() { + return queryStateRef.get().retryCount; + } + + private void setFinalException(Connection connection, Exception exception) { + RequestHandler.this.setFinalException(this, connection, exception); + } + + private void setFinalResult(Connection connection, Message.Response response) { + RequestHandler.this.setFinalResult(this, connection, response); + } + } + + /** + * The state of a SpeculativeExecution. + * + * This is used to prevent races between request completion (either success or error) and timeout. + * A retry is in progress once we have written the request to the connection and until we get back a response (see onSet + * or onException) or a timeout (see onTimeout). + * The count increments on each retry. 
+ */ + static class QueryState { + static final QueryState INITIAL = new QueryState(-1, false); + static final QueryState CANCELLED_WHILE_IN_PROGRESS = new QueryState(Integer.MIN_VALUE, false); + static final QueryState CANCELLED_WHILE_COMPLETE = new QueryState(Integer.MIN_VALUE + 1, false); + + final int retryCount; + final boolean inProgress; + + private QueryState(int count, boolean inProgress) { + this.retryCount = count; + this.inProgress = inProgress; + } + + boolean isInProgressAt(int retryCount) { + return inProgress && this.retryCount == retryCount; + } + + QueryState complete() { + assert inProgress; + return new QueryState(retryCount, false); + } + + QueryState startNext() { + assert !inProgress; + return new QueryState(retryCount + 1, true); + } + + public boolean isCancelled() { + return this == CANCELLED_WHILE_IN_PROGRESS || this == CANCELLED_WHILE_COMPLETE; + } + + @Override + public String toString() { + return String.format("QueryState(count=%d, inProgress=%s, cancelled=%s)", retryCount, inProgress, isCancelled()); + } + } + + /** + * Wraps the iterator return by {@link com.datastax.driver.core.policies.LoadBalancingPolicy} to make it safe for + * concurrent access by multiple threads. + */ + static class QueryPlan { + private final Iterator iterator; + + QueryPlan(Iterator iterator) { + this.iterator = iterator; + } + + /** @return null if there are no more hosts */ + synchronized Host next() { + return iterator.hasNext() ? iterator.next() : null; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Requests.java b/driver-core/src/main/java/com/datastax/driver/core/Requests.java new file mode 100644 index 00000000000..7e34ad69827 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Requests.java @@ -0,0 +1,557 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; + +import com.google.common.collect.ImmutableMap; +import io.netty.buffer.ByteBuf; + +class Requests { + + private Requests() {} + + public static class Startup extends Message.Request { + private static final String CQL_VERSION_OPTION = "CQL_VERSION"; + private static final String CQL_VERSION = "3.0.0"; + + public static final String COMPRESSION_OPTION = "COMPRESSION"; + + public static final Message.Coder coder = new Message.Coder() { + public void encode(Startup msg, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeStringMap(msg.options, dest); + } + + public int encodedSize(Startup msg, ProtocolVersion version) { + return CBUtil.sizeOfStringMap(msg.options); + } + }; + + private final Map options; + + public Startup(ProtocolOptions.Compression compression) { + super(Message.Request.Type.STARTUP); + + ImmutableMap.Builder map = new ImmutableMap.Builder(); + map.put(CQL_VERSION_OPTION, CQL_VERSION); + if (compression != ProtocolOptions.Compression.NONE) + map.put(COMPRESSION_OPTION, compression.toString()); + this.options = map.build(); + } + + @Override + public String toString() { + return "STARTUP " + options; + } + } + + // Only for protocol v1 + public static class Credentials extends Message.Request { + + public static final Message.Coder coder = new Message.Coder() { + + public void encode(Credentials msg, ByteBuf dest, ProtocolVersion version) { + assert 
version == ProtocolVersion.V1; + CBUtil.writeStringMap(msg.credentials, dest); + } + + public int encodedSize(Credentials msg, ProtocolVersion version) { + assert version == ProtocolVersion.V1; + return CBUtil.sizeOfStringMap(msg.credentials); + } + }; + + private final Map credentials; + + public Credentials(Map credentials) { + super(Message.Request.Type.CREDENTIALS); + this.credentials = credentials; + } + } + + public static class Options extends Message.Request { + + public static final Message.Coder coder = new Message.Coder() + { + public void encode(Options msg, ByteBuf dest, ProtocolVersion version) {} + + public int encodedSize(Options msg, ProtocolVersion version) { + return 0; + } + }; + + public Options() { + super(Message.Request.Type.OPTIONS); + } + + @Override + public String toString() { + return "OPTIONS"; + } + } + + public static class Query extends Message.Request { + + public static final Message.Coder coder = new Message.Coder() { + public void encode(Query msg, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeLongString(msg.query, dest); + msg.options.encode(dest, version); + } + + public int encodedSize(Query msg, ProtocolVersion version) { + return CBUtil.sizeOfLongString(msg.query) + + msg.options.encodedSize(version); + } + }; + + public final String query; + public final QueryProtocolOptions options; + + public Query(String query) { + this(query, QueryProtocolOptions.DEFAULT, false); + } + + public Query(String query, QueryProtocolOptions options, boolean tracingRequested) { + super(Type.QUERY, tracingRequested); + this.query = query; + this.options = options; + } + + @Override + Request copy() { + return new Query(this.query, options, isTracingRequested()); + } + + @Override + Request copy(ConsistencyLevel newConsistencyLevel) { + return new Query(this.query, options.copy(newConsistencyLevel), isTracingRequested()); + } + + @Override + public String toString() { + return "QUERY " + query + '(' + options + ')'; + } + } + + public 
static class Execute extends Message.Request { + + public static final Message.Coder coder = new Message.Coder() { + public void encode(Execute msg, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeBytes(msg.statementId.bytes, dest); + msg.options.encode(dest, version); + } + + public int encodedSize(Execute msg, ProtocolVersion version) { + return CBUtil.sizeOfBytes(msg.statementId.bytes) + + msg.options.encodedSize(version); + } + }; + + public final MD5Digest statementId; + public final QueryProtocolOptions options; + + public Execute(MD5Digest statementId, QueryProtocolOptions options, boolean tracingRequested) { + super(Message.Request.Type.EXECUTE, tracingRequested); + this.statementId = statementId; + this.options = options; + } + + @Override + Request copy() { + return new Execute(statementId, options, isTracingRequested()); + } + + @Override + Request copy(ConsistencyLevel newConsistencyLevel) { + return new Execute(statementId, options.copy(newConsistencyLevel), isTracingRequested()); + } + + @Override + public String toString() { + return "EXECUTE " + statementId + " (" + options + ')'; + } + } + + static enum QueryFlag { + // The order of that enum matters!! 
+ VALUES, + SKIP_METADATA, + PAGE_SIZE, + PAGING_STATE, + SERIAL_CONSISTENCY, + DEFAULT_TIMESTAMP, + VALUE_NAMES; + + public static EnumSet deserialize(int flags) { + EnumSet set = EnumSet.noneOf(QueryFlag.class); + QueryFlag[] values = QueryFlag.values(); + for (int n = 0; n < values.length; n++) + { + if ((flags & (1 << n)) != 0) + set.add(values[n]); + } + return set; + } + + public static int serialize(EnumSet flags) { + int i = 0; + for (QueryFlag flag : flags) + i |= 1 << flag.ordinal(); + return i; + } + } + + public static class QueryProtocolOptions { + + public static final QueryProtocolOptions DEFAULT = new QueryProtocolOptions(ConsistencyLevel.ONE, + Collections.emptyList(), + false, + -1, + null, + ConsistencyLevel.SERIAL, + Long.MIN_VALUE); + + private final EnumSet flags = EnumSet.noneOf(QueryFlag.class); + public final ConsistencyLevel consistency; + public final List values; + public final boolean skipMetadata; + public final int pageSize; + public final ByteBuffer pagingState; + public final ConsistencyLevel serialConsistency; + public final long defaultTimestamp; + + public QueryProtocolOptions(ConsistencyLevel consistency, + List values, + boolean skipMetadata, + int pageSize, + ByteBuffer pagingState, + ConsistencyLevel serialConsistency, + long defaultTimestamp) { + + this.consistency = consistency; + this.values = values; + this.skipMetadata = skipMetadata; + this.pageSize = pageSize; + this.pagingState = pagingState; + this.serialConsistency = serialConsistency; + this.defaultTimestamp = defaultTimestamp; + + // Populate flags + if (!values.isEmpty()) + flags.add(QueryFlag.VALUES); + if (skipMetadata) + flags.add(QueryFlag.SKIP_METADATA); + if (pageSize >= 0) + flags.add(QueryFlag.PAGE_SIZE); + if (pagingState != null) + flags.add(QueryFlag.PAGING_STATE); + if (serialConsistency != ConsistencyLevel.SERIAL) + flags.add(QueryFlag.SERIAL_CONSISTENCY); + if (defaultTimestamp != Long.MIN_VALUE) + flags.add(QueryFlag.DEFAULT_TIMESTAMP); + } + + 
public QueryProtocolOptions copy(ConsistencyLevel newConsistencyLevel) { + return new QueryProtocolOptions(newConsistencyLevel, values, skipMetadata, pageSize, pagingState, serialConsistency, defaultTimestamp); + } + + public void encode(ByteBuf dest, ProtocolVersion version) { + switch (version) { + case V1: + if (flags.contains(QueryFlag.VALUES)) + CBUtil.writeValueList(values, dest); + CBUtil.writeConsistencyLevel(consistency, dest); + break; + case V2: + case V3: + CBUtil.writeConsistencyLevel(consistency, dest); + dest.writeByte((byte)QueryFlag.serialize(flags)); + if (flags.contains(QueryFlag.VALUES)) + CBUtil.writeValueList(values, dest); + if (flags.contains(QueryFlag.PAGE_SIZE)) + dest.writeInt(pageSize); + if (flags.contains(QueryFlag.PAGING_STATE)) + CBUtil.writeValue(pagingState, dest); + if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) + CBUtil.writeConsistencyLevel(serialConsistency, dest); + if (version == ProtocolVersion.V3 && flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) + dest.writeLong(defaultTimestamp); + break; + default: + throw version.unsupported(); + } + } + + public int encodedSize(ProtocolVersion version) { + switch (version) { + case V1: + return CBUtil.sizeOfValueList(values) + + CBUtil.sizeOfConsistencyLevel(consistency); + case V2: + case V3: + int size = 0; + size += CBUtil.sizeOfConsistencyLevel(consistency); + size += 1; // flags + if (flags.contains(QueryFlag.VALUES)) + size += CBUtil.sizeOfValueList(values); + if (flags.contains(QueryFlag.PAGE_SIZE)) + size += 4; + if (flags.contains(QueryFlag.PAGING_STATE)) + size += CBUtil.sizeOfValue(pagingState); + if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) + size += CBUtil.sizeOfConsistencyLevel(serialConsistency); + if (version == ProtocolVersion.V3 && flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) + size += 8; + return size; + default: + throw version.unsupported(); + } + } + + @Override + public String toString() { + return String.format("[cl=%s, vals=%s, skip=%b, psize=%d, state=%s, 
serialCl=%s]", consistency, values, skipMetadata, pageSize, pagingState, serialConsistency); + } + } + + public static class Batch extends Message.Request { + + public static final Message.Coder coder = new Message.Coder() { + public void encode(Batch msg, ByteBuf dest, ProtocolVersion version) { + int queries = msg.queryOrIdList.size(); + assert queries <= 0xFFFF; + + dest.writeByte(fromType(msg.type)); + dest.writeShort(queries); + + for (int i = 0; i < queries; i++) { + Object q = msg.queryOrIdList.get(i); + dest.writeByte((byte)(q instanceof String ? 0 : 1)); + if (q instanceof String) + CBUtil.writeLongString((String)q, dest); + else + CBUtil.writeBytes(((MD5Digest)q).bytes, dest); + + CBUtil.writeValueList(msg.values.get(i), dest); + } + + msg.options.encode(dest, version); + } + + public int encodedSize(Batch msg, ProtocolVersion version) { + int size = 3; // type + nb queries + for (int i = 0; i < msg.queryOrIdList.size(); i++) { + Object q = msg.queryOrIdList.get(i); + size += 1 + (q instanceof String + ? 
CBUtil.sizeOfLongString((String)q) + : CBUtil.sizeOfBytes(((MD5Digest)q).bytes)); + + size += CBUtil.sizeOfValueList(msg.values.get(i)); + } + size += msg.options.encodedSize(version); + return size; + } + + private byte fromType(BatchStatement.Type type) { + switch (type) { + case LOGGED: return 0; + case UNLOGGED: return 1; + case COUNTER: return 2; + default: throw new AssertionError(); + } + } + }; + + public final BatchStatement.Type type; + public final List queryOrIdList; + public final List> values; + public final BatchProtocolOptions options; + + public Batch(BatchStatement.Type type, List queryOrIdList, List> values, BatchProtocolOptions options, boolean tracingRequested) { + super(Message.Request.Type.BATCH, tracingRequested); + this.type = type; + this.queryOrIdList = queryOrIdList; + this.values = values; + this.options = options; + } + + @Override + Request copy() { + return new Batch(type, queryOrIdList, values, options, isTracingRequested()); + } + + @Override + Request copy(ConsistencyLevel newConsistencyLevel) { + return new Batch(type, queryOrIdList, values, options.copy(newConsistencyLevel), isTracingRequested()); + } + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("BATCH of ["); + for (int i = 0; i < queryOrIdList.size(); i++) { + if (i > 0) sb.append(", "); + sb.append(queryOrIdList.get(i)).append(" with ").append(values.get(i).size()).append(" values"); + } + sb.append("] with options ").append(options); + return sb.toString(); + } + } + + public static class BatchProtocolOptions { + private final EnumSet flags = EnumSet.noneOf(QueryFlag.class); + public final ConsistencyLevel consistency; + public final ConsistencyLevel serialConsistency; + public final long defaultTimestamp; + + public BatchProtocolOptions(ConsistencyLevel consistency, ConsistencyLevel serialConsistency, long defaultTimestamp) { + this.consistency = consistency; + this.serialConsistency = serialConsistency; + 
this.defaultTimestamp = defaultTimestamp; + + if (serialConsistency != ConsistencyLevel.SERIAL) + flags.add(QueryFlag.SERIAL_CONSISTENCY); + if (defaultTimestamp != Long.MIN_VALUE) + flags.add(QueryFlag.DEFAULT_TIMESTAMP); + } + + BatchProtocolOptions copy(ConsistencyLevel newConsistencyLevel) { + return new BatchProtocolOptions(newConsistencyLevel, serialConsistency, defaultTimestamp); + } + + public void encode(ByteBuf dest, ProtocolVersion version) { + switch (version) { + case V2: + CBUtil.writeConsistencyLevel(consistency, dest); + break; + case V3: + CBUtil.writeConsistencyLevel(consistency, dest); + dest.writeByte((byte)QueryFlag.serialize(flags)); + if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) + CBUtil.writeConsistencyLevel(serialConsistency, dest); + if (flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) + dest.writeLong(defaultTimestamp); + break; + default: + throw version.unsupported(); + } + } + + public int encodedSize(ProtocolVersion version) { + switch (version) { + case V2: + return CBUtil.sizeOfConsistencyLevel(consistency); + case V3: + int size = 0; + size += CBUtil.sizeOfConsistencyLevel(consistency); + size += 1; // flags + if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) + size += CBUtil.sizeOfConsistencyLevel(serialConsistency); + if (flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) + size += 8; + return size; + default: + throw version.unsupported(); + } + } + + @Override + public String toString() { + return String.format("[cl=%s, serialCl=%s, defaultTs=%d]", + consistency, serialConsistency, defaultTimestamp); + } + } + + public static class Prepare extends Message.Request { + + public static final Message.Coder coder = new Message.Coder() { + + public void encode(Prepare msg, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeLongString(msg.query, dest); + } + + public int encodedSize(Prepare msg, ProtocolVersion version) { + return CBUtil.sizeOfLongString(msg.query); + } + }; + + private final String query; + + public Prepare(String 
query) { + super(Message.Request.Type.PREPARE); + this.query = query; + } + + @Override + public String toString() { + return "PREPARE " + query; + } + } + + public static class Register extends Message.Request { + + public static final Message.Coder coder = new Message.Coder() { + public void encode(Register msg, ByteBuf dest, ProtocolVersion version) { + dest.writeShort(msg.eventTypes.size()); + for (ProtocolEvent.Type type : msg.eventTypes) + CBUtil.writeEnumValue(type, dest); + } + + public int encodedSize(Register msg, ProtocolVersion version) { + int size = 2; + for (ProtocolEvent.Type type : msg.eventTypes) + size += CBUtil.sizeOfEnumValue(type); + return size; + } + }; + + private final List eventTypes; + + public Register(List eventTypes) { + super(Message.Request.Type.REGISTER); + this.eventTypes = eventTypes; + } + + @Override + public String toString() { + return "REGISTER " + eventTypes; + } + } + + public static class AuthResponse extends Message.Request { + + public static final Message.Coder coder = new Message.Coder() { + + public void encode(AuthResponse response, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeValue(response.token, dest); + } + + public int encodedSize(AuthResponse response, ProtocolVersion version) { + return CBUtil.sizeOfValue(response.token); + } + }; + + private final byte[] token; + + public AuthResponse(byte[] token) { + super(Message.Request.Type.AUTH_RESPONSE); + this.token = token; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Responses.java b/driver-core/src/main/java/com/datastax/driver/core/Responses.java new file mode 100644 index 00000000000..ae9d19a3f46 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Responses.java @@ -0,0 +1,590 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import io.netty.buffer.ByteBuf; + +import com.datastax.driver.core.Responses.Result.Rows.Metadata; +import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.utils.Bytes; + +import static com.datastax.driver.core.SchemaElement.KEYSPACE; +import static com.datastax.driver.core.SchemaElement.TABLE; + +class Responses { + + private Responses() {} + + public static class Error extends Message.Response { + + public static final Message.Decoder decoder = new Message.Decoder() { + @Override + public Error decode(ByteBuf body, ProtocolVersion version) { + ExceptionCode code = ExceptionCode.fromValue(body.readInt()); + String msg = CBUtil.readString(body); + Object infos = null; + switch (code) { + case UNAVAILABLE: + ConsistencyLevel clu = CBUtil.readConsistencyLevel(body); + int required = body.readInt(); + int alive = body.readInt(); + infos = new UnavailableException(clu, required, alive); + break; + case WRITE_TIMEOUT: + case READ_TIMEOUT: + ConsistencyLevel clt = CBUtil.readConsistencyLevel(body); + int received = body.readInt(); + int blockFor = body.readInt(); + if (code == ExceptionCode.WRITE_TIMEOUT) { + WriteType writeType = Enum.valueOf(WriteType.class, CBUtil.readString(body)); + infos = new WriteTimeoutException(clt, writeType, received, blockFor); + } else { + byte dataPresent = body.readByte(); + infos = new ReadTimeoutException(clt, received, blockFor, dataPresent != 0); + } + break; + case 
UNPREPARED: + infos = MD5Digest.wrap(CBUtil.readBytes(body)); + break; + case ALREADY_EXISTS: + String ksName = CBUtil.readString(body); + String cfName = CBUtil.readString(body); + infos = new AlreadyExistsException(ksName, cfName); + break; + } + return new Error(version, code, msg, infos); + } + }; + + public final ProtocolVersion serverProtocolVersion; + public final ExceptionCode code; + public final String message; + public final Object infos; // can be null + + private Error(ProtocolVersion serverProtocolVersion, ExceptionCode code, String message, Object infos) { + super(Message.Response.Type.ERROR); + this.serverProtocolVersion = serverProtocolVersion; + this.code = code; + this.message = message; + this.infos = infos; + } + + public DriverException asException(InetSocketAddress host) { + switch (code) { + case SERVER_ERROR: return new DriverInternalError(String.format("An unexpected error occurred server side on %s: %s", host, message)); + case PROTOCOL_ERROR: return new DriverInternalError("An unexpected protocol error occurred. 
This is a bug in this library, please report: " + message); + case BAD_CREDENTIALS: return new AuthenticationException(host, message); + case UNAVAILABLE: return ((UnavailableException)infos).copy(); // We copy to have a nice stack trace + case OVERLOADED: return new OverloadedException(host, message); + case IS_BOOTSTRAPPING: return new BootstrappingException(host, message); + case TRUNCATE_ERROR: return new TruncateException(message); + case WRITE_TIMEOUT: return ((WriteTimeoutException)infos).copy(); + case READ_TIMEOUT: return ((ReadTimeoutException)infos).copy(); + case SYNTAX_ERROR: return new SyntaxError(message); + case UNAUTHORIZED: return new UnauthorizedException(message); + case INVALID: return new InvalidQueryException(message); + case CONFIG_ERROR: return new InvalidConfigurationInQueryException(message); + case ALREADY_EXISTS: return ((AlreadyExistsException)infos).copy(); + case UNPREPARED: return new UnpreparedException(host, message); + default: return new DriverInternalError(String.format("Unknown protocol error code %s returned by %s. The error message was: %s", code, host, message)); + } + } + + @Override + public String toString() { + return "ERROR " + code + ": " + message; + } + } + + public static class Ready extends Message.Response { + + public static final Message.Decoder decoder = new Message.Decoder() { + public Ready decode(ByteBuf body, ProtocolVersion version) { + // TODO: Would it be cool to return a singleton? 
Check we don't need to + // set the streamId or something + return new Ready(); + } + }; + + public Ready() { + super(Message.Response.Type.READY); + } + + @Override + public String toString() { + return "READY"; + } + } + + public static class Authenticate extends Message.Response { + + public static final Message.Decoder decoder = new Message.Decoder() { + public Authenticate decode(ByteBuf body, ProtocolVersion version) { + String authenticator = CBUtil.readString(body); + return new Authenticate(authenticator); + } + }; + + public final String authenticator; + + public Authenticate(String authenticator) { + super(Message.Response.Type.AUTHENTICATE); + this.authenticator = authenticator; + } + + @Override + public String toString() { + return "AUTHENTICATE " + authenticator; + } + } + + public static class Supported extends Message.Response { + + public static final Message.Decoder decoder = new Message.Decoder() { + public Supported decode(ByteBuf body, ProtocolVersion version) { + return new Supported(CBUtil.readStringToStringListMap(body)); + } + }; + + public final Map> supported; + public final Set supportedCompressions = EnumSet.noneOf(ProtocolOptions.Compression.class); + + public Supported(Map> supported) { + super(Message.Response.Type.SUPPORTED); + this.supported = supported; + + parseCompressions(); + } + + private void parseCompressions() { + List compList = supported.get(Requests.Startup.COMPRESSION_OPTION); + if (compList == null) + return; + + for (String compStr : compList) { + ProtocolOptions.Compression compr = ProtocolOptions.Compression.fromString(compStr); + if (compr != null) + supportedCompressions.add(compr); + } + } + + @Override + public String toString() { + return "SUPPORTED " + supported; + } + } + + public static abstract class Result extends Message.Response { + + public static final Message.Decoder decoder = new Message.Decoder() { + public Result decode(ByteBuf body, ProtocolVersion version) { + Kind kind = 
Kind.fromId(body.readInt()); + return kind.subDecoder.decode(body, version); + } + }; + + public enum Kind { + VOID (1, Void.subcodec), + ROWS (2, Rows.subcodec), + SET_KEYSPACE (3, SetKeyspace.subcodec), + PREPARED (4, Prepared.subcodec), + SCHEMA_CHANGE(5, SchemaChange.subcodec); + + private final int id; + final Message.Decoder subDecoder; + + private static final Kind[] ids; + static { + int maxId = -1; + for (Kind k : Kind.values()) + maxId = Math.max(maxId, k.id); + ids = new Kind[maxId + 1]; + for (Kind k : Kind.values()) { + if (ids[k.id] != null) + throw new IllegalStateException("Duplicate kind id"); + ids[k.id] = k; + } + } + + private Kind(int id, Message.Decoder subDecoder) { + this.id = id; + this.subDecoder = subDecoder; + } + + public static Kind fromId(int id) { + Kind k = ids[id]; + if (k == null) + throw new DriverInternalError(String.format("Unknown kind id %d in RESULT message", id)); + return k; + } + } + + public final Kind kind; + + protected Result(Kind kind) { + super(Message.Response.Type.RESULT); + this.kind = kind; + } + + public static class Void extends Result { + // Even though we have no specific information here, don't make a + // singleton since as each message it has in fact a streamid and connection. 
+ public Void() { + super(Kind.VOID); + } + + public static final Message.Decoder subcodec = new Message.Decoder() { + public Result decode(ByteBuf body, ProtocolVersion version) { + return new Void(); + } + }; + + @Override + public String toString() { + return "EMPTY RESULT"; + } + } + + public static class SetKeyspace extends Result { + public final String keyspace; + + private SetKeyspace(String keyspace) { + super(Kind.SET_KEYSPACE); + this.keyspace = keyspace; + } + + public static final Message.Decoder subcodec = new Message.Decoder() { + public Result decode(ByteBuf body, ProtocolVersion version) { + return new SetKeyspace(CBUtil.readString(body)); + } + }; + + @Override + public String toString() { + return "RESULT set keyspace " + keyspace; + } + } + + public static class Rows extends Result { + + public static class Metadata { + + private static enum Flag + { + // The order of that enum matters!! + GLOBAL_TABLES_SPEC, + HAS_MORE_PAGES, + NO_METADATA; + + public static EnumSet deserialize(int flags) { + EnumSet set = EnumSet.noneOf(Flag.class); + Flag[] values = Flag.values(); + for (int n = 0; n < values.length; n++) { + if ((flags & (1 << n)) != 0) + set.add(values[n]); + } + return set; + } + + public static int serialize(EnumSet flags) { + int i = 0; + for (Flag flag : flags) + i |= 1 << flag.ordinal(); + return i; + } + } + + static final Metadata EMPTY = new Metadata(0, null, null); + + public final int columnCount; + public final ColumnDefinitions columns; // Can be null if no metadata was asked by the query + public final ByteBuffer pagingState; + + private Metadata(int columnCount, ColumnDefinitions columns, ByteBuffer pagingState) { + this.columnCount = columnCount; + this.columns = columns; + this.pagingState = pagingState; + } + + public static Metadata decode(ByteBuf body) { + + // flags & column count + EnumSet flags = Flag.deserialize(body.readInt()); + int columnCount = body.readInt(); + + ByteBuffer state = null; + if 
(flags.contains(Flag.HAS_MORE_PAGES)) + state = CBUtil.readValue(body); + + if (flags.contains(Flag.NO_METADATA)) + return new Metadata(columnCount, null, state); + + boolean globalTablesSpec = flags.contains(Flag.GLOBAL_TABLES_SPEC); + + String globalKsName = null; + String globalCfName = null; + if (globalTablesSpec) { + globalKsName = CBUtil.readString(body); + globalCfName = CBUtil.readString(body); + } + + // metadata (names/types) + ColumnDefinitions.Definition[] defs = new ColumnDefinitions.Definition[columnCount]; + for (int i = 0; i < columnCount; i++) { + String ksName = globalTablesSpec ? globalKsName : CBUtil.readString(body); + String cfName = globalTablesSpec ? globalCfName : CBUtil.readString(body); + String name = CBUtil.readString(body); + DataType type = DataType.decode(body); + defs[i] = new ColumnDefinitions.Definition(ksName, cfName, name, type); + } + + return new Metadata(columnCount, new ColumnDefinitions(defs), state); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + if (columns == null) { + sb.append('[').append(columnCount).append(" columns]"); + } else { + for (ColumnDefinitions.Definition column : columns) { + sb.append('[').append(column.getName()); + sb.append(" (").append(column.getType()).append(")]"); + } + } + if (pagingState != null) + sb.append(" (to be continued)"); + return sb.toString(); + } + } + + public static final Message.Decoder subcodec = new Message.Decoder() { + public Result decode(ByteBuf body, ProtocolVersion version) { + + Metadata metadata = Metadata.decode(body); + + int rowCount = body.readInt(); + int columnCount = metadata.columnCount; + + Queue> data = new ArrayDeque>(rowCount); + for (int i = 0; i < rowCount; i++) { + List row = new ArrayList(columnCount); + for (int j = 0; j < columnCount; j++) + row.add(CBUtil.readValue(body)); + data.add(row); + } + + return new Rows(metadata, data, version); + } + }; + + public final Metadata metadata; + public final Queue> 
data; + private final ProtocolVersion version; + + private Rows(Metadata metadata, Queue> data, ProtocolVersion version) { + super(Kind.ROWS); + this.metadata = metadata; + this.data = data; + this.version = version; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("ROWS ").append(metadata).append('\n'); + for (List row : data) { + for (int i = 0; i < row.size(); i++) { + ByteBuffer v = row.get(i); + if (v == null) { + sb.append(" | null"); + } else { + sb.append(" | "); + if (metadata.columns == null) { + sb.append(Bytes.toHexString(v)); + } else { + sb.append(metadata.columns.getType(i).deserialize(v, version)); + } + } + } + sb.append('\n'); + } + sb.append("---"); + return sb.toString(); + } + } + + public static class Prepared extends Result { + + public static final Message.Decoder subcodec = new Message.Decoder() { + public Result decode(ByteBuf body, ProtocolVersion version) { + MD5Digest id = MD5Digest.wrap(CBUtil.readBytes(body)); + Rows.Metadata metadata = Rows.Metadata.decode(body); + Rows.Metadata resultMetadata = decodeResultMetadata(body, version); + return new Prepared(id, metadata, resultMetadata); + } + + private Metadata decodeResultMetadata(ByteBuf body, ProtocolVersion version) { + switch (version) { + case V1: + return Rows.Metadata.EMPTY; + case V2: + case V3: + return Rows.Metadata.decode(body); + default: + throw version.unsupported(); + } + } + }; + + public final MD5Digest statementId; + public final Rows.Metadata metadata; + public final Rows.Metadata resultMetadata; + + private Prepared(MD5Digest statementId, Rows.Metadata metadata, Rows.Metadata resultMetadata) { + super(Kind.PREPARED); + this.statementId = statementId; + this.metadata = metadata; + this.resultMetadata = resultMetadata; + } + + @Override + public String toString() { + return "RESULT PREPARED " + statementId + ' ' + metadata + " (resultMetadata=" + resultMetadata + ')'; + } + } + + public static class SchemaChange 
extends Result { + + public enum Change { CREATED, UPDATED, DROPPED } + + public final Change change; + public final SchemaElement targetType; + public final String targetKeyspace; + public final String targetName; + + public static final Message.Decoder subcodec = new Message.Decoder() { + public Result decode(ByteBuf body, ProtocolVersion version) + { + // Note: the CREATE KEYSPACE/TABLE/TYPE SCHEMA_CHANGE response is different from the SCHEMA_CHANGE EVENT type + Change change; + SchemaElement target; + String keyspace, name; + switch (version) { + case V1: + case V2: + change = CBUtil.readEnumValue(Change.class, body); + keyspace = CBUtil.readString(body); + name = CBUtil.readString(body); + target = name.isEmpty() ? KEYSPACE : TABLE; + return new SchemaChange(change, target, keyspace, name); + case V3: + change = CBUtil.readEnumValue(Change.class, body); + target = CBUtil.readEnumValue(SchemaElement.class, body); + keyspace = CBUtil.readString(body); + name = (target == KEYSPACE) ? "" : CBUtil.readString(body); + return new SchemaChange(change, target, keyspace, name); + default: + throw version.unsupported(); + } + } + }; + + private SchemaChange(Change change, SchemaElement targetType, String targetKeyspace, String targetName) { + super(Kind.SCHEMA_CHANGE); + this.change = change; + this.targetType = targetType; + this.targetKeyspace = targetKeyspace; + this.targetName = targetName; + } + + @Override + public String toString() { + return "RESULT schema change " + change + " on " + targetType + ' ' + targetKeyspace + (targetName.isEmpty() ? "" : '.' 
+ targetName); + } + } + } + + public static class Event extends Message.Response { + + public static final Message.Decoder decoder = new Message.Decoder() { + public Event decode(ByteBuf body, ProtocolVersion version) { + return new Event(ProtocolEvent.deserialize(body, version)); + } + }; + + public final ProtocolEvent event; + + public Event(ProtocolEvent event) { + super(Message.Response.Type.EVENT); + this.event = event; + } + + @Override + public String toString() { + return "EVENT " + event; + } + } + + public static class AuthChallenge extends Message.Response { + + public static final Message.Decoder decoder = new Message.Decoder() { + public AuthChallenge decode(ByteBuf body, ProtocolVersion version) { + ByteBuffer b = CBUtil.readValue(body); + if (b == null) + return new AuthChallenge(null); + + byte[] token = new byte[b.remaining()]; + b.get(token); + return new AuthChallenge(token); + } + }; + + public final byte[] token; + + private AuthChallenge(byte[] token) { + super(Message.Response.Type.AUTH_CHALLENGE); + this.token = token; + } + } + + public static class AuthSuccess extends Message.Response { + + public static final Message.Decoder decoder = new Message.Decoder() { + public AuthSuccess decode(ByteBuf body, ProtocolVersion version) { + ByteBuffer b = CBUtil.readValue(body); + if (b == null) + return new AuthSuccess(null); + + byte[] token = new byte[b.remaining()]; + b.get(token); + return new AuthSuccess(token); + } + }; + + public final byte[] token; + + private AuthSuccess(byte[] token) { + super(Message.Response.Type.AUTH_SUCCESS); + this.token = token; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java new file mode 100644 index 00000000000..50c4a5a0c0c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + + +import java.util.Iterator; +import java.util.List; + +import com.google.common.util.concurrent.ListenableFuture; + +/** + * The result of a query. + *

+ * The retrieval of the rows of a ResultSet is generally paged (a first page + * of result is fetched and the next one is only fetched once all the results + * of the first one has been consumed). The size of the pages can be configured + * either globally through {@link QueryOptions#setFetchSize} or per-statement + * with {@link Statement#setFetchSize}. Though new pages are automatically (and + * transparently) fetched when needed, it is possible to force the retrieval + * of the next page early through {@link #fetchMoreResults}. Please note however + * that this ResultSet paging is not available with the version 1 of the native + * protocol (i.e. with Cassandra 1.2 or if version 1 has been explicitly requested + * through {@link Cluster.Builder#withProtocolVersion}). If the protocol version 1 + * is in use, a ResultSet is always fetched in it's entirely and it's up to the + * client to make sure that no query can yield ResultSet that won't hold in memory. + *

+ * Note that this class is not thread-safe. + */ +public interface ResultSet extends Iterable { + + /** + * Returns the columns returned in this ResultSet. + * + * @return the columns returned in this ResultSet. + */ + public ColumnDefinitions getColumnDefinitions(); + + /** + * Returns whether this ResultSet has more results. + * + * @return whether this ResultSet has more results. + */ + public boolean isExhausted(); + + /** + * Returns the next result from this ResultSet. + * + * @return the next row in this resultSet or null if this ResultSet is + * exhausted. + */ + public Row one(); + + /** + * Returns all the remaining rows in this ResultSet as a list. + *

+ * Note that, contrary to {@code iterator()} or successive calls to + * {@code one()}, this method forces fetching the full content of the ResultSet + * at once, holding it all in memory in particular. It is thus recommended + * to prefer iterations through {@code iterator()} when possible, especially + * if the ResultSet can be big. + * + * @return a list containing the remaining results of this ResultSet. The + * returned list is empty if and only the ResultSet is exhausted. The ResultSet + * will be exhausted after a call to this method. + */ + public List all(); + + /** + * Returns an iterator over the rows contained in this ResultSet. + * + * The {@link Iterator#next} method is equivalent to calling {@link #one}. + * So this iterator will consume results from this ResultSet and after a + * full iteration, the ResultSet will be empty. + * + * The returned iterator does not support the {@link Iterator#remove} method. + * + * @return an iterator that will consume and return the remaining rows of + * this ResultSet. + */ + @Override + public Iterator iterator(); + + /** + * The number of rows that can be retrieved from this result set without + * blocking to fetch. + * + * @return the number of rows readily available in this result set. If + * {@link #isFullyFetched()}, this is the total number of rows remaining + * in this result set (after which the result set will be exhausted). + */ + public int getAvailableWithoutFetching(); + + /** + * Whether all results from this result set have been fetched from the + * database. + *

+ * Note that if {@code isFullyFetched()}, then {@link #getAvailableWithoutFetching} + * will return how many rows remain in the result set before exhaustion. But + * please note that {@code !isFullyFetched()} never guarantees that the result set + * is not exhausted (you should call {@code isExhausted()} to verify it). + * + * @return whether all results have been fetched. + */ + public boolean isFullyFetched(); + + /** + * Force fetching the next page of results for this result set, if any. + *

+ * This method is entirely optional. It will be called automatically while + * the result set is consumed (through {@link #one}, {@link #all} or iteration) + * when needed (i.e. when {@code getAvailableWithoutFetching() == 0} and + * {@code isFullyFetched() == false}). + *

+ * You can however call this method manually to force the fetching of the + * next page of results. This can allow to prefetch results before they are + * strictly needed. For instance, if you want to prefetch the next page of + * results as soon as there is less than 100 rows readily available in this + * result set, you can do: + *

+     *   ResultSet rs = session.execute(...);
+     *   Iterator<Row> iter = rs.iterator();
+     *   while (iter.hasNext()) {
+     *       if (rs.getAvailableWithoutFetching() == 100 && !rs.isFullyFetched())
+     *           rs.fetchMoreResults();
+     *       Row row = iter.next()
+     *       ... process the row ...
+     *   }
+     * 
+ * This method is not blocking, so in the example above, the call to {@code + * fetchMoreResults} will not block the processing of the 100 currently available + * rows (but {@code iter.hasNext()} will block once those rows have been processed + * until the fetch query returns, if it hasn't yet). + *

+ * Only one page of results (for a given result set) can be + * fetched at any given time. If this method is called twice and the query + * triggered by the first call has not returned yet when the second one is + * performed, then the 2nd call will simply return a future on the currently + * in progress query. + * + * @return a future on the completion of fetching the next page of results. + * If the result set is already fully retrieved ({@code isFullyFetched() == true}), + * then the returned future will return immediately but not particular error will be + * thrown (you should thus call {@code isFullyFetched() to know if calling this + * method can be of any use}). + */ + public ListenableFuture fetchMoreResults(); + + /** + * Returns information on the execution of the last query made for this ResultSet. + *

+ * Note that in most cases, a ResultSet is fetched with only one query, but large + * result sets can be paged and thus be retrieved by multiple queries. In that + * case this method return the {@code ExecutionInfo} for the last query + * performed. To retrieve the information for all queries, use {@link #getAllExecutionInfo}. + *

+ * The returned object includes basic information such as the queried hosts, + * but also the Cassandra query trace if tracing was enabled for the query. + * + * @return the execution info for the last query made for this ResultSet. + */ + public ExecutionInfo getExecutionInfo(); + + /** + * Return the execution information for all queries made to retrieve this + * ResultSet. + *

+ * Unless the ResultSet is large enough to get paged underneath, the returned + * list will be singleton. If paging has been used however, the returned list + * contains the {@code ExecutionInfo} for all the queries done to obtain this + * ResultSet (at the time of the call) in the order those queries were made. + * + * @return a list of the execution info for all the queries made for this ResultSet. + */ + public List getAllExecutionInfo(); + + /** + * If the query that produced this ResultSet was a conditional update, + * return whether it was successfully applied. + *

+ * This is equivalent to calling: + * + *

+     * rs.one().getBool("[applied]");
+     * 
+ *

+ * For consistency, this method always returns {@code true} for + * non-conditional queries (although there is no reason to call the method + * in that case). This is also the case for conditional DDL statements + * ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF NOT EXISTS}), + * for which Cassandra doesn't return an {@code [applied]} column. + *

+ * Note that, for versions of Cassandra strictly lower than 2.0.9 and 2.1.0-rc2, + * a server-side bug (CASSANDRA-7337) causes this method to always return + * {@code true} for batches containing conditional queries. + * + * @return if the query was a conditional update, whether it was applied. + * {@code true} for other types of queries. + * + * @see CASSANDRA-7337 + */ + public boolean wasApplied(); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java new file mode 100644 index 00000000000..c8aed67f625 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.google.common.util.concurrent.ListenableFuture; + +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.QueryExecutionException; +import com.datastax.driver.core.exceptions.QueryValidationException; + +/** + * A future on a {@link ResultSet}. + * + * Note that this class implements Guava's {@code + * ListenableFuture} and can so be used with Guava's future utilities. 
+ */ +public interface ResultSetFuture extends ListenableFuture { + + /** + * Waits for the query to return and return its result. + * + * This method is usually more convenient than {@link #get} because it: + *

    + *
  • Waits for the result uninterruptibly, and so doesn't throw + * {@link InterruptedException}.
  • + *
  • Returns meaningful exceptions, instead of having to deal + * with ExecutionException.
  • + *
+ * As such, it is the preferred way to get the future result. + * + * @return the query result set. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, that is an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, + * unauthorized or any other validation problem). + */ + public ResultSet getUninterruptibly(); + + /** + * Waits for the provided time for the query to return and return its + * result if available. + * + * This method is usually more convenient than {@link #get} because it: + *
    + *
  • Waits for the result uninterruptibly, and so doesn't throw + * {@link InterruptedException}.
  • + *
  • Returns meaningful exceptions, instead of having to deal + * with ExecutionException.
  • + *
+ * As such, it is the preferred way to get the future result. + * + * @param timeout the time to wait for the query to return. + * @param unit the unit for {@code timeout}. + * @return the query result set. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, that is an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, + * unauthorized or any other validation problem). + * @throws TimeoutException if the wait timed out (Note that this is + * different from a Cassandra timeout, which is a {@code + * QueryExecutionException}). + */ + public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws TimeoutException; + + /** + * Attempts to cancel the execution of the request corresponding to this + * future. This attempt will fail if the request has already returned. + *

+ * Please note that this only cancel the request driver side, but nothing + * is done to interrupt the execution of the request Cassandra side (and that even + * if {@code mayInterruptIfRunning} is true) since Cassandra does not + * support such interruption. + *

+ * This method can be used to ensure no more work is performed driver side + * (which, while it doesn't include stopping a request already submitted + * to a Cassandra node, may include not retrying another Cassandra host on + * failure/timeout) if the ResultSet is not going to be retried. Typically, + * the code to wait for a request result for a maximum of 1 second could + * look like: + *

+     *   ResultSetFuture future = session.executeAsync(...some query...);
+     *   try {
+     *       ResultSet result = future.get(1, TimeUnit.SECONDS);
+     *       ... process result ...
+     *   } catch (TimeoutException e) {
+     *       future.cancel(true); // Ensure any resource used by this query driver
+     *                            // side is released immediately
+     *       ... handle timeout ...
+     *   }
+     * 
+ * + * @param mayInterruptIfRunning the value of this parameter is currently + * ignored. + * @return {@code false} if the future could not be cancelled (it has already + * completed normally); {@code true} otherwise. + */ + @Override + public boolean cancel(boolean mayInterruptIfRunning); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Row.java b/driver-core/src/main/java/com/datastax/driver/core/Row.java new file mode 100644 index 00000000000..88aa1631740 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Row.java @@ -0,0 +1,608 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * A CQL Row returned in a {@link ResultSet}. + *

+ * The values of a CQL Row can be retrieve by either index (index starts at 0) + * or name. When setting them by name, names follow the case insensitivity + * rules explained in {@link ColumnDefinitions}. + */ +public interface Row extends GettableData { + // Note that we re-include all the methods of GettableData just for the sake of better javadoc + + /** + * Returns the columns contained in this Row. + * + * @return the columns contained in this Row. + */ + public ColumnDefinitions getColumnDefinitions(); + + /** + * Returns whether the {@code i}th value of this row is NULL. + * + * @param i the index ({@code 0 <= i < size()}) of the column to check. + * @return whether the {@code i}th value of this row is NULL. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + */ + @Override + public boolean isNull(int i); + + /** + * Returns whether the value for column {@code name} in this row is NULL. + * + * @param name the name of the column to check. + * @return whether the value of column {@code name} is NULL. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + */ + @Override + public boolean isNull(String name); + + /** + * Returns the {@code i}th value of this row as a boolean. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the boolean value of the {@code i}th column in this row. If the + * value is NULL, {@code false} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type BOOLEAN. + */ + @Override + public boolean getBool(int i); + + /** + * Returns the value of column {@code name} as a boolean. + * + * @param name the name of the column to retrieve. + * @return the boolean value of column {@code name}. 
If the value is NULL, + * {@code false} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type BOOLEAN. + */ + @Override + public boolean getBool(String name); + + /** + * Returns the {@code i}th value of this row as an integer. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as an integer. If the + * value is NULL, {@code 0} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type INT. + */ + @Override + public int getInt(int i); + + /** + * Returns the value of column {@code name} as an integer. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as an integer. If the value is NULL, + * {@code 0} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type INT. + */ + @Override + public int getInt(String name); + + /** + * Returns the {@code i}th value of this row as a long. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a long. If the + * value is NULL, {@code 0L} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type BIGINT or COUNTER. + */ + @Override + public long getLong(int i); + + /** + * Returns the value of column {@code name} as a long. + * + * @param name the name of the column to retrieve. 
+ * @return the value of column {@code name} as a long. If the value is NULL, + * {@code 0L} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code i} is not of type BIGINT or COUNTER. + */ + @Override + public long getLong(String name); + + /** + * Returns the {@code i}th value of this row as a date. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a data. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type TIMESTAMP. + */ + @Override + public Date getDate(int i); + + /** + * Returns the value of column {@code name} as a date. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a date. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type TIMESTAMP. + */ + @Override + public Date getDate(String name); + + /** + * Returns the {@code i}th value of this row as a float. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a float. If the + * value is NULL, {@code 0.0f} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type FLOAT. + */ + @Override + public float getFloat(int i); + + /** + * Returns the value of column {@code name} as a float. 
+ * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a float. If the value is NULL, + * {@code 0.0f} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type FLOAT. + */ + @Override + public float getFloat(String name); + + /** + * Returns the {@code i}th value of this row as a double. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a double. If the + * value is NULL, {@code 0.0} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type DOUBLE. + */ + @Override + public double getDouble(int i); + + /** + * Returns the value of column {@code name} as a double. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a double. If the value is NULL, + * {@code 0.0} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type DOUBLE. + */ + @Override + public double getDouble(String name); + + /** + * Returns the {@code i}th value of this row as a ByteBuffer. + * + * Note: this method always return the bytes composing the value, even if + * the column is not of type BLOB. That is, this method never throw an + * InvalidTypeException. However, if the type is not BLOB, it is up to the + * caller to handle the returned value correctly. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a ByteBuffer. 
If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + */ + @Override + public ByteBuffer getBytesUnsafe(int i); + + /** + * Returns the value of column {@code name} as a ByteBuffer. + * + * Note: this method always return the bytes composing the value, even if + * the column is not of type BLOB. That is, this method never throw an + * InvalidTypeException. However, if the type is not BLOB, it is up to the + * caller to handle the returned value correctly. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a ByteBuffer. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + */ + @Override + public ByteBuffer getBytesUnsafe(String name); + + /** + * Returns the {@code i}th value of this row as a byte array. + *

+ * Note that this method validate that the column is of type BLOB. If you want to retrieve + * the bytes for any type of columns, use {@link #getBytesUnsafe(int)} instead. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a byte array. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} type is not of type BLOB. + */ + @Override + public ByteBuffer getBytes(int i); + + /** + * Returns the value of column {@code name} as a byte array. + *

+ * Note that this method validate that the column is of type BLOB. If you want to retrieve + * the bytes for any type of columns, use {@link #getBytesUnsafe(String)} instead. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a byte array. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code i} type is not of type BLOB. + */ + @Override + public ByteBuffer getBytes(String name); + + /** + * Returns the {@code i}th value of this row as a string. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a string. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} type is none of: + * VARCHAR, TEXT or ASCII. + */ + @Override + public String getString(int i); + + /** + * Returns the value of column {@code name} as a string. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a string. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} type is none of: + * VARCHAR, TEXT or ASCII. + */ + @Override + public String getString(String name); + + /** + * Returns the {@code i}th value of this row as a variable length integer. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a variable + * length integer. 
If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type VARINT. + */ + @Override + public BigInteger getVarint(int i); + + /** + * Returns the value of column {@code name} as a variable length integer. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a variable length integer. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type VARINT. + */ + @Override + public BigInteger getVarint(String name); + + /** + * Returns the {@code i}th value of this row as a variable length decimal. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a variable + * length decimal. If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type DECIMAL. + */ + @Override + public BigDecimal getDecimal(int i); + + /** + * Returns the value of column {@code name} as a variable length decimal. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a variable length decimal. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type DECIMAL. 
+ */ + @Override + public BigDecimal getDecimal(String name); + + /** + * Returns the {@code i}th value of this row as a UUID. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a UUID. + * If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type UUID + * or TIMEUUID. + */ + @Override + public UUID getUUID(int i); + + /** + * Returns the value of column {@code name} as a UUID. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a UUID. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type + * UUID or TIMEUUID. + */ + @Override + public UUID getUUID(String name); + + /** + * Returns the {@code i}th value of this row as an InetAddress. + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as an InetAddress. + * If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of type INET. + */ + @Override + public InetAddress getInet(int i); + + /** + * Returns the value of column {@code name} as an InetAddress. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as an InetAddress. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. 
if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type + * INET. + */ + @Override + public InetAddress getInet(String name); + + /** + * Returns the {@code i}th value of this row as a {@link Token}. + *

+ * {@link #getPartitionKeyToken()} should generally be preferred to this method (unless the + * token column is aliased). + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as an Token. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of the type of token values + * for this cluster (this depends on the configured partitioner). + */ + public Token getToken(int i); + + /** + * Returns the value of column {@code name} as a {@link Token}. + *

+ * {@link #getPartitionKeyToken()} should generally be preferred to this method (unless the + * token column is aliased). + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a Token. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of the type of token values + * for this cluster (this depends on the configured partitioner). + */ + public Token getToken(String name); + + /** + * Returns the value of the first column containing a {@link Token}. + *

+ * This method is a shorthand for queries returning a single token in an unaliased + * column. It will look for the first name matching {@code token(...)}: + *

+     * {@code
+     * ResultSet rs = session.execute("SELECT token(k) FROM my_table WHERE k = 1");
+     * Token token = rs.one().getPartitionKeyToken(); // retrieves token(k)
+     * }
+     * 
+ * If that doesn't work for you (for example, if you're using an alias), use + * {@link #getToken(int)} or {@link #getToken(String)}. + * + * @return the value of column {@code name} as a Token. + * + * @throws IllegalStateException if no column named {@code token(...)} exists in this + * ResultSet. + * @throws InvalidTypeException if the first column named {@code token(...)} is not of + * the type of token values for this cluster (this depends on the configured partitioner). + */ + public Token getPartitionKeyToken(); + + /** + * Returns the {@code i}th value of this row as a list. + * + * @param the type of the elements of the list to return. + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @param elementsClass the class for the elements of the list to retrieve. + * @return the value of the {@code i}th column in this row as a list of + * {@code elementsClass} objects. If the value is NULL, an empty list is + * returned (note that Cassandra makes no difference between an empty list + * and column of type list that is not set). The returned list is immutable. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not a list or if its + * elements are not of class {@code elementsClass}. + */ + @Override + public List getList(int i, Class elementsClass); + + /** + * Returns the value of column {@code name} as a list. + * + * @param the type of the elements of the list to return. + * @param name the name of the column to retrieve. + * @param elementsClass the class for the elements of the list to retrieve. + * @return the value of the {@code i}th column in this row as a list of + * {@code elementsClass} objects. If the value is NULL, an empty list is + * returned (note that Cassandra makes no difference between an empty list + * and column of type list that is not set). The returned list is immutable. 
+ * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not a list or if its + * elements are not of class {@code elementsClass}. + */ + @Override + public List getList(String name, Class elementsClass); + + /** + * Returns the {@code i}th value of this row as a set. + * + * @param the type of the elements of the set to return. + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @param elementsClass the class for the elements of the set to retrieve. + * @return the value of the {@code i}th column in this row as a set of + * {@code elementsClass} objects. If the value is NULL, an empty set is + * returned (note that Cassandra makes no difference between an empty set + * and column of type set that is not set). The returned set is immutable. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not a set or if its + * elements are not of class {@code elementsClass}. + */ + @Override + public Set getSet(int i, Class elementsClass); + + /** + * Returns the value of column {@code name} as a set. + * + * @param the type of the elements of the set to return. + * @param name the name of the column to retrieve. + * @param elementsClass the class for the elements of the set to retrieve. + * @return the value of the {@code i}th column in this row as a set of + * {@code elementsClass} objects. If the value is NULL, an empty set is + * returned (note that Cassandra makes no difference between an empty set + * and column of type set that is not set). The returned set is immutable. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. 
+ * @throws InvalidTypeException if column {@code name} is not a set or if its + * elements are not of class {@code elementsClass}. + */ + @Override + public Set getSet(String name, Class elementsClass); + + /** + * Returns the {@code i}th value of this row as a map. + * + * @param the type of the keys of the map to return. + * @param the type of the values of the map to return. + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @param keysClass the class for the keys of the map to retrieve. + * @param valuesClass the class for the values of the map to retrieve. + * @return the value of the {@code i}th column in this row as a map of + * {@code keysClass} to {@code valuesClass} objects. If the value is NULL, + * an empty map is returned (note that Cassandra makes no difference + * between an empty map and column of type map that is not set). The + * returned map is immutable. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not a map, if its + * keys are not of class {@code keysClass} or if its values are not of + * class {@code valuesClass}. + */ + @Override + public Map getMap(int i, Class keysClass, Class valuesClass); + + /** + * Returns the value of column {@code name} as a map. + * + * @param the type of the keys of the map to return. + * @param the type of the values of the map to return. + * @param name the name of the column to retrieve. + * @param keysClass the class for the keys of the map to retrieve. + * @param valuesClass the class for the values of the map to retrieve. + * @return the value of the {@code i}th column in this row as a map of + * {@code keysClass} to {@code valuesClass} objects. If the value is NULL, + * an empty map is returned (note that Cassandra makes no difference + * between an empty map and column of type map that is not set). The + * returned map is immutable. 
+ * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not a map, if its + * keys are not of class {@code keysClass} or if its values are not of + * class {@code valuesClass}. + */ + @Override + public Map getMap(String name, Class keysClass, Class valuesClass); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SSLOptions.java b/driver-core/src/main/java/com/datastax/driver/core/SSLOptions.java new file mode 100644 index 00000000000..f68b0608492 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SSLOptions.java @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import javax.net.ssl.SSLContext; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; + +/** + * Options to provide to enable SSL connections. + */ +public class SSLOptions { + + private static final String SSL_PROTOCOL = "TLS"; + + /** + * The default SSL cipher suites. + */ + public static final String[] DEFAULT_SSL_CIPHER_SUITES = { "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" }; + + final SSLContext context; + final String[] cipherSuites; + + /** + * Creates default SSL options. + *

+ * The resulting options will use the default JSSE options, and you can use the default + * JSSE System properties + * to customize it's behavior. This may in particular involve + * creating a simple keyStore and trustStore. + *

+ * The cipher suites used by this default instance are the one defined by + * {@code DEFAULT_SSL_CIPHER_SUITES} and match the default cipher suites + * supported by Cassandra server side. + */ + public SSLOptions() { + this(makeDefaultContext(), DEFAULT_SSL_CIPHER_SUITES); + } + + /** + * Creates SSL options that uses the provided SSL context and cipher suites. + * + * @param context the {@code SSLContext} to use. + * @param cipherSuites the cipher suites to use. + */ + public SSLOptions(SSLContext context, String[] cipherSuites) { + this.context = context; + this.cipherSuites = cipherSuites; + } + + private static SSLContext makeDefaultContext() throws IllegalStateException { + try { + SSLContext ctx = SSLContext.getInstance(SSL_PROTOCOL); + ctx.init(null, null, null); // use defaults + return ctx; + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("This JVM doesn't support TLS, this shouldn't happen"); + } catch (KeyManagementException e) { + throw new IllegalStateException("Cannot initialize SSL Context", e); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SchemaElement.java b/driver-core/src/main/java/com/datastax/driver/core/SchemaElement.java new file mode 100644 index 00000000000..fa29718a61b --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SchemaElement.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +enum SchemaElement { + KEYSPACE, TABLE, TYPE +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ServerSideTimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/ServerSideTimestampGenerator.java new file mode 100644 index 00000000000..5560e2932ea --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ServerSideTimestampGenerator.java @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * A timestamp generator that always returns {@link Long#MIN_VALUE}, in order to let Cassandra + * assign server-side timestamps. + */ +public class ServerSideTimestampGenerator implements TimestampGenerator { + /** + * The unique instance of this generator. + */ + public static final TimestampGenerator INSTANCE = new ServerSideTimestampGenerator(); + + @Override + public long next() { + return Long.MIN_VALUE; + } + + private ServerSideTimestampGenerator() { + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java new file mode 100644 index 00000000000..cfd99d6fe0f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -0,0 +1,425 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.io.Closeable; +import java.util.Collection; + +import com.google.common.util.concurrent.ListenableFuture; + +import com.datastax.driver.core.exceptions.*; + +/** + * A session holds connections to a Cassandra cluster, allowing it to be queried. + * + * Each session maintains multiple connections to the cluster nodes, + * provides policies to choose which node to use for each query (round-robin on + * all nodes of the cluster by default), and handles retries for failed query (when + * it makes sense), etc... + *

+ * Session instances are thread-safe and usually a single instance is enough + * per application. As a given session can only be "logged" into one keyspace at + * a time (where the "logged" keyspace is the one used by query if the query doesn't + * explicitely use a fully qualified table name), it can make sense to create one + * session per keyspace used. This is however not necessary to query multiple keyspaces + * since it is always possible to use a single session with fully qualified table name + * in queries. + */ +public interface Session extends Closeable { + + /** + * The keyspace to which this Session is currently logged in, if any. + *

+ * This correspond to the name passed to {@link Cluster#connect(String)}, or to the + * last keyspace logged into through a "USE" CQL query if one was used. + * + * @return the name of the keyspace to which this Session is currently + * logged in, or {@code null} if the session is logged to no keyspace. + */ + public String getLoggedKeyspace(); + + /** + * Force the initialization of this Session instance if it hasn't been + * initialized yet. + *

+ * Please note first that most use won't need to call this method + * explicitly. If you use the {@link Cluster#connect} method {@code Cluster} + * to create your Session, the returned session will be already + * initialized. Even if you create a non-initialized session through + * {@link Cluster#newSession}, that session will get automatically + * initialized the first time that session is used for querying. This method + * is thus only useful if you use {@link Cluster#newSession} and want to + * explicitly force initialization without querying. + *

+ * Session initialization consists in connecting the Session to the known + * Cassandra hosts (at least those that should not be ignored due to + * the {@code LoadBalancingPolicy} in place). + *

+ * If the Cluster instance this Session depends on is not itself + * initialized, it will be initialized by this method. + *

+ * If the session is already initialized, this method is a no-op. + * + * @return this {@code Session} object. + * + * @throws NoHostAvailableException if this initialization triggers the + * Cluster initialization and no host amongst the contact points can be + * reached. + * @throws AuthenticationException if this initialization triggers the + * Cluster initialization and an authentication error occurs while contacting + * the initial contact points. + */ + public Session init(); + + /** + * Executes the provided query. + * + * This is a convenience method for {@code execute(new SimpleStatement(query))}. + * + * @param query the CQL query to execute. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, + * unauthorized or any other validation problem). + */ + public ResultSet execute(String query); + + /** + * Executes the provided query using the provided value. + * + * This is a convenience method for {@code execute(new SimpleStatement(query, values))}. + * + * @param query the CQL query to execute. + * @param values values required for the execution of {@code query}. See + * {@link SimpleStatement#SimpleStatement(String, Object...)} for more detail. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. 
+ * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, + * unauthorized or any other validation problem). + * @throws UnsupportedFeatureException if version 1 of the protocol + * is in use (i.e. if you've force version 1 through {@link Cluster.Builder#withProtocolVersion} + * or you use Cassandra 1.2). + */ + public ResultSet execute(String query, Object... values); + + /** + * Executes the provided query. + * + * This method blocks until at least some result has been received from the + * database. However, for SELECT queries, it does not guarantee that the + * result has been received in full. But it does guarantee that some + * response has been received from the database, and in particular + * guarantee that if the request is invalid, an exception will be thrown + * by this method. + * + * @param statement the CQL query to execute (that can be any {@code Statement}). + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, + * unauthorized or any other validation problem). + * @throws UnsupportedFeatureException if the protocol version 1 is in use and + * a feature not supported has been used. Features that are not supported by + * the version protocol 1 include: BatchStatement, ResultSet paging and binary + * values in RegularStatement. 
+ */ + public ResultSet execute(Statement statement); + + /** + * Executes the provided query asynchronously. + *

+ * This is a convenience method for {@code executeAsync(new SimpleStatement(query))}. + * + * @param query the CQL query to execute. + * @return a future on the result of the query. + */ + public ResultSetFuture executeAsync(String query); + + /** + * Executes the provided query asynchronously using the provided values. + * + * This is a convenience method for {@code executeAsync(new SimpleStatement(query, values))}. + * + * @param query the CQL query to execute. + * @param values values required for the execution of {@code query}. See + * {@link SimpleStatement#SimpleStatement(String, Object...)} for more detail. + * @return a future on the result of the query. + * + * @throws UnsupportedFeatureException if version 1 of the protocol + * is in use (i.e. if you've force version 1 through {@link Cluster.Builder#withProtocolVersion} + * or you use Cassandra 1.2). + */ + public ResultSetFuture executeAsync(String query, Object... values); + + /** + * Executes the provided query asynchronously. + * + * This method does not block. It returns as soon as the query has been + * passed to the underlying network stack. In particular, returning from + * this method does not guarantee that the query is valid or has even been + * submitted to a live node. Any exception pertaining to the failure of the + * query will be thrown when accessing the {@link ResultSetFuture}. + *

+ * Note that for queries that doesn't return a result (INSERT, UPDATE and + * DELETE), you will need to access the ResultSetFuture (that is call one of + * its get method to make sure the query was successful. + * + * @param statement the CQL query to execute (that can be either any {@code Statement}. + * @return a future on the result of the query. + * + * @throws UnsupportedFeatureException if the protocol version 1 is in use and + * a feature not supported has been used. Features that are not supported by + * the version protocol 1 include: BatchStatement, ResultSet paging and binary + * values in RegularStatement. + */ + public ResultSetFuture executeAsync(Statement statement); + + /** + * Prepares the provided query string. + * + * @param query the CQL query string to prepare + * @return the prepared statement corresponding to {@code query}. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to prepare this query. + */ + public PreparedStatement prepare(String query); + + /** + * Prepares the provided query. + *

+ * This method is essentially a shortcut for {@code prepare(statement.getQueryString())}, + * but note that the resulting {@code PreparedStatement} will inherit the query properties + * set on {@code statement}. Concretely, this means that in the following code: + *

+     *   RegularStatement toPrepare = new SimpleStatement("SELECT * FROM test WHERE k=?").setConsistencyLevel(ConsistencyLevel.QUORUM);
+     *   PreparedStatement prepared = session.prepare(toPrepare);
+     *   session.execute(prepared.bind("someValue"));
+     * 
+ * the final execution will be performed with Quorum consistency. + *

+ * Please note that if the same CQL statement is prepared more than once, all + * calls to this method will return the same {@code PreparedStatement} object + * but the method will still apply the properties of the prepared + * {@code Statement} to this object. + * + * @param statement the statement to prepare + * @return the prepared statement corresponding to {@code statement}. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to prepare this statement. + * @throws IllegalArgumentException if {@code statement.getValues() != null} + * (values for executing a prepared statement should be provided after preparation + * though the {@link PreparedStatement#bind} method or through a corresponding + * {@link BoundStatement}). + */ + public PreparedStatement prepare(RegularStatement statement); + + /** + * Prepares the provided query string asynchronously. + *

+ * This method is equivalent to {@link #prepare(String)} except that it + * does not block but return a future instead. Any error during preparation will + * be thrown when accessing the future, not by this method itself. + * + * @param query the CQL query string to prepare + * @return a future on the prepared statement corresponding to {@code query}. + */ + public ListenableFuture prepareAsync(String query); + + /** + * Prepares the provided query asynchronously. + *

+ * This method is essentially a shortcut for {@code prepareAsync(statement.getQueryString())}, + * but with the additional effect that the resulting {@code + * PreparedStatement} will inherit the query properties set on {@code statement}. + *

+ * Please note that if the same CQL statement is prepared more than once, all + * calls to this method will return the same {@code PreparedStatement} object + * but the method will still apply the properties of the prepared + * {@code Statement} to this object. + * + * @param statement the statement to prepare + * @return a future on the prepared statement corresponding to {@code statement}. + * + * @see Session#prepare(RegularStatement) + * + * @throws IllegalArgumentException if {@code statement.getValues() != null} + * (values for executing a prepared statement should be provided after preparation + * though the {@link PreparedStatement#bind} method or through a corresponding + * {@link BoundStatement}). + */ + public ListenableFuture prepareAsync(RegularStatement statement); + + /** + * Initiates a shutdown of this session instance. + *

+ * This method is asynchronous and return a future on the completion + * of the shutdown process. As soon a the session is shutdown, no + * new request will be accepted, but already submitted queries are + * allowed to complete. This method closes all connections of this + * session and reclaims all resources used by it. + *

+ * If for some reason you wish to expedite this process, the + * {@link CloseFuture#force} can be called on the result future. + *

+ * This method has no particular effect if the session was already closed + * (in which case the returned future will return immediately). + *

+ * Note that this method does not close the corresponding {@code Cluster} + * instance (which holds additional resources, in particular internal + * executors that must be shut down in order for the client program to + * terminate). + * If you want to do so, use {@link Cluster#close}, but note that it will + * close all sessions created from that cluster. + * + * @return a future on the completion of the shutdown process. + */ + public CloseFuture closeAsync(); + + /** + * Initiates a shutdown of this session instance and blocks until + * that shutdown completes. + *

+ * This method is a shortcut for {@code closeAsync().get()}. + *

+ * Note that this method does not close the corresponding {@code Cluster} + * instance (which holds additional resources, in particular internal + * executors that must be shut down in order for the client program to + * terminate). + * If you want to do so, use {@link Cluster#close}, but note that it will + * close all sessions created from that cluster. + */ + public void close(); + + /** + * Whether this Session instance has been closed. + *

+ * Note that this method returns true as soon as one closing this Session + * has started but it does not guarantee that the closing is done. If you + * want to guarantee that the closing is done, you can call {@code close()} + * and wait until it returns (or call the get method on {@code closeAsync()} + * with a very short timeout and check this doesn't timeout). + * + * @return {@code true} if this Session instance has been closed, {@code false} + * otherwise. + */ + public boolean isClosed(); + + /** + * Returns the {@code Cluster} object this session is part of. + * + * @return the {@code Cluster} object this session is part of. + */ + public Cluster getCluster(); + + /** + * Return a snapshot of the state of this Session. + *

+ * The returned object provides information on which hosts the session is + * connected to, how many connections are opened to each host, etc... + * The returned object is immutable, it is a snapshot of the Session State + * taken when this method is called. + * + * @return a snapshot of the state of this Session. + */ + public State getState(); + + /** + * The state of a Session. + *

+ * This mostly exposes information on the connections maintained by a Session: + * which host it is connected to, how many connection is has for each host, etc... + */ + public interface State { + /** + * The Session to which this State corresponds to. + * + * @return the Session to which this State corresponds to. + */ + public Session getSession(); + + /** + * The hosts to which the session is currently connected (more precisely, at the time + * this State has been grabbed). + *

+ * Please note that this method really returns the hosts for which the session currently + * holds a connection pool. A such, it's unlikely but not impossible for a host to be listed + * in the output of this method but to have {@code getOpenConnections} return 0, if the + * pool itself is created but not connections have been successfully opened yet. + * + * @return an immutable collection of the hosts to which the session is connected. + */ + public Collection getConnectedHosts(); + + /** + * The number of open connections to a given host. + *

+ * Note that this refers to active connections. The actual number of connections also + * includes {@link #getTrashedConnections(Host)}. + * + * @param host the host to get open connections for. + * @return The number of open connections to {@code host}. If the session + * is not connected to that host, 0 is returned. + */ + public int getOpenConnections(Host host); + + /** + * The number of "trashed" connections to a given host. + *

+ * When the load to a host decreases, the driver will reclaim some connections in order to save + * resources. No requests are sent to these connections anymore, but they are kept open for an + * additional amount of time ({@link PoolingOptions#getIdleTimeoutSeconds()}), in case the load + * goes up again. This method counts connections in that state. + * + * @param host the host to get trashed connections for. + * @return The number of trashed connections to {@code host}. If the session + * is not connected to that host, 0 is returned. + */ + public int getTrashedConnections(Host host); + + /** + * The number of queries that are currently being executed though a given host. + *

+ * This correspond to the number of queries that have been sent (by the session this + * is a State of) to the Cassandra Host on one of its connection but haven't yet returned. + * In that sense this provide a sort of measure of how busy the connections to that node + * are (at the time the {@code State} was grabbed at least). + * + * @param host the host to get in-flight queries for. + * @return the number of currently (as in 'at the time the state was grabbed') executing + * queries to {@code host}. + */ + public int getInFlightQueries(Host host); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SessionManager.java b/driver-core/src/main/java/com/datastax/driver/core/SessionManager.java new file mode 100644 index 00000000000..7f1f0df99c9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SessionManager.java @@ -0,0 +1,671 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicReference; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.exceptions.UnsupportedFeatureException; +import com.datastax.driver.core.policies.LoadBalancingPolicy; +import com.datastax.driver.core.policies.ReconnectionPolicy; +import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; +import com.datastax.driver.core.utils.MoreFutures; + +/** + * Driver implementation of the Session interface. + */ +class SessionManager extends AbstractSession { + + private static final Logger logger = LoggerFactory.getLogger(Session.class); + + final Cluster cluster; + final ConcurrentMap pools; + final HostConnectionPool.PoolState poolsState; + final AtomicReference closeFuture = new AtomicReference(); + + private volatile boolean isInit; + private volatile boolean isClosing; + + // Package protected, only Cluster should construct that. 
+ SessionManager(Cluster cluster) { + this.cluster = cluster; + this.pools = new ConcurrentHashMap(); + this.poolsState = new HostConnectionPool.PoolState(); + } + + public synchronized Session init() { + if (isInit) + return this; + + // If we haven't initialized the cluster, do it now + cluster.init(); + + // Create pools to initial nodes (and wait for them to be created) + Collection hosts = cluster.getMetadata().allHosts(); + createPoolsInParallel(hosts); + + isInit = true; + updateCreatedPools(); + return this; + } + + private void createPoolsInParallel(Collection hosts) { + List> futures = Lists.newArrayListWithCapacity(hosts.size()); + for (Host host : hosts) + if (host.state != Host.State.DOWN) + futures.add(maybeAddPool(host, null)); + try { + Futures.successfulAsList(futures).get(); + } catch (ExecutionException e) { + // Won't happen because we used successfulAsList + // And if a particular pool failed, maybeAddPool already handled it + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + public String getLoggedKeyspace() { + return poolsState.keyspace; + } + + public ResultSetFuture executeAsync(Statement statement) { + return executeQuery(makeRequestMessage(statement, null), statement); + } + + public ListenableFuture prepareAsync(String query) { + Connection.Future future = new Connection.Future(new Requests.Prepare(query)); + execute(future, Statement.DEFAULT); + return toPreparedStatement(query, future); + } + + public CloseFuture closeAsync() { + CloseFuture future = closeFuture.get(); + if (future != null) + return future; + + isClosing = true; + cluster.manager.removeSession(this); + + List futures = new ArrayList(pools.size()); + for (HostConnectionPool pool : pools.values()) + futures.add(pool.closeAsync()); + + future = new CloseFuture.Forwarding(futures); + + return closeFuture.compareAndSet(null, future) + ? 
future + : closeFuture.get(); // We raced, it's ok, return the future that was actually set + } + + public boolean isClosed() { + return closeFuture.get() != null; + } + + public Cluster getCluster() { + return cluster; + } + + public Session.State getState() { + return new State(this); + } + + private ListenableFuture toPreparedStatement(final String query, final Connection.Future future) { + return Futures.transform(future, new Function() { + public PreparedStatement apply(Message.Response response) { + switch (response.type) { + case RESULT: + Responses.Result rm = (Responses.Result)response; + switch (rm.kind) { + case PREPARED: + Responses.Result.Prepared pmsg = (Responses.Result.Prepared)rm; + PreparedStatement stmt = DefaultPreparedStatement.fromMessage(pmsg, cluster.getMetadata(), cluster.getConfiguration().getProtocolOptions().getProtocolVersionEnum(), query, poolsState.keyspace); + stmt = cluster.manager.addPrepared(stmt); + try { + // All Sessions are connected to the same nodes so it's enough to prepare only the nodes of this session. + // If that changes, we'll have to make sure this propagate to other sessions too. + prepare(stmt.getQueryString(), future.getAddress()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // This method doesn't propagate interruption, at least not for now. However, if we've + // interrupted preparing queries on other node it's not a problem as we'll re-prepare + // later if need be. So just ignore. 
+ } + return stmt; + default: + throw new DriverInternalError(String.format("%s response received when prepared statement was expected", rm.kind)); + } + case ERROR: + throw ((Responses.Error)response).asException(future.getAddress()); + default: + throw new DriverInternalError(String.format("%s response received when prepared statement was expected", response.type)); + } + } + }, executor()); // Since the transformation involves querying other nodes, we should not do that in an I/O thread + } + + Connection.Factory connectionFactory() { + return cluster.manager.connectionFactory; + } + + Configuration configuration() { + return cluster.manager.configuration; + } + + LoadBalancingPolicy loadBalancingPolicy() { + return cluster.manager.loadBalancingPolicy(); + } + + SpeculativeExecutionPolicy speculativeRetryPolicy() { + return cluster.manager.speculativeRetryPolicy(); + } + + ReconnectionPolicy reconnectionPolicy() { + return cluster.manager.reconnectionPolicy(); + } + + ListeningExecutorService executor() { + return cluster.manager.executor; + } + + ListeningExecutorService blockingExecutor() { + return cluster.manager.blockingExecutor; + } + + // Returns whether there was problem creating the pool + ListenableFuture forceRenewPool(final Host host, Connection reusedConnection) { + final HostDistance distance = cluster.manager.loadBalancingPolicy().distance(host); + if (distance == HostDistance.IGNORED) + return Futures.immediateFuture(true); + + if (isClosing) + return Futures.immediateFuture(false); + + final HostConnectionPool newPool = new HostConnectionPool(host, distance, this); + ListenableFuture poolInitFuture = newPool.initAsync(reusedConnection); + + final SettableFuture future = SettableFuture.create(); + + Futures.addCallback(poolInitFuture, new FutureCallback() { + @Override + public void onSuccess(Void result) { + HostConnectionPool previous = pools.put(host, newPool); + if (previous == null) { + logger.debug("Added connection pool for {}", host); + } 
else { + logger.debug("Renewed connection pool for {}", host); + previous.closeAsync(); + } + + // If we raced with a session shutdown, ensure that the pool will be closed. + if (isClosing) { + newPool.closeAsync(); + pools.remove(host); + future.set(false); + } else { + future.set(true); + } + } + + @Override + public void onFailure(Throwable t) { + logger.error("Error creating pool to " + host, t); + future.set(false); + } + }); + + return future; + } + + // Replace pool for a given host only if it's the given previous value (which can be null) + // This returns a future if the replacement was successful, or null if we raced. + private ListenableFuture replacePool(final Host host, HostDistance distance, HostConnectionPool previous, Connection reusedConnection) { + if (isClosing) + return MoreFutures.VOID_SUCCESS; + + final HostConnectionPool newPool = new HostConnectionPool(host, distance, this); + if (previous == null) { + if (pools.putIfAbsent(host, newPool) != null) { + return null; + } + } else { + if (!pools.replace(host, previous, newPool)) { + return null; + } + if (!previous.isClosed()) { + logger.warn("Replacing a pool that wasn't closed. Closing it now, but this was not expected."); + previous.closeAsync(); + } + } + + ListenableFuture poolInitFuture = newPool.initAsync(reusedConnection); + + Futures.addCallback(poolInitFuture, new FutureCallback() { + @Override + public void onSuccess(Void result) { + // If we raced with a session shutdown, ensure that the pool will be closed. 
+ if (isClosing) { + newPool.closeAsync(); + pools.remove(host); + } + } + + @Override + public void onFailure(Throwable t) { + pools.remove(host); + } + }); + return poolInitFuture; + } + + // Returns whether there was problem creating the pool + ListenableFuture maybeAddPool(final Host host, Connection reusedConnection) { + final HostDistance distance = cluster.manager.loadBalancingPolicy().distance(host); + if (distance == HostDistance.IGNORED) + return Futures.immediateFuture(true); + + HostConnectionPool previous = pools.get(host); + if (previous != null && !previous.isClosed()) + return Futures.immediateFuture(true); + + while (true) { + previous = pools.get(host); + if (previous != null && !previous.isClosed()) + return Futures.immediateFuture(true); + + final SettableFuture future = SettableFuture.create(); + ListenableFuture newPoolInit = replacePool(host, distance, previous, reusedConnection); + if (newPoolInit != null) { + Futures.addCallback(newPoolInit, new FutureCallback() { + @Override + public void onSuccess(Void result) { + logger.debug("Added connection pool for {}", host); + future.set(true); + } + + @Override + public void onFailure(Throwable t) { + if (t instanceof UnsupportedProtocolVersionException) { + cluster.manager.logUnsupportedVersionProtocol(host, ((UnsupportedProtocolVersionException)t).unsupportedVersion); + cluster.manager.triggerOnDown(host, false); + } else if (t instanceof ClusterNameMismatchException) { + ClusterNameMismatchException e = (ClusterNameMismatchException)t; + cluster.manager.logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName); + cluster.manager.triggerOnDown(host, false); + } else { + logger.error("Error creating pool to " + host, t); + } + future.set(false); + } + }); + return future; + } + } + } + + CloseFuture removePool(Host host) { + final HostConnectionPool pool = pools.remove(host); + return pool == null + ? 
CloseFuture.immediateFuture() + : pool.closeAsync(); + } + + /* + * When the set of live nodes change, the loadbalancer will change his + * mind on host distances. It might change it on the node that came/left + * but also on other nodes (for instance, if a node dies, another + * previously ignored node may be now considered). + * + * This method ensures that all hosts for which a pool should exist + * have one, and hosts that shouldn't don't. + */ + void updateCreatedPools() { + // This method does nothing during initialization. Some hosts may be non-responsive but not yet marked DOWN; if + // we execute the code below we would try to create their pool over and over again. + // It's called explicitly at the end of init(), once isInit has been set to true. + if (!isInit) + return; + + try { + // We do 2 iterations, so that we add missing pools first, and them remove all unecessary pool second. + // That way, we'll avoid situation where we'll temporarily lose connectivity + List toRemove = new ArrayList(); + List> poolCreationFutures = new ArrayList>(); + + for (Host h : cluster.getMetadata().allHosts()) { + HostDistance dist = loadBalancingPolicy().distance(h); + HostConnectionPool pool = pools.get(h); + + if (pool == null) { + if (dist != HostDistance.IGNORED && h.state == Host.State.UP) + poolCreationFutures.add(maybeAddPool(h, null)); + } else if (dist != pool.hostDistance) { + if (dist == HostDistance.IGNORED) { + toRemove.add(h); + } else { + pool.hostDistance = dist; + pool.ensureCoreConnections(); + } + } + } + + // Wait pool creation before removing, so we don't lose connectivity + try { + Futures.successfulAsList(poolCreationFutures).get(); + } catch (ExecutionException e) { + // Won't happen because we used successfulAsList + // And if a particular pool failed, maybeAddPool already handled it + } + + List> poolRemovalFutures = new ArrayList>(toRemove.size()); + for (Host h : toRemove) + poolRemovalFutures.add(removePool(h)); + + 
Futures.allAsList(poolRemovalFutures).get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + logger.error("Unexpected error while refreshing connection pools", e.getCause()); + } + } + + void updateCreatedPools(Host h) { + HostDistance dist = loadBalancingPolicy().distance(h); + HostConnectionPool pool = pools.get(h); + + try { + if (pool == null) { + if (dist != HostDistance.IGNORED && h.state == Host.State.UP) + try { + maybeAddPool(h, null).get(); + } catch (ExecutionException e) { + // Ignore, maybeAddPool has already handled the error + } + } else if (dist != pool.hostDistance) { + if (dist == HostDistance.IGNORED) { + removePool(h).get(); + } else { + pool.hostDistance = dist; + pool.ensureCoreConnections(); + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + logger.error("Unexpected error while refreshing connection pools", e.getCause()); + } + } + + void onDown(Host host) throws InterruptedException, ExecutionException { + // Note that with well behaved balancing policy (that ignore dead nodes), the removePool call is not necessary + // since updateCreatedPools should take care of it. But better protect against non well behaving policies. 
+ removePool(host).force().get(); + updateCreatedPools(); + } + + void onRemove(Host host) throws InterruptedException, ExecutionException { + onDown(host); + } + + void setKeyspace(String keyspace) { + long timeout = configuration().getSocketOptions().getConnectTimeoutMillis(); + try { + Future future = executeQuery(new Requests.Query("use " + keyspace), Statement.DEFAULT); + // Note: using the connection timeout isn't perfectly correct, we should probably change that someday + Uninterruptibles.getUninterruptibly(future, timeout, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + throw new DriverInternalError(String.format("No responses after %d milliseconds while setting current keyspace. This should not happen, unless you have setup a very low connection timeout.", timeout)); + } catch (ExecutionException e) { + throw DefaultResultSetFuture.extractCauseFromExecutionException(e); + } + } + + Message.Request makeRequestMessage(Statement statement, ByteBuffer pagingState) { + // We need the protocol version, which is only available once the cluster has initialized. Initialize the session to ensure this is the case. + // init() locks, so avoid if we know we don't need it. 
+ if (!isInit) + init(); + ProtocolVersion version = cluster.manager.protocolVersion(); + + ConsistencyLevel consistency = statement.getConsistencyLevel(); + if (consistency == null) + consistency = configuration().getQueryOptions().getConsistencyLevel(); + + ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel(); + if (version.compareTo(ProtocolVersion.V3) < 0 && statement instanceof BatchStatement) { + if (serialConsistency != null) + throw new UnsupportedFeatureException(version, "Serial consistency on batch statements is not supported"); + } else if (serialConsistency == null) + serialConsistency = configuration().getQueryOptions().getSerialConsistencyLevel(); + + long defaultTimestamp = Long.MIN_VALUE; + if (cluster.manager.protocolVersion().compareTo(ProtocolVersion.V3) >= 0) { + defaultTimestamp = statement.getDefaultTimestamp(); + if (defaultTimestamp == Long.MIN_VALUE) + defaultTimestamp = cluster.getConfiguration().getPolicies().getTimestampGenerator().next(); + } + + int fetchSize = statement.getFetchSize(); + ByteBuffer usedPagingState = pagingState; + + if (version == ProtocolVersion.V1) { + assert pagingState == null; + // We don't let the user change the fetchSize globally if the proto v1 is used, so we just need to + // check for the case of a per-statement override + if (fetchSize <= 0) + fetchSize = -1; + else if (fetchSize != Integer.MAX_VALUE) + throw new UnsupportedFeatureException(version, "Paging is not supported"); + } else if (fetchSize <= 0) { + fetchSize = configuration().getQueryOptions().getFetchSize(); + } + + if (fetchSize == Integer.MAX_VALUE) + fetchSize = -1; + + if (pagingState == null) { + usedPagingState = statement.getPagingState(); + } + + if (statement instanceof StatementWrapper) + statement = ((StatementWrapper)statement).getWrappedStatement(); + + if (statement instanceof RegularStatement) { + RegularStatement rs = (RegularStatement)statement; + + // It saddens me that we special case for the query 
builder here, but for now this is simpler. + // We could provide a general API in RegularStatement instead at some point but it's unclear what's + // the cleanest way to do that is right now (and it's probably not really that useful anyway). + if (version == ProtocolVersion.V1 && rs instanceof com.datastax.driver.core.querybuilder.BuiltStatement) + ((com.datastax.driver.core.querybuilder.BuiltStatement)rs).setForceNoValues(true); + + ByteBuffer[] rawValues = rs.getValues(version); + + if (version == ProtocolVersion.V1 && rawValues != null) + throw new UnsupportedFeatureException(version, "Binary values are not supported"); + + List values = rawValues == null ? Collections.emptyList() : Arrays.asList(rawValues); + String qString = rs.getQueryString(); + Requests.QueryProtocolOptions options = new Requests.QueryProtocolOptions(consistency, values, false, + fetchSize, usedPagingState, serialConsistency, defaultTimestamp); + return new Requests.Query(qString, options, statement.isTracing()); + } else if (statement instanceof BoundStatement) { + BoundStatement bs = (BoundStatement)statement; + if (!cluster.manager.preparedQueries.containsKey(bs.statement.getPreparedId().id)) { + throw new InvalidQueryException(String.format("Tried to execute unknown prepared query : %s. 
" + + "You may have used a PreparedStatement that was created with another Cluster instance.", bs.statement.getPreparedId().id)); + } + bs.ensureAllSet(); + boolean skipMetadata = version != ProtocolVersion.V1 && bs.statement.getPreparedId().resultSetMetadata != null; + Requests.QueryProtocolOptions options = new Requests.QueryProtocolOptions(consistency, Arrays.asList(bs.wrapper.values), skipMetadata, + fetchSize, usedPagingState, serialConsistency, defaultTimestamp); + return new Requests.Execute(bs.statement.getPreparedId().id, options, statement.isTracing()); + } else { + assert statement instanceof BatchStatement : statement; + assert pagingState == null; + + if (version == ProtocolVersion.V1) + throw new UnsupportedFeatureException(version, "Protocol level batching is not supported"); + + BatchStatement bs = (BatchStatement)statement; + bs.ensureAllSet(); + BatchStatement.IdAndValues idAndVals = bs.getIdAndValues(version); + Requests.BatchProtocolOptions options = new Requests.BatchProtocolOptions(consistency, serialConsistency, defaultTimestamp); + return new Requests.Batch(bs.batchType, idAndVals.ids, idAndVals.values, options, statement.isTracing()); + } + } + + /** + * Execute the provided request. + * + * This method will find a suitable node to connect to using the + * {@link LoadBalancingPolicy} and handle host failover. + */ + void execute(RequestHandler.Callback callback, Statement statement) { + // init() locks, so avoid if we know we don't need it. + if (!isInit) + init(); + new RequestHandler(this, callback, statement).sendRequest(); + } + + private void prepare(String query, InetSocketAddress toExclude) throws InterruptedException { + for (Map.Entry entry : pools.entrySet()) { + if (entry.getKey().getSocketAddress().equals(toExclude)) + continue; + + // Let's not wait too long if we can't get a connection. Things + // will fix themselves once the user tries a query anyway. 
+ Connection c = null; + boolean timedOut = false; + try { + c = entry.getValue().borrowConnection(200, TimeUnit.MILLISECONDS); + c.write(new Requests.Prepare(query)).get(); + } catch (ConnectionException e) { + // Again, not being able to prepare the query right now is no big deal, so just ignore + } catch (BusyConnectionException e) { + // Same as above + } catch (TimeoutException e) { + // Same as above + } catch (ExecutionException e) { + // We shouldn't really get exception while preparing a + // query, so log this (but ignore otherwise as it's not a big deal) + logger.error(String.format("Unexpected error while preparing query (%s) on %s", query, entry.getKey()), e); + // If the query timed out, that already released the connection + timedOut = e.getCause() instanceof OperationTimedOutException; + } finally { + if (c != null && !timedOut) + c.release(); + } + } + } + + ResultSetFuture executeQuery(Message.Request msg, Statement statement) { + + DefaultResultSetFuture future = new DefaultResultSetFuture(this, configuration().getProtocolOptions().getProtocolVersionEnum(), msg); + execute(future, statement); + return future; + } + + void cleanupIdleConnections(long now) { + for (HostConnectionPool pool : pools.values()) { + pool.cleanupIdleConnections(now); + } + } + + private static class State implements Session.State { + + private final SessionManager session; + private final List connectedHosts; + private final int[] openConnections; + private final int[] trashedConnections; + private final int[] inFlightQueries; + + private State(SessionManager session) { + this.session = session; + this.connectedHosts = ImmutableList.copyOf(session.pools.keySet()); + + this.openConnections = new int[connectedHosts.size()]; + this.trashedConnections = new int[connectedHosts.size()]; + this.inFlightQueries = new int[connectedHosts.size()]; + + int i = 0; + for (Host h : connectedHosts) { + HostConnectionPool p = session.pools.get(h); + // It's possible we race and the host 
has been removed since the beginning of this + // functions. In that case, the fact it's part of getConnectedHosts() but has no opened + // connections will be slightly weird, but it's unlikely enough that we don't bother avoiding. + if (p == null) { + openConnections[i] = 0; + trashedConnections[i] = 0; + inFlightQueries[i] = 0; + continue; + } + + openConnections[i] = p.opened(); + inFlightQueries[i] = p.totalInFlight.get(); + trashedConnections[i] = p.trashed(); + i++; + } + } + + private int getIdx(Host h) { + // We guarantee that we only ever create one Host object per-address, which means that '==' + // comparison is a proper way to test Host equality. Given that, the number of hosts + // per-session will always be small enough (even 1000 is kind of small and even with a 1000+ + // node cluster, you probably don't want a Session to connect to all of them) that iterating + // over connectedHosts will never be much more inefficient than keeping a + // Map. And it's less garbage/memory consumption so... + for (int i = 0; i < connectedHosts.size(); i++) + if (h == connectedHosts.get(i)) + return i; + return -1; + } + + public Session getSession() { + return session; + } + + public Collection getConnectedHosts() { + return connectedHosts; + } + + public int getOpenConnections(Host host) { + int i = getIdx(host); + return i < 0 ? 0 : openConnections[i]; + } + + public int getTrashedConnections(Host host) { + int i = getIdx(host); + return i < 0 ? 0 : trashedConnections[i]; + } + + public int getInFlightQueries(Host host) { + int i = getIdx(host); + return i < 0 ? 
0 : inFlightQueries[i]; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SettableByIndexData.java b/driver-core/src/main/java/com/datastax/driver/core/SettableByIndexData.java new file mode 100644 index 00000000000..31772c23f5d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SettableByIndexData.java @@ -0,0 +1,288 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * Collection of (typed) CQL values that can set by index (starting a 0). + */ +public interface SettableByIndexData> { + + /** + * Sets the {@code i}th value to the provided boolean. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type BOOLEAN. + */ + public T setBool(int i, boolean v); + + /** + * Set the {@code i}th value to the provided integer. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. 
+ * @throws InvalidTypeException if value {@code i} is not of type INT. + */ + public T setInt(int i, int v); + + /** + * Sets the {@code i}th value to the provided long. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type BIGINT or COUNTER. + */ + public T setLong(int i, long v); + + /** + * Set the {@code i}th value to the provided date. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type TIMESTAMP. + */ + public T setDate(int i, Date v); + + /** + * Sets the {@code i}th value to the provided float. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type FLOAT. + */ + public T setFloat(int i, float v); + + /** + * Sets the {@code i}th value to the provided double. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type DOUBLE. + */ + public T setDouble(int i, double v); + + /** + * Sets the {@code i}th value to the provided string. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. 
+ * @throws InvalidTypeException if value {@code i} is of neither of the + * following types: VARCHAR, TEXT or ASCII. + */ + public T setString(int i, String v); + + /** + * Sets the {@code i}th value to the provided byte buffer. + * + * This method validate that the type of the column set is BLOB. If you + * want to insert manually serialized data into columns of another type, + * use {@link #setBytesUnsafe} instead. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type BLOB. + */ + public T setBytes(int i, ByteBuffer v); + + /** + * Sets the {@code i}th value to the provided byte buffer. + * + * Contrary to {@link #setBytes}, this method does not check the + * type of the column set. If you insert data that is not compatible with + * the type of the column, you will get an {@code InvalidQueryException} at + * execute time. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public T setBytesUnsafe(int i, ByteBuffer v); + + /** + * Sets the {@code i}th value to the provided big integer. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type VARINT. + */ + public T setVarint(int i, BigInteger v); + + /** + * Sets the {@code i}th value to the provided big decimal. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. 
+ * @throws InvalidTypeException if value {@code i} is not of type DECIMAL. + */ + public T setDecimal(int i, BigDecimal v); + + /** + * Sets the {@code i}th value to the provided UUID. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type UUID or + * TIMEUUID, or if value {@code i} is of type TIMEUUID but {@code v} is + * not a type 1 UUID. + */ + public T setUUID(int i, UUID v); + + /** + * Sets the {@code i}th value to the provided inet address. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not of type INET. + */ + public T setInet(int i, InetAddress v); + + /** + * Sets the {@code i}th value to the provided list. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a list type or + * if the elements of {@code v} are not of the type of the elements of + * column {@code i}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public T setList(int i, List v); + + /** + * Sets the {@code i}th value to the provided map. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a map type or + * if the elements (keys or values) of {@code v} are not of the type of the + * elements of column {@code i}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public T setMap(int i, Map v); + + /** + * Sets the {@code i}th value to the provided set. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a set type or + * if the elements of {@code v} are not of the type of the elements of + * column {@code i}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public T setSet(int i, Set v); + + /** + * Sets the {@code i}th value to the provided UDT value. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a UDT value or if its definition + * does not correspond to the one of {@code v}. + */ + public T setUDTValue(int i, UDTValue v); + + /** + * Sets the {@code i}th value to the provided tuple value. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws InvalidTypeException if value {@code i} is not a tuple value or if its types + * do not correspond to the ones of {@code v}. + */ + public T setTupleValue(int i, TupleValue v); + + /** + * Sets the {@code i}th value to {@code null}. + *

+ * This is mainly intended for CQL types which map to native Java types. + * + * @param i the index of the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public T setToNull(int i); +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/SettableByNameData.java b/driver-core/src/main/java/com/datastax/driver/core/SettableByNameData.java new file mode 100644 index 00000000000..50a6e9a6c76 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SettableByNameData.java @@ -0,0 +1,336 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * Collection of (typed) CQL values that can set by name. + */ +public interface SettableByNameData> { + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided boolean. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. 
+ * @throws InvalidTypeException if (any one occurrence of) {@code name} is not of type BOOLEAN. + */ + public T setBool(String name, boolean v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided integer. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any one occurrence of) {@code name} is not of type INT. + */ + public T setInt(String name, int v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided long. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type BIGINT or COUNTER. + */ + public T setLong(String name, long v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided date. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type TIMESTAMP. + */ + public T setDate(String name, Date v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided float. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. 
+ * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type FLOAT. + */ + public T setFloat(String name, float v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided double. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type DOUBLE. + */ + public T setDouble(String name, double v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided string. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * of neither of the following types: VARCHAR, TEXT or ASCII. + */ + public T setString(String name, String v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided byte buffer. + * + * This method validate that the type of the column set is BLOB. If you + * want to insert manually serialized data into columns of another type, + * use {@link #setBytesUnsafe} instead. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is not of type BLOB. 
+ */ + public T setBytes(String name, ByteBuffer v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided byte buffer. + * + * Contrary to {@link #setBytes}, this method does not check the + * type of the column set. If you insert data that is not compatible with + * the type of the column, you will get an {@code InvalidQueryException} at + * execute time. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + */ + public T setBytesUnsafe(String name, ByteBuffer v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided big integer. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type VARINT. + */ + public T setVarint(String name, BigInteger v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided big decimal. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type DECIMAL. + */ + public T setDecimal(String name, BigDecimal v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided UUID. 
+ * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type UUID or TIMEUUID, or if value {@code name} is of type + * TIMEUUID but {@code v} is not a type 1 UUID. + */ + public T setUUID(String name, UUID v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided inet address. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not of type INET. + */ + public T setInet(String name, InetAddress v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided list. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a list type or if the elements of {@code v} are not of the type of + * the elements of column {@code name}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public T setList(String name, List v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided map. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a map type or if the elements (keys or values) of {@code v} are not of + * the type of the elements of column {@code name}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public T setMap(String name, Map v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided set. + *

+ * Please note that {@code null} values are not supported inside collection by CQL. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a map type or if the elements of {@code v} are not of the type of + * the elements of column {@code name}. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections + * by CQL. + */ + public T setSet(String name, Set v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided UDT value. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws InvalidTypeException if value {@code i} is not a UDT value or if its definition + * does not correspond to the one of {@code v}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a UDT value or if the definition of column {@code name} does not correspond to + * the one of {@code v}. + */ + public T setUDTValue(String name, UDTValue v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the + * provided tuple value. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @param v the value to set. + * @return this object. + * + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. 
+ * @throws InvalidTypeException if (any occurrence of) {@code name} is + * not a tuple value or if the types of column {@code name} do not correspond to + * the ones of {@code v}. + */ + public T setTupleValue(String name, TupleValue v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to {@code null}. + *

+ * This is mainly intended for CQL types which map to native Java types. + * + * @param name the name of the value to set; if {@code name} is present multiple + * times, all its values are set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + */ + public T setToNull(String name); +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/SettableData.java b/driver-core/src/main/java/com/datastax/driver/core/SettableData.java new file mode 100644 index 00000000000..57dda0d91dd --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SettableData.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; +import java.util.*; + +/** + * Collection of (typed) CQL values that can set either by index (starting a 0) or by name. + */ +public interface SettableData> extends SettableByIndexData, SettableByNameData { +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleJSONParser.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleJSONParser.java new file mode 100644 index 00000000000..3ec443f3203 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleJSONParser.java @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.*; + +/** + * A very simple json parser. + * The only reason we need to read json in the driver is because for + * historical reason Cassandra encodes a few properties using json in + * the schema and we need to decode them. + * + * We however don't need a full-blown JSON library because: + * 1) we know we only need to decode string lists and string maps + * 2) we can basically assume the input is valid, we don't particularly + * have to bother about decoding exactly JSON as long as we at least + * decode what we need. + * 3) we don't really care much about performance, none of this is done + * in performance sensitive parts. + * + * So instead of pulling a new dependency, we roll out our own very dumb + * parser. We should obviously not expose this publicly. 
+ */ +class SimpleJSONParser { + + private final String input; + private int idx; + + private SimpleJSONParser(String input) { + this.input = input; + } + + public static List parseStringList(String input) { + if (input == null || input.isEmpty()) + return Collections.emptyList(); + + List output = new ArrayList(); + SimpleJSONParser parser = new SimpleJSONParser(input); + if (parser.nextCharSkipSpaces() != '[') + throw new IllegalArgumentException("Not a JSON list: " + input); + + char c = parser.nextCharSkipSpaces(); + if (c == ']') + return output; + + while (true) { + assert c == '"'; + output.add(parser.nextString()); + c = parser.nextCharSkipSpaces(); + if (c == ']') + return output; + assert c == ','; + c = parser.nextCharSkipSpaces(); + } + } + + public static Map parseStringMap(String input) { + if (input == null || input.isEmpty()) + return Collections.emptyMap(); + + Map output = new HashMap(); + SimpleJSONParser parser = new SimpleJSONParser(input); + if (parser.nextCharSkipSpaces() != '{') + throw new IllegalArgumentException("Not a JSON map: " + input); + + char c = parser.nextCharSkipSpaces(); + if (c == '}') + return output; + + while (true) { + assert c == '"'; + String key = parser.nextString(); + c = parser.nextCharSkipSpaces(); + assert c == ':'; + c = parser.nextCharSkipSpaces(); + assert c == '"'; + String value = parser.nextString(); + output.put(key, value); + c = parser.nextCharSkipSpaces(); + if (c == '}') + return output; + assert c == ','; + c = parser.nextCharSkipSpaces(); + } + } + + /** + * Read the next char, the one at position idx, and advance ix. + */ + private char nextChar() { + if (idx >= input.length()) + throw new IllegalArgumentException("Invalid json input: " + input); + return input.charAt(idx++); + } + + /** + * Same as nextChar, except that it skips space characters (' ', '\t' and '\n'). 
+ */ + private char nextCharSkipSpaces() { + char c = nextChar(); + while (c == ' ' || c == '\t' || c == '\n') + c = nextChar(); + return c; + } + + /** + * Reads a String, assuming idx is on the first character of the string (i.e. the + * one after the opening double-quote character). + * After the string has been read, idx will be on the first character after + * the closing double-quote. + */ + private String nextString() { + assert input.charAt(idx-1) == '"' : "Char is '" + input.charAt(idx-1) + '\''; + StringBuilder sb = new StringBuilder(); + while (true) { + char c = nextChar(); + switch (c) { + case '\n': + case '\r': + throw new IllegalArgumentException("Unterminated string"); + case '\\': + c = nextChar(); + switch (c) { + case 'b': + sb.append('\b'); + break; + case 't': + sb.append('\t'); + break; + case 'n': + sb.append('\n'); + break; + case 'f': + sb.append('\f'); + break; + case 'r': + sb.append('\r'); + break; + case 'u': + sb.append((char)Integer.parseInt(input.substring(idx, idx+4), 16)); + idx += 4; + break; + case '"': + case '\'': + case '\\': + case '/': + sb.append(c); + break; + default: + throw new IllegalArgumentException("Illegal escape"); + } + break; + default: + if (c == '"') + return sb.toString(); + sb.append(c); + } + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java new file mode 100644 index 00000000000..a736abe10da --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java @@ -0,0 +1,254 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; + +/** + * A simple {@code RegularStatement} implementation built directly from a query + * string. + */ +public class SimpleStatement extends RegularStatement { + + private final String query; + private final Object[] values; + + private volatile ByteBuffer routingKey; + private volatile String keyspace; + + /** + * Creates a new {@code SimpleStatement} with the provided query string (and no values). + * + * @param query the query string. + */ + public SimpleStatement(String query) { + this.query = query; + this.values = null; + } + + /** + * Creates a new {@code SimpleStatement} with the provided query string and values. + *

+ * This version of SimpleStatement is useful when you do not want to execute a + * query only once (and thus do not want to resort to prepared statement), but + * do not want to convert all column values to string (typically, if you have blob + * values, encoding them to a hexadecimal string is not very efficient). In + * that case, you can provide a query string with bind marker to this constructor + * along with the values for those bind variables. When executed, the server will + * prepare the provided, bind the provided values to that prepare statement and + * execute the resulting statement. Thus, + *

+     *   session.execute(new SimpleStatement(query, value1, value2, value3));
+     * 
+ * is functionally equivalent to + *
+     *   PreparedStatement ps = session.prepare(query);
+     *   session.execute(ps.bind(value1, value2, value3));
+     * 
+ * except that the former version: + *
    + *
  • Requires only one round-trip to a Cassandra node.
  • + *
  • Does not left any prepared statement stored in memory (neither client or + * server side) once it has been executed.
  • + *
+ *

+ * Note that the type of the {@code values} provided to this method will + * not be validated by the driver as is done by {@link BoundStatement#bind} since + * {@code query} is not parsed (and hence the driver cannot know what those value + * should be). If too much or too little values are provided or if a value is not + * a valid one for the variable it is bound to, an + * {@link com.datastax.driver.core.exceptions.InvalidQueryException} will be thrown + * by Cassandra at execution time. An {@code IllegalArgumentException} may be + * thrown by this constructor however if one of the value does not correspond to + * any CQL3 type (for instance, if it is a custom class). + * + * @param query the query string. + * @param values values required for the execution of {@code query}. + * + * @throws IllegalArgumentException if one of {@code values} is not of a type + * corresponding to a CQL3 type, i.e. is not a Class that could be returned + * by {@link DataType#asJavaClass}. + */ + public SimpleStatement(String query, Object... values) { + if (values.length > 65535) + throw new IllegalArgumentException("Too many values, the maximum allowed is 65535"); + this.query = query; + this.values = values; + } + + private static ByteBuffer[] convert(Object[] values, ProtocolVersion protocolVersion) { + ByteBuffer[] serializedValues = new ByteBuffer[values.length]; + for (int i = 0; i < values.length; i++) { + Object value = values[i]; + try { + if (value instanceof Token) + value = ((Token)value).getValue(); + serializedValues[i] = DataType.serializeValue(value, protocolVersion); + } catch (IllegalArgumentException e) { + // Catch and rethrow to provide a more helpful error message (one that include which value is bad) + throw new IllegalArgumentException(String.format("Value %d of type %s does not correspond to any CQL3 type", i, value.getClass())); + } + } + return serializedValues; + } + + /** + * Returns the query string. 
+ * + * @return the query string; + */ + @Override + public String getQueryString() { + return query; + } + + @Override + public ByteBuffer[] getValues(ProtocolVersion protocolVersion) { + return values == null ? null : convert(values, protocolVersion); + } + + /** + * The number of values for this statement, that is the size of the array + * that will be returned by {@code getValues}. + * + * @return the number of values. + */ + public int valuesCount() { + return values == null ? 0 : values.length; + } + + @Override + public boolean hasValues() { + return values != null && values.length > 0; + } + + /** + * Returns the routing key for the query. + *

+ * Unless the routing key has been explicitly set through + * {@link #setRoutingKey}, this method will return {@code null} to + * avoid having to parse the query string to retrieve the partition key. + * + * @return the routing key set through {@link #setRoutingKey} if such a key + * was set, {@code null} otherwise. + * + * @see Statement#getRoutingKey + */ + @Override + public ByteBuffer getRoutingKey() { + return routingKey; + } + + /** + * Sets the routing key for this query. + *

+ * This method allows you to manually provide a routing key for this query. It + * is thus optional since the routing key is only an hint for token aware + * load balancing policy but is never mandatory. + *

+ * If the partition key for the query is composite, use the + * {@link #setRoutingKey(ByteBuffer...)} method instead to build the + * routing key. + * + * @param routingKey the raw (binary) value to use as routing key. + * @return this {@code SimpleStatement} object. + * + * @see Statement#getRoutingKey + */ + public SimpleStatement setRoutingKey(ByteBuffer routingKey) { + this.routingKey = routingKey; + return this; + } + + /** + * Returns the keyspace this query operates on. + *

+ * Unless the keyspace has been explicitly set through {@link #setKeyspace}, + * this method will return {@code null} to avoid having to parse the query + * string. + * + * @return the keyspace set through {@link #setKeyspace} if such keyspace was + * set, {@code null} otherwise. + * + * @see Statement#getKeyspace + */ + @Override + public String getKeyspace() { + return keyspace; + } + + /** + * Sets the keyspace this query operates on. + *

+ * This method allows you to manually provide a keyspace for this query. It + * is thus optional since the value returned by this method is only an hint + * for token aware load balancing policy but is never mandatory. + *

+ * Do note that if the query does not use a fully qualified keyspace, then + * you do not need to set the keyspace through that method as the + * currently logged in keyspace will be used. + * + * @param keyspace the name of the keyspace this query operates on. + * @return this {@code SimpleStatement} object. + * + * @see Statement#getKeyspace + */ + public SimpleStatement setKeyspace(String keyspace) { + this.keyspace = keyspace; + return this; + } + + /** + * Sets the routing key for this query. + *

+ * See {@link #setRoutingKey(ByteBuffer)} for more information. This + * method is a variant for when the query partition key is composite and + * thus the routing key must be built from multiple values. + * + * @param routingKeyComponents the raw (binary) values to compose to obtain + * the routing key. + * @return this {@code SimpleStatement} object. + * + * @see Statement#getRoutingKey + */ + public SimpleStatement setRoutingKey(ByteBuffer... routingKeyComponents) { + this.routingKey = compose(routingKeyComponents); + return this; + } + + // TODO: we could find that a better place (but it's not expose so it doesn't matter too much) + static ByteBuffer compose(ByteBuffer... buffers) { + int totalLength = 0; + for (ByteBuffer bb : buffers) + totalLength += 2 + bb.remaining() + 1; + + ByteBuffer out = ByteBuffer.allocate(totalLength); + for (ByteBuffer buffer : buffers) + { + ByteBuffer bb = buffer.duplicate(); + putShortLength(out, bb.remaining()); + out.put(bb); + out.put((byte) 0); + } + out.flip(); + return out; + } + + private static void putShortLength(ByteBuffer bb, int length) { + bb.put((byte) ((length >> 8) & 0xFF)); + bb.put((byte) (length & 0xFF)); + } +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java new file mode 100644 index 00000000000..d7b1d9ecd64 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java @@ -0,0 +1,292 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * Options to configure low-level socket options for the connections kept + * to the Cassandra hosts. + */ +public class SocketOptions { + + /** + * The default connection timeout in milliseconds if none is set explicitly + * using {@link #setConnectTimeoutMillis}. + *

+ * That default is of 5 seconds. + */ + public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 5000; + + /** + * The default read timeout in milliseconds if none is set explicitly + * using {@link #setReadTimeoutMillis}. + *

+ * That default is of 12 seconds so as to be slightly bigger that the + * default Cassandra timeout. + * + * @see #getReadTimeoutMillis for more details on this timeout. + */ + public static final int DEFAULT_READ_TIMEOUT_MILLIS = 12000; + + private volatile int connectTimeoutMillis = DEFAULT_CONNECT_TIMEOUT_MILLIS; + private volatile int readTimeoutMillis = DEFAULT_READ_TIMEOUT_MILLIS; + private volatile Boolean keepAlive; + private volatile Boolean reuseAddress; + private volatile Integer soLinger; + private volatile Boolean tcpNoDelay = Boolean.TRUE; + private volatile Integer receiveBufferSize; + private volatile Integer sendBufferSize; + + /** + * Creates a new {@code SocketOptions} instance with default values. + */ + public SocketOptions() {} + + /** + * The connection timeout in milliseconds. + *

+ * As the name implies, the connection timeout defines how long the driver + * waits to establish a new connection to a Cassandra node before giving up. + * + * @return the connection timeout in milliseconds + */ + public int getConnectTimeoutMillis() { + return connectTimeoutMillis; + } + + /** + * Sets the connection timeout in milliseconds. + * + * @param connectTimeoutMillis the timeout to set. + * @return this {@code SocketOptions}. + */ + public SocketOptions setConnectTimeoutMillis(int connectTimeoutMillis) { + this.connectTimeoutMillis = connectTimeoutMillis; + return this; + } + + /** + * The per-host read timeout in milliseconds. + *

+ * This defines how long the driver will wait for a given Cassandra node to + * answer a query. + *

+ * Please note that this is not the maximum time a call to {@link Session#execute} may block; + * this is the maximum time that call will wait for one particular + * Cassandra host, but other hosts will be tried if one of them timeout. In + * other words, a {@link Session#execute} call may theoretically wait up to + * {@code getReadTimeoutMillis() * } (though the + * total number of hosts tried for a given query also depends on the + * {@link com.datastax.driver.core.policies.LoadBalancingPolicy} in use). + * If you want to control how long to wait for a query, use {@link Session#executeAsync} + * and the {@code ResultSetFuture#get(long, TimeUnit)} method. + *

+ * Also note that for efficiency reasons, this read timeout is approximate: it + * has an accuracy of up to 100 milliseconds (i.e. it may fire up to 100 milliseconds late). + * It is not meant to be used for precise timeout, but rather as a protection + * against misbehaving Cassandra nodes. + *

+ * + * @return the read timeout in milliseconds. + */ + public int getReadTimeoutMillis() { + return readTimeoutMillis; + } + + /** + * Sets the per-host read timeout in milliseconds. + *

+ * When setting this value, keep in mind the following: + *

    + *
  • the timeout settings used on the Cassandra side ({@code *_request_timeout_in_ms} + * in {@code cassandra.yaml}) should be taken into account when picking a value for this + * read timeout. In particular, if this read timeout option is lower than Cassandra's + * timeout, the driver might assume that the host is not responsive and mark it down.
  • + *
  • the read timeout is only approximate and only control the timeout to one Cassandra + * host, not the full query (see {@link #getReadTimeoutMillis} for more details). If a + * high level of precision on the timeout to a request is required, you should use + * the {@link ResultSetFuture#get(long, java.util.concurrent.TimeUnit)} method. + *
  • + *
+ *

+ * Setting a value of 0 disables read timeouts. + * + * @param readTimeoutMillis the timeout to set. + * @return this {@code SocketOptions}. + */ + public SocketOptions setReadTimeoutMillis(int readTimeoutMillis) { + this.readTimeoutMillis = readTimeoutMillis; + return this; + } + + /** + * Returns whether TCP keepalive is enabled. + * + * @return the value of the option, or {@code null} if it is not set. + * @see #setKeepAlive(boolean) + */ + public Boolean getKeepAlive() { + return keepAlive; + } + + /** + * Sets whether to enable TCP keepalive. + *

+ * By default, this option is not set by the driver. The actual value will be the default + * from the underlying Netty transport (Java NIO or native epoll). + * + * @param keepAlive whether to enable or disable the option. + * @return this {@code SocketOptions}. + * + * @see java.net.SocketOptions#TCP_NODELAY + */ + public SocketOptions setKeepAlive(boolean keepAlive) { + this.keepAlive = keepAlive; + return this; + } + + /** + * Returns whether reuse-address is enabled. + * + * @return the value of the option, or {@code null} if it is not set. + * @see #setReuseAddress(boolean) + */ + public Boolean getReuseAddress() { + return reuseAddress; + } + + /** + * Sets whether to enable reuse-address. + *

+ * By default, this option is not set by the driver. The actual value will be the default + * from the underlying Netty transport (Java NIO or native epoll). + * + * @param reuseAddress whether to enable or disable the option. + * @return this {@code SocketOptions}. + * + * @see java.net.SocketOptions#SO_REUSEADDR + */ + public SocketOptions setReuseAddress(boolean reuseAddress) { + this.reuseAddress = reuseAddress; + return this; + } + + /** + * Returns the linger-on-close timeout. + * + * @return the value of the option, or {@code null} if it is not set. + * + * @see #setSoLinger(int) + */ + public Integer getSoLinger() { + return soLinger; + } + + /** + * Sets the linger-on-close timeout. + *

+ * By default, this option is not set by the driver. The actual value will be the default + * from the underlying Netty transport (Java NIO or native epoll). + + * @param soLinger the new value. + * @return this {@code SocketOptions}. + * + * @see java.net.SocketOptions#SO_LINGER + */ + public SocketOptions setSoLinger(int soLinger) { + this.soLinger = soLinger; + return this; + } + + /** + * Returns whether Nagle's algorithm is disabled. + * + * @return the value of the option ({@code true} means Nagle is disabled), or {@code null} if it is not set. + * + * @see #setTcpNoDelay(boolean) + */ + public Boolean getTcpNoDelay() { + return tcpNoDelay; + } + + /** + * Sets whether to disable Nagle's algorithm. + *

+ * By default, this option is set to {@code true} (Nagle disabled). + * + * @param tcpNoDelay whether to enable or disable the option. + * @return this {@code SocketOptions}. + * + * @see java.net.SocketOptions#TCP_NODELAY + */ + public SocketOptions setTcpNoDelay(boolean tcpNoDelay) { + this.tcpNoDelay = tcpNoDelay; + return this; + } + + /** + * Returns the hint to the size of the underlying buffers for incoming network I/O. + * + * @return the value of the option, or {@code null} if it is not set. + * + * @see #setReceiveBufferSize(int) + */ + public Integer getReceiveBufferSize() { + return receiveBufferSize; + } + + /** + * Sets a hint to the size of the underlying buffers for incoming network I/O. + *

+ * By default, this option is not set by the driver. The actual value will be the default + * from the underlying Netty transport (Java NIO or native epoll). + * + * @param receiveBufferSize the new value. + * @return this {@code SocketOptions}. + * + * @see java.net.SocketOptions#SO_RCVBUF + */ + public SocketOptions setReceiveBufferSize(int receiveBufferSize) { + this.receiveBufferSize = receiveBufferSize; + return this; + } + + /** + * Returns the hint to the size of the underlying buffers for outgoing network I/O. + * + * @return the value of the option, or {@code null} if it is not set. + * + * @see #setSendBufferSize(int) + */ + public Integer getSendBufferSize() { + return sendBufferSize; + } + + /** + * Sets a hint to the size of the underlying buffers for outgoing network I/O. + *

+ * By default, this option is not set by the driver. The actual value will be the default + * from the underlying Netty transport (Java NIO or native epoll). + * + * @param sendBufferSize the new value. + * @return this {@code SocketOptions}. + * + * @see java.net.SocketOptions#SO_SNDBUF + */ + public SocketOptions setSendBufferSize(int sendBufferSize) { + this.sendBufferSize = sendBufferSize; + return this; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Statement.java b/driver-core/src/main/java/com/datastax/driver/core/Statement.java new file mode 100644 index 00000000000..9d2bae5fe59 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Statement.java @@ -0,0 +1,449 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; + +import com.datastax.driver.core.exceptions.PagingStateException; +import com.datastax.driver.core.policies.RetryPolicy; +import com.datastax.driver.core.querybuilder.BuiltStatement; + +/** + * An executable query. + *

+ * This represents either a {@link RegularStatement}, a {@link BoundStatement} or a + * {@link BatchStatement} along with the querying options (consistency level, + * whether to trace the query, ...). + */ +public abstract class Statement { + + // An exception to the RegularStatement, BoundStatement or BatchStatement rule above. This is + // used when preparing a statement and for other internal queries. Do not expose publicly. + static final Statement DEFAULT = new Statement() { + @Override + public ByteBuffer getRoutingKey() { + return null; + } + + @Override + public String getKeyspace() { + return null; + } + }; + + private volatile ConsistencyLevel consistency; + private volatile ConsistencyLevel serialConsistency; + private volatile boolean traceQuery; + private volatile int fetchSize; + private volatile long defaultTimestamp = Long.MIN_VALUE; + private volatile RetryPolicy retryPolicy; + private volatile ByteBuffer pagingState; + protected volatile Boolean idempotent; + + // We don't want to expose the constructor, because the code relies on this being only sub-classed by RegularStatement, BoundStatement and BatchStatement + Statement() { + } + + /** + * Sets the consistency level for the query. + * + * @param consistency the consistency level to set. + * @return this {@code Statement} object. + */ + public Statement setConsistencyLevel(ConsistencyLevel consistency) { + this.consistency = consistency; + return this; + } + + /** + * The consistency level for this query. + * + * @return the consistency level for this query, or {@code null} if no + * consistency level has been specified (through {@code setConsistencyLevel}). + * In the latter case, the default consistency level will be used. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } + + /** + * Sets the serial consistency level for the query. + * + * The serial consistency level is only used by conditional updates (so INSERT, UPDATE + * and DELETE with an IF condition). 
For those, the serial consistency level defines + * the consistency level of the serial phase (or "paxos" phase) while the + * normal consistency level defines the consistency for the "learn" phase, i.e. what + * type of reads will be guaranteed to see the update right away. For instance, if + * a conditional write has a regular consistency of QUORUM (and is successful), then a + * QUORUM read is guaranteed to see that write. But if the regular consistency of that + * write is ANY, then only a read with a consistency of SERIAL is guaranteed to see it + * (even a read with consistency ALL is not guaranteed to be enough). + *

+ * The serial consistency can only be one of {@code ConsistencyLevel.SERIAL} or + * {@code ConsistencyLevel.LOCAL_SERIAL}. While {@code ConsistencyLevel.SERIAL} guarantees full + * linearizability (with other SERIAL updates), {@code ConsistencyLevel.LOCAL_SERIAL} only + * guarantees it in the local data center. + *

+ * The serial consistency level is ignored for any query that is not a conditional + * update (serial reads should use the regular consistency level for instance). + * + * @param serialConsistency the serial consistency level to set. + * @return this {@code Statement} object. + * + * @throws IllegalArgumentException if {@code serialConsistency} is not one of + * {@code ConsistencyLevel.SERIAL} or {@code ConsistencyLevel.LOCAL_SERIAL}. + */ + public Statement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { + if (serialConsistency != ConsistencyLevel.SERIAL && serialConsistency != ConsistencyLevel.LOCAL_SERIAL) + throw new IllegalArgumentException(); + this.serialConsistency = serialConsistency; + return this; + } + + /** + * The serial consistency level for this query. + *

+ * See {@link #setSerialConsistencyLevel} for more detail on the serial consistency level. + * + * @return the consistency level for this query, or {@code null} if no serial + * consistency level has been specified (through {@code setSerialConsistencyLevel}). + * In the latter case, the default serial consistency level will be used. + */ + public ConsistencyLevel getSerialConsistencyLevel() { + return serialConsistency; + } + + /** + * Enables tracing for this query. + * + * By default (that is unless you call this method), tracing is not enabled. + * + * @return this {@code Statement} object. + */ + public Statement enableTracing() { + this.traceQuery = true; + return this; + } + + /** + * Disables tracing for this query. + * + * @return this {@code Statement} object. + */ + public Statement disableTracing() { + this.traceQuery = false; + return this; + } + + /** + * Returns whether tracing is enabled for this query or not. + * + * @return {@code true} if this query has tracing enabled, {@code false} + * otherwise. + */ + public boolean isTracing() { + return traceQuery; + } + + /** + * Returns the routing key (in binary raw form) to use for token aware + * routing of this query. + *

+ * The routing key is optional in that implementers are free to + * return {@code null}. The routing key is a hint used for token-aware routing (see + * {@link com.datastax.driver.core.policies.TokenAwarePolicy}), and + * if provided should correspond to the binary value for the query + * partition key. However, not providing a routing key never causes a query + * to fail and if the load balancing policy used is not token aware, then + * the routing key can be safely ignored. + * + * @return the routing key for this query or {@code null}. + */ + public abstract ByteBuffer getRoutingKey(); + + /** + * Returns the keyspace this query operates on. + *

+ * Note that not all queries specify which keyspace they operate on, and + * so this method can always return {@code null}. Firstly, some queries do + * not operate inside a keyspace: keyspace creation, {@code USE} queries, + * user creation, etc. Secondly, even queries that operate within a keyspace + * do not have to specify said keyspace directly, in which case the + * currently logged in keyspace is used (the one set through a {@code USE} query + * or through the use of {@link Cluster#connect(String)}). Lastly, as + * for the routing key, this keyspace information is only a hint for + * token-aware routing (since replica placement depends on the replication + * strategy in use, which is a per-keyspace property) and having this method + * return {@code null} (or even a bogus keyspace name) will never cause the + * query to fail. + * + * @return the keyspace this query operates on if relevant or {@code null}. + */ + public abstract String getKeyspace(); + + /** + * Sets the retry policy to use for this query. + *

+ * The default retry policy, if this method is not called, is the one returned by + * {@link com.datastax.driver.core.policies.Policies#getRetryPolicy} in the + * cluster configuration. This method is thus only useful in case you want + * to punctually override the default policy for this request. + * + * @param policy the retry policy to use for this query. + * @return this {@code Statement} object. + */ + public Statement setRetryPolicy(RetryPolicy policy) { + this.retryPolicy = policy; + return this; + } + + /** + * Returns the retry policy sets for this query, if any. + * + * @return the retry policy sets specifically for this query or {@code null} if no query specific + * retry policy has been set through {@link #setRetryPolicy} (in which case + * the Cluster retry policy will apply if necessary). + */ + public RetryPolicy getRetryPolicy() { + return retryPolicy; + } + + /** + * Sets the query fetch size. + *

+ * The fetch size controls how many result rows will be retrieved + * simultaneously (the goal being to avoid loading too many results + * in memory for queries yielding large results). Please note that + * while a value as low as 1 can be used, it is *highly* discouraged to + * use such a low value in practice as it will yield very poor + * performance. If in doubt, leaving the default is probably a good + * idea. + *

+ * Only {@code SELECT} queries ever make use of that setting. + *

+ * Note: Paging is not supported with the native protocol version 1. If + * you call this method with {@code fetchSize > 0} and + * {@code fetchSize != Integer.MAX_VALUE} and protocol version 1 is in + * use (i.e. if you've forced version 1 through {@link Cluster.Builder#withProtocolVersion} + * or you use Cassandra 1.2), you will get {@link UnsupportedProtocolVersionException} + * when submitting this statement for execution. + * + * @param fetchSize the fetch size to use. If {@code fetchSize &lt;= 0}, + * the default fetch size will be used. To disable paging of the + * result set, use {@code fetchSize == Integer.MAX_VALUE}. + * @return this {@code Statement} object. + */ + public Statement setFetchSize(int fetchSize) { + this.fetchSize = fetchSize; + return this; + } + + /** + * The fetch size for this query. + * + * @return the fetch size for this query. If that value is less or equal + * to 0 (the default unless {@link #setFetchSize} is used), the default + * fetch size will be used. + */ + public int getFetchSize() { + return fetchSize; + } + + /** + * Sets the default timestamp for this query (in microseconds since the epoch). + *

+ * This feature is only available when version {@link ProtocolVersion#V3 V3} or + * higher of the native protocol is in use. With earlier versions, calling this + * method has no effect. + *

+ * The actual timestamp that will be used for this query is, in order of + * preference: + *

+     * <ul>
+     * <li>the timestamp specified directly in the CQL query string (using the
+     * {@code USING TIMESTAMP} syntax);</li>
+     * <li>the timestamp specified through this method, if different from
+     * {@link Long#MIN_VALUE};</li>
+     * <li>the timestamp returned by the {@link TimestampGenerator} currently in use,
+     * if different from {@link Long#MIN_VALUE}.</li>
+     * </ul>
+ * If none of these apply, no timestamp will be sent with the query and Cassandra + * will generate a server-side one (similar to the pre-V3 behavior). + * + * @param defaultTimestamp the default timestamp for this query (must be strictly + * positive). + * @return this {@code Statement} object. + * + * @see Cluster.Builder#withTimestampGenerator(TimestampGenerator) + */ + public Statement setDefaultTimestamp(long defaultTimestamp) { + this.defaultTimestamp = defaultTimestamp; + return this; + } + + /** + * The default timestamp for this query. + * + * @return the default timestamp (in microseconds since the epoch). + */ + public long getDefaultTimestamp() { + return defaultTimestamp; + } + + /** + * Sets the paging state. + *

+ * This will cause the next execution of this statement to fetch results from a given + * page, rather than restarting from the beginning. + *

+ * You get the paging state from a previous execution of the statement (see + * {@link ExecutionInfo#getPagingState()}). + * This is typically used to iterate in a "stateless" manner (e.g. across HTTP requests): + *

+     * {@code
+     * Statement st = new SimpleStatement("your query");
+     * ResultSet rs = session.execute(st.setFetchSize(20));
+     * int available = rs.getAvailableWithoutFetching();
+     * for (int i = 0; i < available; i++) {
+     *     Row row = rs.one();
+     *     // Do something with row (e.g. display it to the user...)
+     * }
+     * // Get state and serialize as string or byte[] to store it for the next execution
+     * // (e.g. pass it as a parameter in the "next page" URI)
+     * PagingState pagingState = rs.getExecutionInfo().getPagingState();
+     * String savedState = pagingState.toString();
+     *
+     * // Next execution:
+     * // Get serialized state back (e.g. get URI parameter)
+     * String savedState = ...
+     * Statement st = new SimpleStatement("your query");
+     * st.setPagingState(PagingState.fromString(savedState));
+     * ResultSet rs = session.execute(st.setFetchSize(20));
+     * int available = rs.getAvailableWithoutFetching();
+     * for (int i = 0; i < available; i++) {
+     *     ...
+     * }
+     * }
+     * 
+ *

+ * The paging state can only be reused between perfectly identical statements + * (same query string, same bound parameters). Altering the contents of the paging state + * or trying to set it on a different statement will cause this method to fail. + *

+ * Note that, due to internal implementation details, the paging state is not portable + * across native protocol versions (see the + * online documentation + * for more explanations about the native protocol). + * This means that {@code PagingState} instances generated with an old version won't work + * with a higher version. If that is a problem for you, consider using the "unsafe" API (see + * {@link #setPagingStateUnsafe(byte[])}). + * + * @param pagingState the paging state to set, or {@code null} to remove any state that was + * previously set on this statement. + * @return this {@code Statement} object. + * + * @throws PagingStateException if the paging state does not match this statement. + */ + public Statement setPagingState(PagingState pagingState) { + if (this instanceof BatchStatement) { + throw new UnsupportedOperationException("Cannot set the paging state on a batch statement"); + } else { + if (pagingState == null) { + this.pagingState = null; + } else if (pagingState.matches(this)) { + this.pagingState = pagingState.getRawState(); + } else { + throw new PagingStateException("Paging state mismatch, " + + "this means that either the paging state contents were altered, " + + "or you're trying to apply it to a different statement"); + } + } + return this; + } + + /** + * Sets the paging state. + *

+ * Contrary to {@link #setPagingState(PagingState)}, this method takes the "raw" form of the + * paging state (previously extracted with {@link ExecutionInfo#getPagingStateUnsafe()}. + * It won't validate that this statement matches the one that the paging state was extracted from. + * If the paging state was altered in any way, you will get unpredictable behavior from + * Cassandra (ranging from wrong results to a query failure). If you decide to use this variant, + * it is strongly recommended to add your own validation (for example, signing the raw state with + * a private key). + * + * @param pagingState the paging state to set, or {@code null} to remove any state that was + * previously set on this statement. + * @return this {@code Statement} object. + */ + public Statement setPagingStateUnsafe(byte[] pagingState) { + if (pagingState == null) { + this.pagingState = null; + } else { + this.pagingState = ByteBuffer.wrap(pagingState); + } + return this; + } + + ByteBuffer getPagingState() { + return pagingState; + } + + /** + * Sets whether this statement is idempotent. + *

+ * See {@link #isIdempotent()} for more explanations about this property. + * + * @param idempotent the new value. + * @return this {@code Statement} object. + */ + public Statement setIdempotent(boolean idempotent) { + this.idempotent = idempotent; + return this; + } + + /** + * Whether this statement is idempotent, i.e. whether it can be applied multiple times + * without changing the result beyond the initial application. + *

+ * Idempotence plays a role in {@link com.datastax.driver.core.policies.SpeculativeExecutionPolicy speculative executions}. + * If a statement is not idempotent, the driver will not schedule speculative + * executions for it. + *

+ * Note that this method can return {@code null}, in which case the driver will default to + * {@link QueryOptions#getDefaultIdempotence()}. + *

+ * By default, this method returns {@code null} for all statements, except for + * {@link BuiltStatement}s, where the value will be inferred from the query: if it updates + * counters, prepends/appends to a list, or uses a function call or + * {@link com.datastax.driver.core.querybuilder.QueryBuilder#raw(String)} anywhere in an inserted value, + * the result will be {@code false}; otherwise it will be {@code true}. + * In all cases, calling {@link #setIdempotent(boolean)} forces a value that overrides every other mechanism. + * + * @return whether this statement is idempotent, or {@code null} to use + * {@link QueryOptions#getDefaultIdempotence()}. + */ + public Boolean isIdempotent() { + return idempotent; + } + + boolean isIdempotentWithDefault(QueryOptions queryOptions) { + Boolean myValue = this.isIdempotent(); + if (myValue != null) + return myValue; + else + return queryOptions.getDefaultIdempotence(); + } +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/StatementWrapper.java b/driver-core/src/main/java/com/datastax/driver/core/StatementWrapper.java new file mode 100644 index 00000000000..ef072869a99 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/StatementWrapper.java @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; + +import com.datastax.driver.core.policies.LoadBalancingPolicy; +import com.datastax.driver.core.policies.RetryPolicy; +import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; + +/** + * Base class for custom {@link Statement} implementations that wrap another statement. + *

+ * This is intended for use with a custom {@link RetryPolicy}, {@link LoadBalancingPolicy} or + * {@link SpeculativeExecutionPolicy}. The client code can wrap a statement to "mark" it, or + * add information that will lead to special handling in the policy. + *

+ * Example: + *

+ * {@code
+ * // Define your own subclass
+ * public class MyCustomStatement extends StatementWrapper {
+ *     public MyCustomStatement(Statement wrapped) {
+ *         super(wrapped);
+ *     }
+ * }
+ *
+ * // In your load balancing policy, add a special case for that new type
+ * public class MyLoadBalancingPolicy implements LoadBalancingPolicy {
+ *     public Iterator newQueryPlan(String loggedKeyspace, Statement statement) {
+ *         if (statement instanceof MyCustomStatement) {
+ *             // return specially crafted plan
+ *         } else {
+ *             // return default plan
+ *         }
+ *     }
+ * }
+ *
+ * // The client wraps whenever it wants to trigger the special plan
+ * Statement s = new SimpleStatement("...");
+ * session.execute(s);                         // will use default plan
+ * session.execute(new MyCustomStatement(s));  // will use special plan
+ * }
+ * 
+ */ +public abstract class StatementWrapper extends Statement { + private final Statement wrapped; + + /** + * Builds a new instance. + * + * @param wrapped the wrapped statement. + */ + protected StatementWrapper(Statement wrapped) { + this.wrapped = wrapped; + } + + Statement getWrappedStatement() { + // Protect against multiple levels of wrapping (even though there is no practical reason for that) + return (wrapped instanceof StatementWrapper) + ? ((StatementWrapper)wrapped).getWrappedStatement() + : wrapped; + } + + @Override + public Statement setConsistencyLevel(ConsistencyLevel consistency) { + return wrapped.setConsistencyLevel(consistency); + } + + @Override + public Statement disableTracing() { + return wrapped.disableTracing(); + } + + @Override + public Statement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { + return wrapped.setSerialConsistencyLevel(serialConsistency); + } + + @Override + public Statement enableTracing() { + return wrapped.enableTracing(); + } + + @Override + public ByteBuffer getPagingState() { + return wrapped.getPagingState(); + } + + @Override + public boolean isTracing() { + return wrapped.isTracing(); + } + + @Override + public RetryPolicy getRetryPolicy() { + return wrapped.getRetryPolicy(); + } + + @Override + public ByteBuffer getRoutingKey() { + return wrapped.getRoutingKey(); + } + + @Override + public Statement setRetryPolicy(RetryPolicy policy) { + return wrapped.setRetryPolicy(policy); + } + + @Override + public ConsistencyLevel getConsistencyLevel() { + return wrapped.getConsistencyLevel(); + } + + @Override + public Statement setPagingState(PagingState pagingState) { + return wrapped.setPagingState(pagingState); + } + + @Override + public ConsistencyLevel getSerialConsistencyLevel() { + return wrapped.getSerialConsistencyLevel(); + } + + @Override + public String getKeyspace() { + return wrapped.getKeyspace(); + } + + @Override + public int getFetchSize() { + return wrapped.getFetchSize(); + } + + 
@Override + public Statement setFetchSize(int fetchSize) { + return wrapped.setFetchSize(fetchSize); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java new file mode 100644 index 00000000000..65260a6e945 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLongArray; + +/** + * Manages a set of integer identifiers. + *

+ * Clients can borrow an id with {@link #next()}, and return it to the set with {@link #release(int)}. + * It is guaranteed that a given id can't be borrowed by two clients at the same time. + * This class is thread-safe and non-blocking. + *

+ * Implementation notes: we use an atomic long array where each bit represents an id. It is set to 1 if + * the id is available, 0 otherwise. When looking for an id, we find a long that has remaining 1's and + * pick the rightmost one. + * To minimize the average time to find that long, we search the array in a round-robin fashion. + */ +class StreamIdGenerator { + static final int MAX_STREAM_PER_CONNECTION_V2 = 128; + static final int MAX_STREAM_PER_CONNECTION_V3 = 32768; + private static final long MAX_UNSIGNED_LONG = -1L; + + static StreamIdGenerator newInstance(ProtocolVersion version) { + return new StreamIdGenerator(streamIdSizeFor(version)); + } + + private static int streamIdSizeFor(ProtocolVersion version) { + switch (version) { + case V1: + case V2: + return 1; + case V3: + return 2; + default: + throw version.unsupported(); + } + } + + private final AtomicLongArray bits; + private final int maxIds; + private final AtomicInteger offset; + + // If a query timeout, we'll stop waiting for it. However in that case, we + // can't release/reuse the ID because we don't know if the response is lost + // or will just come back to use sometimes in the future. In that case, we + // just "mark" the fact that we have one less available ID and marked counts + // how many marks we've put. + private final AtomicInteger marked = new AtomicInteger(0); + + private StreamIdGenerator(int streamIdSizeInBytes) { + // Stream IDs are signed and we only handle positive values + // (negative stream IDs are for server side initiated streams). + maxIds = 1 << (streamIdSizeInBytes * 8 - 1); + + // This is true for 1 byte = 128 streams, and therefore for any higher value + assert maxIds % 64 == 0; + + // We use one bit in our array of longs to represent each stream ID. 
+ bits = new AtomicLongArray(maxIds / 64); + + // Initialize all bits to 1 + for (int i = 0; i < bits.length(); i++) + bits.set(i, MAX_UNSIGNED_LONG); + + offset = new AtomicInteger(bits.length() - 1); + } + + public int next() throws BusyConnectionException { + int previousOffset, myOffset; + do { + previousOffset = offset.get(); + myOffset = (previousOffset + 1) % bits.length(); + } while (!offset.compareAndSet(previousOffset, myOffset)); + + for (int i = 0; i < bits.length(); i++) { + int j = (i + myOffset) % bits.length(); + + int id = atomicGetAndSetFirstAvailable(j); + if (id >= 0) + return id + (64 * j); + } + throw new BusyConnectionException(); + } + + public void release(int streamId) { + atomicClear(streamId / 64, streamId % 64); + } + + public void mark(int streamId) { + marked.incrementAndGet(); + } + + public void unmark(int streamId) { + marked.decrementAndGet(); + } + + public int maxAvailableStreams() { + return maxIds - marked.get(); + } + + // Returns >= 0 if found and set an id, -1 if no bits are available. + private int atomicGetAndSetFirstAvailable(int idx) { + while (true) { + long l = bits.get(idx); + if (l == 0) + return -1; + + // Find the position of the right-most 1-bit + int id = Long.numberOfTrailingZeros(l); + if (bits.compareAndSet(idx, l, l ^ mask(id))) + return id; + } + } + + private void atomicClear(int idx, int toClear) { + while (true) { + long l = bits.get(idx); + if (bits.compareAndSet(idx, l, l | mask(toClear))) + return; + } + } + + private static long mask(int id) { + return 1L << id; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SystemProperties.java b/driver-core/src/main/java/com/datastax/driver/core/SystemProperties.java new file mode 100644 index 00000000000..b35231e6272 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SystemProperties.java @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Allows overriding internal settings via system properties. + *

+ * Warning: this is meant for integration tests only, NOT FOR PRODUCTION USE. + */ +class SystemProperties { + private static final Logger logger = LoggerFactory.getLogger(SystemProperties.class); + + static int getInt(String key, int defaultValue) { + String stringValue = System.getProperty(key); + if (stringValue == null) { + logger.debug("{} is undefined, using default value {}", key, defaultValue); + return defaultValue; + } + try { + int value = Integer.parseInt(stringValue); + logger.warn("{} is defined, using value {}", key, value); + return value; + } catch (NumberFormatException e) { + logger.warn("{} is defined but could not parse value {}, using default value {}", key, stringValue, defaultValue); + return defaultValue; + } + } + + static boolean getBoolean(String key, boolean defaultValue) { + String stringValue = System.getProperty(key); + if (stringValue == null) { + logger.debug("{} is undefined, using default value {}", key, defaultValue); + return defaultValue; + } + try { + boolean value = Boolean.parseBoolean(stringValue); + logger.warn("{} is defined, using value {}", key, value); + return value; + } catch (NumberFormatException e) { + logger.warn("{} is defined but could not parse value {}, using default value {}", key, stringValue, defaultValue); + return defaultValue; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java new file mode 100644 index 00000000000..2337fd62d49 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -0,0 +1,782 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.*; + +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Describes a Table. + */ +public class TableMetadata { + + private static final Logger logger = LoggerFactory.getLogger(TableMetadata.class); + + static final String CF_NAME = "columnfamily_name"; + private static final String CF_ID = "cf_id"; + + private static final String KEY_VALIDATOR = "key_validator"; + private static final String COMPARATOR = "comparator"; + private static final String VALIDATOR = "default_validator"; + + private static final String KEY_ALIASES = "key_aliases"; + private static final String COLUMN_ALIASES = "column_aliases"; + private static final String VALUE_ALIAS = "value_alias"; + + private static final String DEFAULT_KEY_ALIAS = "key"; + private static final String DEFAULT_COLUMN_ALIAS = "column"; + private static final String DEFAULT_VALUE_ALIAS = "value"; + + private static final Comparator columnMetadataComparator = new Comparator() { + public int compare(ColumnMetadata c1, ColumnMetadata c2) { + return c1.getName().compareTo(c2.getName()); + } + }; + + private final KeyspaceMetadata keyspace; + private final String name; + private final UUID id; + private final List partitionKey; + private final List clusteringColumns; + private final Map columns; + private final Options options; + private final List clusteringOrder; + + private final VersionNumber 
cassandraVersion; + + /** + * Clustering orders. + *

+ * This is used by {@link #getClusteringOrder} to indicate the clustering + * order of a table. + */ + public static enum Order { + ASC, DESC; + + static final Predicate isAscending = new Predicate() { + public boolean apply(Order o) { + return o == ASC; + } + }; + } + + private TableMetadata(KeyspaceMetadata keyspace, + String name, + UUID id, + List partitionKey, + List clusteringColumns, + LinkedHashMap columns, + Options options, + List clusteringOrder, + VersionNumber cassandraVersion) { + this.keyspace = keyspace; + this.name = name; + this.id = id; + this.partitionKey = partitionKey; + this.clusteringColumns = clusteringColumns; + this.columns = columns; + this.options = options; + this.clusteringOrder = clusteringOrder; + this.cassandraVersion = cassandraVersion; + } + + static TableMetadata build(KeyspaceMetadata ksm, Row row, Map rawCols, VersionNumber cassandraVersion) { + + String name = row.getString(CF_NAME); + UUID id = (cassandraVersion.getMajor() > 2 || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 1)) + ? row.getUUID(CF_ID) + : null; + + CassandraTypeParser.ParseResult keyValidator = CassandraTypeParser.parseWithComposite(row.getString(KEY_VALIDATOR)); + CassandraTypeParser.ParseResult comparator = CassandraTypeParser.parseWithComposite(row.getString(COMPARATOR)); + List columnAliases = cassandraVersion.getMajor() >= 2 || row.getString(COLUMN_ALIASES) == null + ? 
Collections.emptyList() + : SimpleJSONParser.parseStringList(row.getString(COLUMN_ALIASES)); + + int clusteringSize = findClusteringSize(comparator, rawCols.values(), columnAliases, cassandraVersion); + boolean isDense = clusteringSize != comparator.types.size() - 1; + boolean isCompact = isDense || !comparator.isComposite; + + List partitionKey = nullInitializedList(keyValidator.types.size()); + List clusteringColumns = nullInitializedList(clusteringSize); + List clusteringOrder = nullInitializedList(clusteringSize); + // We use a linked hashmap because we will keep this in the order of a 'SELECT * FROM ...'. + LinkedHashMap columns = new LinkedHashMap(); + + + Options options = null; + try { + options = new Options(row, isCompact, cassandraVersion); + } catch (RuntimeException e) { + // See ControlConnection#refreshSchema for why we'd rather not probably this further. Since table options is one thing + // that tends to change often in Cassandra, it's worth special casing this. + logger.error(String.format("Error parsing schema options for table %s.%s: " + + "Cluster.getMetadata().getKeyspace(\"%s\").getTable(\"%s\").getOptions() will return null", + ksm.getName(), name, ksm.getName(), name), e); + } + + TableMetadata tm = new TableMetadata(ksm, name, id, partitionKey, clusteringColumns, columns, options, clusteringOrder, cassandraVersion); + + // We use this temporary set just so non PK columns are added in lexicographical order, which is the one of a + // 'SELECT * FROM ...' + Set otherColumns = new TreeSet(columnMetadataComparator); + + if (cassandraVersion.getMajor() < 2) { + // In C* 1.2, only the REGULAR columns are in the columns schema table, so we need to add the names from + // the aliases (and make sure we handle default aliases). + List keyAliases = row.getString(KEY_ALIASES) == null + ? 
Collections.emptyList() + : SimpleJSONParser.parseStringList(row.getString(KEY_ALIASES)); + for (int i = 0; i < partitionKey.size(); i++) { + String alias = keyAliases.size() > i ? keyAliases.get(i) : (i == 0 ? DEFAULT_KEY_ALIAS : DEFAULT_KEY_ALIAS + (i + 1)); + partitionKey.set(i, ColumnMetadata.forAlias(tm, alias, keyValidator.types.get(i))); + } + + for (int i = 0; i < clusteringSize; i++) { + String alias = columnAliases.size() > i ? columnAliases.get(i) : DEFAULT_COLUMN_ALIAS + (i + 1); + clusteringColumns.set(i, ColumnMetadata.forAlias(tm, alias, comparator.types.get(i))); + clusteringOrder.set(i, comparator.reversed.get(i) ? Order.DESC : Order.ASC); + } + + // We have a value alias if we're dense + if (isDense) { + String alias = row.isNull(VALUE_ALIAS) ? DEFAULT_VALUE_ALIAS : row.getString(VALUE_ALIAS); + DataType type = CassandraTypeParser.parseOne(row.getString(VALIDATOR)); + otherColumns.add(ColumnMetadata.forAlias(tm, alias, type)); + } + } + + for (ColumnMetadata.Raw rawCol : rawCols.values()) { + ColumnMetadata col = ColumnMetadata.fromRaw(tm, rawCol); + switch (rawCol.kind) { + case PARTITION_KEY: + partitionKey.set(rawCol.componentIndex, col); + break; + case CLUSTERING_KEY: + clusteringColumns.set(rawCol.componentIndex, col); + clusteringOrder.set(rawCol.componentIndex, rawCol.isReversed ? Order.DESC : Order.ASC); + break; + default: + otherColumns.add(col); + break; + } + } + + for (ColumnMetadata c : partitionKey) + columns.put(c.getName(), c); + for (ColumnMetadata c : clusteringColumns) + columns.put(c.getName(), c); + for (ColumnMetadata c : otherColumns) + columns.put(c.getName(), c); + + ksm.add(tm); + return tm; + } + + private static int findClusteringSize(CassandraTypeParser.ParseResult comparator, + Collection cols, + List columnAliases, + VersionNumber cassandraVersion) { + // In 2.0, this is relatively easy, we just find the biggest 'componentIndex' amongst the clustering columns. 
+ // For 1.2 however, this is slightly more subtle: we need to infer it based on whether the comparator is composite or not, and whether we have + // regular columns or not. + if (cassandraVersion.getMajor() >= 2) { + int maxId = -1; + for (ColumnMetadata.Raw col : cols) + if (col.kind == ColumnMetadata.Raw.Kind.CLUSTERING_KEY) + maxId = Math.max(maxId, col.componentIndex); + return maxId + 1; + } else { + int size = comparator.types.size(); + if (comparator.isComposite) + return !comparator.collections.isEmpty() || (columnAliases.size() == size - 1 && comparator.types.get(size - 1).equals(DataType.text())) ? size - 1 : size; + else + // We know cols only has the REGULAR ones for 1.2 + return !columnAliases.isEmpty() || cols.isEmpty() ? size : 0; + } + } + + private static List nullInitializedList(int size) { + List l = new ArrayList(size); + for (int i = 0; i < size; ++i) + l.add(null); + return l; + } + + /** + * Returns the name of this table. + * + * @return the name of this CQL table. + */ + public String getName() { + return name; + } + + /** + * Returns the unique id of this table. + *

+ * Note: this id is available in Cassandra 2.1 and above. It will be + * {@code null} for earlier versions. + * + * @return the unique id of the table. + */ + public UUID getId() { + return id; + } + + /** + * Returns the keyspace this table belong to. + * + * @return the keyspace metadata of the keyspace this table belong to. + */ + public KeyspaceMetadata getKeyspace() { + return keyspace; + } + + /** + * Returns metadata on a column of this table. + * + * @param name the name of the column to retrieve ({@code name} will be + * interpreted as a case-insensitive identifier unless enclosed in double-quotes, + * see {@link Metadata#quote}). + * @return the metadata for the {@code name} column if it exists, or + * {@code null} otherwise. + */ + public ColumnMetadata getColumn(String name) { + return columns.get(Metadata.handleId(name)); + } + + /** + * Returns a list containing all the columns of this table. + * + * The order of the columns in the list is consistent with + * the order of the columns returned by a {@code SELECT * FROM thisTable}: + * the first column is the partition key, next are the clustering + * columns in their defined order, and then the rest of the + * columns follow in alphabetic order. + * + * @return a list containing the metadata for the columns of this table. + */ + public List getColumns() { + return new ArrayList(columns.values()); + } + + /** + * Returns the list of columns composing the primary key for this table. + * + * A table will always at least have a partition key (that + * may itself be one or more columns), so the returned list at least + * has one element. + * + * @return the list of columns composing the primary key for this table. + */ + public List getPrimaryKey() { + List pk = new ArrayList(partitionKey.size() + clusteringColumns.size()); + pk.addAll(partitionKey); + pk.addAll(clusteringColumns); + return pk; + } + + /** + * Returns the list of columns composing the partition key for this table. 
+ * + * A table always has a partition key so the returned list has + * at least one element. + * + * @return the list of columns composing the partition key for this table. + */ + public List getPartitionKey() { + return Collections.unmodifiableList(partitionKey); + } + + /** + * Returns the list of clustering columns for this table. + * + * @return the list of clustering columns for this table. + * If there is no clustering columns, an empty list is returned. + */ + public List getClusteringColumns() { + return Collections.unmodifiableList(clusteringColumns); + } + + /** + * Returns the clustering order for this table. + *

+ * The returned list contains the clustering order of each clustering column. The + * {@code i}th element of the result corresponds to the order (ascending or + * descending) of the {@code i}th clustering column (see + * {@link #getClusteringColumns}). Note that a table defined without any + * particular clustering order is equivalent to one for which all the + * clustering keys are in ascending order. + * + * @return a list with the clustering order for each clustering column. + */ + public List getClusteringOrder() { + return clusteringOrder; + } + + /** + * Returns the options for this table. + * + * @return the options for this table. + */ + public Options getOptions() { + return options; + } + + void add(ColumnMetadata column) { + columns.put(column.getName(), column); + } + + /** + * Returns a {@code String} containing CQL queries representing this + * table and the index on it. + *

+ * In other words, this method returns the queries that would allow you to + * recreate the schema of this table, along with the index defined on + * columns of this table. + *

+ * Note that the returned String is formatted to be human readable (for + * some definition of human readable at least). + * + * @return the CQL queries representing this table schema as a {@code + * String}. + */ + public String exportAsString() { + StringBuilder sb = new StringBuilder(); + + sb.append(asCQLQuery(true)); + + for (ColumnMetadata column : columns.values()) { + ColumnMetadata.IndexMetadata index = column.getIndex(); + if (index == null) + continue; + + sb.append('\n').append(index.asCQLQuery()); + } + return sb.toString(); + } + + /** + * Returns a CQL query representing this table. + *

+ * This method returns a single 'CREATE TABLE' query with the options + * corresponding to this table definition. + *

+ * Note that the returned string is a single line; the returned query + * is not formatted in any way. + * + * @return the 'CREATE TABLE' query corresponding to this table. + * @see #exportAsString + */ + public String asCQLQuery() { + return asCQLQuery(false); + } + + private String asCQLQuery(boolean formatted) { + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE TABLE ").append(Metadata.escapeId(keyspace.getName())).append('.').append(Metadata.escapeId(name)).append(" ("); + newLine(sb, formatted); + for (ColumnMetadata cm : columns.values()) + newLine(sb.append(spaces(4, formatted)).append(cm).append(','), formatted); + + // PK + sb.append(spaces(4, formatted)).append("PRIMARY KEY ("); + if (partitionKey.size() == 1) { + sb.append(partitionKey.get(0).getName()); + } else { + sb.append('('); + boolean first = true; + for (ColumnMetadata cm : partitionKey) { + if (first) first = false; else sb.append(", "); + sb.append(Metadata.escapeId(cm.getName())); + } + sb.append(')'); + } + for (ColumnMetadata cm : clusteringColumns) + sb.append(", ").append(Metadata.escapeId(cm.getName())); + sb.append(')'); + newLine(sb, formatted); + // end PK + + // Options + sb.append(") WITH "); + + if (options.isCompactStorage) + and(sb.append("COMPACT STORAGE"), formatted); + if (!Iterables.all(clusteringOrder, Order.isAscending)) + and(appendClusteringOrder(sb), formatted); + sb.append("read_repair_chance = ").append(options.readRepair); + and(sb, formatted).append("dclocal_read_repair_chance = ").append(options.localReadRepair); + if (cassandraVersion.getMajor() < 2 || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() == 0)) + and(sb, formatted).append("replicate_on_write = ").append(options.replicateOnWrite); + and(sb, formatted).append("gc_grace_seconds = ").append(options.gcGrace); + and(sb, formatted).append("bloom_filter_fp_chance = ").append(options.bfFpChance); + if (cassandraVersion.getMajor() < 2 || cassandraVersion.getMajor() == 2 && 
cassandraVersion.getMinor() < 1) + and(sb, formatted).append("caching = '").append(options.caching.get("keys")).append('\''); + else + and(sb, formatted).append("caching = ").append(formatOptionMap(options.caching)); + if (options.comment != null) + and(sb, formatted).append("comment = '").append(options.comment.replace("'","''")).append('\''); + and(sb, formatted).append("compaction = ").append(formatOptionMap(options.compaction)); + and(sb, formatted).append("compression = ").append(formatOptionMap(options.compression)); + if (cassandraVersion.getMajor() >= 2) { + and(sb, formatted).append("default_time_to_live = ").append(options.defaultTTL); + and(sb, formatted).append("speculative_retry = '").append(options.speculativeRetry).append('\''); + if (options.indexInterval != null) + and(sb, formatted).append("index_interval = ").append(options.indexInterval); + } + if (cassandraVersion.getMajor() > 2 || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 1)) { + and(sb, formatted).append("min_index_interval = ").append(options.minIndexInterval); + and(sb, formatted).append("max_index_interval = ").append(options.maxIndexInterval); + } + sb.append(';'); + return sb.toString(); + } + + @Override + public String toString() { + return asCQLQuery(); + } + + private StringBuilder appendClusteringOrder(StringBuilder sb) { + sb.append("CLUSTERING ORDER BY ("); + for (int i = 0; i < clusteringColumns.size(); i++) { + if (i > 0) sb.append(", "); + sb.append(clusteringColumns.get(i).getName()).append(' ').append(clusteringOrder.get(i)); + } + return sb.append(')'); + } + + private static String formatOptionMap(Map m) { + StringBuilder sb = new StringBuilder(); + sb.append("{ "); + boolean first = true; + for (Map.Entry entry : m.entrySet()) { + if (first) first = false; else sb.append(", "); + sb.append('\'').append(entry.getKey()).append('\''); + sb.append(" : "); + try { + sb.append(Integer.parseInt(entry.getValue())); + } catch (NumberFormatException e) { + 
sb.append('\'').append(entry.getValue()).append('\''); + } + } + sb.append(" }"); + return sb.toString(); + } + + private StringBuilder and(StringBuilder sb, boolean formatted) { + return newLine(sb, formatted).append(spaces(2, formatted)).append(" AND "); + } + + static String spaces(int n, boolean formatted) { + if (!formatted) + return ""; + + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < n; i++) + sb.append(' '); + + return sb.toString(); + } + + static StringBuilder newLine(StringBuilder sb, boolean formatted) { + if (formatted) + sb.append('\n'); + return sb; + } + + public static class Options { + + private static final String COMMENT = "comment"; + private static final String READ_REPAIR = "read_repair_chance"; + private static final String LOCAL_READ_REPAIR = "local_read_repair_chance"; + private static final String REPLICATE_ON_WRITE = "replicate_on_write"; + private static final String GC_GRACE = "gc_grace_seconds"; + private static final String BF_FP_CHANCE = "bloom_filter_fp_chance"; + private static final String CACHING = "caching"; + private static final String COMPACTION_CLASS = "compaction_strategy_class"; + private static final String COMPACTION_OPTIONS = "compaction_strategy_options"; + private static final String MIN_COMPACTION_THRESHOLD = "min_compaction_threshold"; + private static final String MAX_COMPACTION_THRESHOLD = "max_compaction_threshold"; + private static final String POPULATE_CACHE_ON_FLUSH = "populate_io_cache_on_flush"; + private static final String COMPRESSION_PARAMS = "compression_parameters"; + private static final String MEMTABLE_FLUSH_PERIOD_MS = "memtable_flush_period_in_ms"; + private static final String DEFAULT_TTL = "default_time_to_live"; + private static final String SPECULATIVE_RETRY = "speculative_retry"; + private static final String INDEX_INTERVAL = "index_interval"; + private static final String MIN_INDEX_INTERVAL = "min_index_interval"; + private static final String MAX_INDEX_INTERVAL = 
"max_index_interval"; + + private static final boolean DEFAULT_REPLICATE_ON_WRITE = true; + private static final double DEFAULT_BF_FP_CHANCE = 0.01; + private static final boolean DEFAULT_POPULATE_CACHE_ON_FLUSH = false; + private static final int DEFAULT_MEMTABLE_FLUSH_PERIOD = 0; + private static final int DEFAULT_DEFAULT_TTL = 0; + private static final String DEFAULT_SPECULATIVE_RETRY = "NONE"; + private static final int DEFAULT_INDEX_INTERVAL = 128; + private static final int DEFAULT_MIN_INDEX_INTERVAL = 128; + private static final int DEFAULT_MAX_INDEX_INTERVAL = 2048; + + private final boolean isCompactStorage; + + private final String comment; + private final double readRepair; + private final double localReadRepair; + private final boolean replicateOnWrite; + private final int gcGrace; + private final double bfFpChance; + private final Map caching; + private final boolean populateCacheOnFlush; + private final int memtableFlushPeriodMs; + private final int defaultTTL; + private final String speculativeRetry; + private final Integer indexInterval; + private final Integer minIndexInterval; + private final Integer maxIndexInterval; + private final Map compaction = new HashMap(); + private final Map compression = new HashMap(); + + Options(Row row, boolean isCompactStorage, VersionNumber version) { + this.isCompactStorage = isCompactStorage; + this.comment = isNullOrAbsent(row, COMMENT) ? "" : row.getString(COMMENT); + this.readRepair = row.getDouble(READ_REPAIR); + this.localReadRepair = row.getDouble(LOCAL_READ_REPAIR); + boolean is210OrMore = version.getMajor() > 2 || (version.getMajor() == 2 && version.getMinor() >= 1); + this.replicateOnWrite = is210OrMore || isNullOrAbsent(row, REPLICATE_ON_WRITE) ? DEFAULT_REPLICATE_ON_WRITE : row.getBool(REPLICATE_ON_WRITE); + this.gcGrace = row.getInt(GC_GRACE); + this.bfFpChance = isNullOrAbsent(row, BF_FP_CHANCE) ? DEFAULT_BF_FP_CHANCE : row.getDouble(BF_FP_CHANCE); + this.caching = is210OrMore + ? 
SimpleJSONParser.parseStringMap(row.getString(CACHING)) + : ImmutableMap.of("keys", row.getString(CACHING)); + this.populateCacheOnFlush = isNullOrAbsent(row, POPULATE_CACHE_ON_FLUSH) ? DEFAULT_POPULATE_CACHE_ON_FLUSH : row.getBool(POPULATE_CACHE_ON_FLUSH); + this.memtableFlushPeriodMs = version.getMajor() < 2 || isNullOrAbsent(row, MEMTABLE_FLUSH_PERIOD_MS) ? DEFAULT_MEMTABLE_FLUSH_PERIOD : row.getInt(MEMTABLE_FLUSH_PERIOD_MS); + this.defaultTTL = version.getMajor() < 2 || isNullOrAbsent(row, DEFAULT_TTL) ? DEFAULT_DEFAULT_TTL : row.getInt(DEFAULT_TTL); + this.speculativeRetry = version.getMajor() < 2 || isNullOrAbsent(row, SPECULATIVE_RETRY) ? DEFAULT_SPECULATIVE_RETRY : row.getString(SPECULATIVE_RETRY); + + if (version.getMajor() >= 2 && !is210OrMore) + this.indexInterval = isNullOrAbsent(row, INDEX_INTERVAL) ? DEFAULT_INDEX_INTERVAL : row.getInt(INDEX_INTERVAL); + else + this.indexInterval = null; + + if (is210OrMore) { + this.minIndexInterval = isNullOrAbsent(row, MIN_INDEX_INTERVAL) + ? DEFAULT_MIN_INDEX_INTERVAL + : row.getInt(MIN_INDEX_INTERVAL); + this.maxIndexInterval = isNullOrAbsent(row, MAX_INDEX_INTERVAL) + ? DEFAULT_MAX_INDEX_INTERVAL + : row.getInt(MAX_INDEX_INTERVAL); + } else { + this.minIndexInterval = null; + this.maxIndexInterval = null; + } + + this.compaction.put("class", row.getString(COMPACTION_CLASS)); + this.compaction.putAll(SimpleJSONParser.parseStringMap(row.getString(COMPACTION_OPTIONS))); + + this.compression.putAll(SimpleJSONParser.parseStringMap(row.getString(COMPRESSION_PARAMS))); + } + + private static boolean isNullOrAbsent(Row row, String name) { + return row.getColumnDefinitions().getIndexOf(name) < 0 + || row.isNull(name); + } + + /** + * Returns whether the table uses the {@code COMPACT STORAGE} option. + * + * @return whether the table uses the {@code COMPACT STORAGE} option. + */ + public boolean isCompactStorage() { + return isCompactStorage; + } + + /** + * Returns the commentary set for this table. 
+ * + * @return the commentary set for this table, or {@code null} if none has been set. + */ + public String getComment() { + return comment; + } + + /** + * Returns the chance with which a read repair is triggered for this table. + * + * @return the read repair chance set for this table (in [0.0, 1.0]). + */ + public double getReadRepairChance() { + return readRepair; + } + + /** + * Returns the cluster local read repair chance set for this table. + * + * @return the local read repair chance set for this table (in [0.0, 1.0]). + */ + public double getLocalReadRepairChance() { + return localReadRepair; + } + + /** + * Returns whether replicateOnWrite is set for this table. + * + * This is only meaningful for tables holding counters. + * + * @return whether replicateOnWrite is set for this table. + */ + public boolean getReplicateOnWrite() { + return replicateOnWrite; + } + + /** + * Returns the tombstone garbage collection grace time in seconds for this table. + * + * @return the tombstone garbage collection grace time in seconds for this table. + */ + public int getGcGraceInSeconds() { + return gcGrace; + } + + /** + * Returns the false positive chance for the Bloom filter of this table. + * + * @return the Bloom filter false positive chance for this table (in [0.0, 1.0]). + */ + public double getBloomFilterFalsePositiveChance() { + return bfFpChance; + } + + /** + * Returns the caching options for this table. + * + * @return the caching options for this table. + */ + public Map getCaching() { + return caching; + } + + /** + * Whether the populate I/O cache on flush is set on this table. + * + * @return whether the populate I/O cache on flush is set on this table. + */ + public boolean getPopulateIOCacheOnFlush() { + return populateCacheOnFlush; + } + + /** + * Returns the memtable flush period (in milliseconds) option for this table. + *

+ * Note: this option is not available in Cassandra 1.2 and will return 0 (no periodic + * flush) when connected to 1.2 nodes. + * + * @return the memtable flush period option for this table or 0 if no + * periodic flush is configured. + */ + public int getMemtableFlushPeriodInMs() { + return memtableFlushPeriodMs; + } + + /** + * Returns the default TTL for this table. + *

+ * Note: this option is not available in Cassandra 1.2 and will return 0 (no default + * TTL) when connected to 1.2 nodes. + * + * @return the default TTL for this table or 0 if no default TTL is + * configured. + */ + public int getDefaultTimeToLive() { + return defaultTTL; + } + + /** + * Returns the speculative retry option for this table. + *

+ * Note: this option is not available in Cassandra 1.2 and will return "NONE" (no + * speculative retry) when connected to 1.2 nodes. + * + * @return the speculative retry option this table. + */ + public String getSpeculativeRetry() { + return speculativeRetry; + } + + /** + * Returns the index interval option for this table. + *

+ * Note: this option is not available in Cassandra 1.2 (more precisely, it is not + * configurable per-table) and will return 128 (the default index interval) when + * connected to 1.2 nodes. It is deprecated in Cassandra 2.1 and above, and will + * therefore return {@code null} for 2.1 nodes. + * + * @return the index interval option for this table. + */ + public Integer getIndexInterval() { + return indexInterval; + } + + /** + * Returns the minimum index interval option for this table. + *

+ * Note: this option is available in Cassandra 2.1 and above, and will return + * {@code null} for earlier versions. + * + * @return the minimum index interval option for this table. + */ + public Integer getMinIndexInterval() { + return minIndexInterval; + } + + /** + * Returns the maximum index interval option for this table. + *

+ * Note: this option is available in Cassandra 2.1 and above, and will return + * {@code null} for earlier versions. + * + * @return the maximum index interval option for this table. + */ + public Integer getMaxIndexInterval() { + return maxIndexInterval; + } + + /** + * Returns the compaction options for this table. + * + * @return a map containing the compaction options for this table. + */ + public Map getCompaction() { + return new HashMap(compaction); + } + + /** + * Returns the compression options for this table. + * + * @return a map containing the compression options for this table. + */ + public Map getCompression() { + return new HashMap(compression); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.java new file mode 100644 index 00000000000..833a26fe857 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.java @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + + +/** + * A timestamp generator based on {@code System.currentTimeMillis()}, with an incrementing thread-local counter + * to generate the sub-millisecond part. + *

+ * This implementation guarantees incrementing timestamps for a given client thread, provided that no more than + * 1000 are requested for a given clock tick (the exact granularity of {@link System#currentTimeMillis()} + * depends on the operating system). + *

+ * If that rate is exceeded, a warning is logged and the timestamps don't increment anymore until the next clock + * tick. + */ +public class ThreadLocalMonotonicTimestampGenerator extends AbstractMonotonicTimestampGenerator { + // We're deliberately avoiding an anonymous subclass with initialValue(), because this can introduce + // classloader leaks in managed environments like Tomcat + private final ThreadLocal lastRef = new ThreadLocal(); + + @Override + public long next() { + Long last = this.lastRef.get(); + if (last == null) + last = 0L; + + long next = computeNext(last); + + this.lastRef.set(next); + return next; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/TimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/TimestampGenerator.java new file mode 100644 index 00000000000..00270443723 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/TimestampGenerator.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * Generates client-side, microsecond-precision query timestamps. + *

+ * Given that Cassandra uses those timestamps to resolve conflicts, implementations should generate + * incrementing timestamps for successive invocations. + */ +public interface TimestampGenerator { + /** + * Returns the next timestamp. + * + * @return the next timestamp (in microseconds). If it equals {@link Long#MIN_VALUE}, it won't be + * sent by the driver, letting Cassandra generate a server-side timestamp. + */ + long next(); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Token.java b/driver-core/src/main/java/com/datastax/driver/core/Token.java new file mode 100644 index 00000000000..d351fbed73f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Token.java @@ -0,0 +1,611 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; +import java.util.List; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; +import com.google.common.primitives.UnsignedBytes; + +import com.datastax.driver.core.utils.Bytes; + +/** + * A token on the Cassandra ring. + */ +public abstract class Token implements Comparable { + + /** + * Returns the data type of this token's value. + * + * @return the datatype.
+ */ + public abstract DataType getType(); + + /** + * Returns the raw value of this token. + * + * @return the value. + */ + public abstract Object getValue(); + + static Token.Factory getFactory(String partitionerName) { + if (partitionerName.endsWith("Murmur3Partitioner")) + return M3PToken.FACTORY; + else if (partitionerName.endsWith("RandomPartitioner")) + return RPToken.FACTORY; + else if (partitionerName.endsWith("OrderedPartitioner")) + return OPPToken.FACTORY; + else + return null; + } + + static abstract class Factory { + abstract Token fromString(String tokenStr); + abstract DataType getTokenType(); + abstract Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion); + /** The minimum token is a special value that no key ever hashes to, it's used both as lower and upper bound. */ + abstract Token minToken(); + abstract Token hash(ByteBuffer partitionKey); + abstract List split(Token startToken, Token endToken, int numberOfSplits); + + // Base implementation for split + protected List split(BigInteger start, BigInteger range, + BigInteger ringEnd, BigInteger ringLength, + int numberOfSplits) { + BigInteger[] tmp = range.divideAndRemainder(BigInteger.valueOf(numberOfSplits)); + BigInteger divider = tmp[0]; + int remainder = tmp[1].intValue(); + + List results = Lists.newArrayListWithExpectedSize(numberOfSplits - 1); + BigInteger current = start; + BigInteger dividerPlusOne = (remainder == 0) ? null // won't be used + : divider.add(BigInteger.ONE); + + for (int i = 1; i < numberOfSplits; i++) { + current = current.add(remainder-- > 0 ? 
dividerPlusOne : divider); + if (ringEnd != null && current.compareTo(ringEnd) > 0) + current = current.subtract(ringLength); + results.add(current); + } + return results; + } + } + + // Murmur3Partitioner tokens + static class M3PToken extends Token { + private final long value; + + public static final Factory FACTORY = new M3PTokenFactory(); + + private static class M3PTokenFactory extends Factory { + + private static final BigInteger RING_END = BigInteger.valueOf(Long.MAX_VALUE); + private static final BigInteger RING_LENGTH = RING_END.subtract(BigInteger.valueOf(Long.MIN_VALUE)); + static final M3PToken MIN_TOKEN = new M3PToken(Long.MIN_VALUE); + static final M3PToken MAX_TOKEN = new M3PToken(Long.MAX_VALUE); + + private long getblock(ByteBuffer key, int offset, int index) { + int i_8 = index << 3; + int blockOffset = offset + i_8; + return ((long) key.get(blockOffset + 0) & 0xff) + (((long) key.get(blockOffset + 1) & 0xff) << 8) + + (((long) key.get(blockOffset + 2) & 0xff) << 16) + (((long) key.get(blockOffset + 3) & 0xff) << 24) + + (((long) key.get(blockOffset + 4) & 0xff) << 32) + (((long) key.get(blockOffset + 5) & 0xff) << 40) + + (((long) key.get(blockOffset + 6) & 0xff) << 48) + (((long) key.get(blockOffset + 7) & 0xff) << 56); + } + + private long rotl64(long v, int n) { + return ((v << n) | (v >>> (64 - n))); + } + + private long fmix(long k) { + k ^= k >>> 33; + k *= 0xff51afd7ed558ccdL; + k ^= k >>> 33; + k *= 0xc4ceb9fe1a85ec53L; + k ^= k >>> 33; + return k; + } + + // This is an adapted version of the MurmurHash.hash3_x64_128 from Cassandra used + // for M3P. Compared to that methods, there's a few inlining of arguments and we + // only return the first 64-bits of the result since that's all M3P uses. + @SuppressWarnings("fallthrough") + private long murmur(ByteBuffer data) { + int offset = data.position(); + int length = data.remaining(); + + int nblocks = length >> 4; // Process as 128-bit blocks. 
+ + long h1 = 0; + long h2 = 0; + + long c1 = 0x87c37b91114253d5L; + long c2 = 0x4cf5ad432745937fL; + + //---------- + // body + + for(int i = 0; i < nblocks; i++) { + long k1 = getblock(data, offset, i*2+0); + long k2 = getblock(data, offset, i*2+1); + + k1 *= c1; k1 = rotl64(k1,31); k1 *= c2; h1 ^= k1; + h1 = rotl64(h1,27); h1 += h2; h1 = h1*5+0x52dce729; + k2 *= c2; k2 = rotl64(k2,33); k2 *= c1; h2 ^= k2; + h2 = rotl64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5; + } + + //---------- + // tail + + // Advance offset to the unprocessed tail of the data. + offset += nblocks * 16; + + long k1 = 0; + long k2 = 0; + + switch(length & 15) { + case 15: k2 ^= ((long) data.get(offset+14)) << 48; + case 14: k2 ^= ((long) data.get(offset+13)) << 40; + case 13: k2 ^= ((long) data.get(offset+12)) << 32; + case 12: k2 ^= ((long) data.get(offset+11)) << 24; + case 11: k2 ^= ((long) data.get(offset+10)) << 16; + case 10: k2 ^= ((long) data.get(offset+9)) << 8; + case 9: k2 ^= ((long) data.get(offset+8)) << 0; + k2 *= c2; k2 = rotl64(k2,33); k2 *= c1; h2 ^= k2; + + case 8: k1 ^= ((long) data.get(offset+7)) << 56; + case 7: k1 ^= ((long) data.get(offset+6)) << 48; + case 6: k1 ^= ((long) data.get(offset+5)) << 40; + case 5: k1 ^= ((long) data.get(offset+4)) << 32; + case 4: k1 ^= ((long) data.get(offset+3)) << 24; + case 3: k1 ^= ((long) data.get(offset+2)) << 16; + case 2: k1 ^= ((long) data.get(offset+1)) << 8; + case 1: k1 ^= ((long) data.get(offset)); + k1 *= c1; k1 = rotl64(k1,31); k1 *= c2; h1 ^= k1; + }; + + //---------- + // finalization + + h1 ^= length; h2 ^= length; + + h1 += h2; + h2 += h1; + + h1 = fmix(h1); + h2 = fmix(h2); + + h1 += h2; + h2 += h1; + + return h1; + } + + @Override + M3PToken fromString(String tokenStr) { + return new M3PToken(Long.parseLong(tokenStr)); + } + + @Override + DataType getTokenType() { + return DataType.bigint(); + } + + @Override + Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion) { + return new M3PToken((Long) 
getTokenType().deserialize(buffer, protocolVersion)); + } + + @Override + Token minToken() { + return MIN_TOKEN; + } + + @Override + M3PToken hash(ByteBuffer partitionKey) { + long v = murmur(partitionKey); + return new M3PToken(v == Long.MIN_VALUE ? Long.MAX_VALUE : v); + } + + @Override + List split(Token startToken, Token endToken, int numberOfSplits) { + // edge case: ]min, min] means the whole ring + if (startToken.equals(endToken) && startToken.equals(MIN_TOKEN)) + endToken = MAX_TOKEN; + + BigInteger start = BigInteger.valueOf(((M3PToken)startToken).value); + BigInteger end = BigInteger.valueOf(((M3PToken)endToken).value); + + BigInteger range = end.subtract(start); + if (range.compareTo(BigInteger.ZERO) < 0) + range = range.add(RING_LENGTH); + + List values = super.split(start, range, + RING_END, RING_LENGTH, + numberOfSplits); + List tokens = Lists.newArrayListWithExpectedSize(values.size()); + for (BigInteger value : values) + tokens.add(new M3PToken(value.longValue())); + return tokens; + } + } + + private M3PToken(long value) { + this.value = value; + } + + @Override + public DataType getType() { + return FACTORY.getTokenType(); + } + + @Override + public Object getValue() { + return value; + } + + @Override + public int compareTo(Token other) { + assert other instanceof M3PToken; + long otherValue = ((M3PToken)other).value; + return value < otherValue ? -1 : (value == otherValue) ? 
0 : 1; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null || this.getClass() != obj.getClass()) + return false; + + return value == ((M3PToken)obj).value; + } + + @Override + public int hashCode() { + return (int)(value^(value>>>32)); + } + + @Override + public String toString() { + return Long.toString(value); + } + } + + // OPPartitioner tokens + static class OPPToken extends Token { + + private final ByteBuffer value; + + public static final Factory FACTORY = new OPPTokenFactory(); + + private static class OPPTokenFactory extends Factory { + private static final BigInteger TWO = BigInteger.valueOf(2); + private static final Token MIN_TOKEN = new OPPToken(ByteBuffer.allocate(0)); + + @Override + public OPPToken fromString(String tokenStr) { + // This method must be able to parse the contents of system.peers.tokens, which do not have the "0x" prefix. + // On the other hand, OPPToken#toString has the "0x" because it should be usable in a CQL query, and it's + // nice to have fromString and toString symetrical. + // So handle both cases: + if (!tokenStr.startsWith("0x")) { + String prefix = (tokenStr.length() % 2 == 0) ? "0x" : "0x0"; + tokenStr = prefix + tokenStr; + } + ByteBuffer value = Bytes.fromHexString(tokenStr); + return new OPPToken(value); + } + + @Override + DataType getTokenType() { + return DataType.blob(); + } + + @Override + Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion) { + return new OPPToken(buffer); + } + + @Override + Token minToken() { + return MIN_TOKEN; + } + + @Override + OPPToken hash(ByteBuffer partitionKey) { + return new OPPToken(partitionKey); + } + + @Override + List split(Token startToken, Token endToken, int numberOfSplits) { + int tokenOrder = startToken.compareTo(endToken); + + // ]min,min] means the whole ring. 
However, since there is no "max token" with this partitioner, we can't come up + // with a magic end value that would cover the whole ring + if (tokenOrder == 0 && startToken.equals(MIN_TOKEN)) + throw new IllegalArgumentException("Cannot split whole ring with ordered partitioner"); + + OPPToken oppStartToken = (OPPToken)startToken; + OPPToken oppEndToken = (OPPToken)endToken; + + int significantBytes; + BigInteger start, end, range, ringEnd, ringLength; + BigInteger bigNumberOfSplits = BigInteger.valueOf(numberOfSplits); + if (tokenOrder < 0) { + // Since tokens are compared lexicographically, convert to integers using the largest length + // (ex: given 0x0A and 0x0BCD, switch to 0x0A00 and 0x0BCD) + significantBytes = Math.max(oppStartToken.value.capacity(), oppEndToken.value.capacity()); + + // If the number of splits does not fit in the difference between the two integers, use more bytes + // (ex: cannot fit 4 splits between 0x01 and 0x03, so switch to 0x0100 and 0x0300) + // At most 4 additional bytes will be needed, since numberOfSplits is an integer. 
+ int addedBytes = 0; + while (true) { + start = toBigInteger(oppStartToken.value, significantBytes); + end = toBigInteger(oppEndToken.value, significantBytes); + range = end.subtract(start); + if (addedBytes == 4 || range.compareTo(bigNumberOfSplits) >= 0) + break; + significantBytes += 1; + addedBytes += 1; + } + ringEnd = ringLength = null; // won't be used + } else { + // Same logic except that we wrap around the ring + significantBytes = Math.max(oppStartToken.value.capacity(), oppEndToken.value.capacity()); + int addedBytes = 0; + while (true) { + start = toBigInteger(oppStartToken.value, significantBytes); + end = toBigInteger(oppEndToken.value, significantBytes); + ringLength = TWO.pow(significantBytes * 8); + ringEnd = ringLength.subtract(BigInteger.ONE); + range = end.subtract(start).add(ringLength); + if (addedBytes == 4 || range.compareTo(bigNumberOfSplits) >= 0) + break; + significantBytes += 1; + addedBytes += 1; + } + } + + List values = super.split(start, range, + ringEnd, ringLength, + numberOfSplits); + List tokens = Lists.newArrayListWithExpectedSize(values.size()); + for (BigInteger value : values) + tokens.add(new OPPToken(toBytes(value, significantBytes))); + return tokens; + } + + // Convert a token's byte array to a number in order to perform computations. + // This depends on the number of "significant bytes" that we use to normalize all tokens to the same size. + // For example if the token is 0x01 but significantBytes is 2, the result is 8 (0x0100). + private BigInteger toBigInteger(ByteBuffer bb, int significantBytes) { + byte[] bytes = Bytes.getArray(bb); + byte[] target; + if (significantBytes != bytes.length) { + target = new byte[significantBytes]; + System.arraycopy(bytes, 0, target, 0, bytes.length); + } else + target = bytes; + return new BigInteger(1, target); + } + + // Convert a numeric representation back to a byte array. 
+ // Again, the number of significant bytes matters: if the input value is 1 but significantBytes is 2, the + // expected result is 0x0001 (a simple conversion would produce 0x01). + protected ByteBuffer toBytes(BigInteger value, int significantBytes) { + byte[] rawBytes = value.toByteArray(); + byte[] result; + if (rawBytes.length == significantBytes) + result = rawBytes; + else { + result = new byte[significantBytes]; + int start, length; + if (rawBytes[0] == 0) { // that's a sign byte, ignore (it can cause rawBytes.length == significantBytes + 1) + start = 1; + length = rawBytes.length - 1; + } else { + start = 0; + length = rawBytes.length; + } + System.arraycopy(rawBytes, start, result, significantBytes - length, length); + } + return ByteBuffer.wrap(result); + } + } + + @VisibleForTesting + OPPToken(ByteBuffer value) { + this.value = stripTrailingZeroBytes(value); + } + + /** + * @return A new ByteBuffer from the input Buffer with any trailing 0-bytes stripped off. + */ + private static ByteBuffer stripTrailingZeroBytes(ByteBuffer b) { + byte result[] = Bytes.getArray(b); + int zeroIndex = result.length; + for(int i = result.length-1; i > 0; i--) { + if(result[i] == 0) { + zeroIndex = i; + } else { + break; + } + } + return ByteBuffer.wrap(result, 0, zeroIndex); + } + + @Override + public DataType getType() { + return FACTORY.getTokenType(); + } + + @Override + public Object getValue() { + return value; + } + + @Override + public int compareTo(Token other) { + assert other instanceof OPPToken; + return UnsignedBytes.lexicographicalComparator().compare( + Bytes.getArray(value), + Bytes.getArray(((OPPToken)other).value)); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null || this.getClass() != obj.getClass()) + return false; + + return value.equals(((OPPToken)obj).value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } + + @Override + public String toString() { + return 
Bytes.toHexString(value); + } + } + + // RandomPartitioner tokens + static class RPToken extends Token { + + private final BigInteger value; + + public static final Factory FACTORY = new RPTokenFactory(); + + private static class RPTokenFactory extends Factory { + + private static final BigInteger MIN_VALUE = BigInteger.ONE.negate(); + private static final BigInteger MAX_VALUE = BigInteger.valueOf(2).pow(127); + private static final BigInteger RING_LENGTH = MAX_VALUE.add(BigInteger.ONE); + private static final Token MIN_TOKEN = new RPToken(MIN_VALUE); + private static final Token MAX_TOKEN = new RPToken(MAX_VALUE); + + private BigInteger md5(ByteBuffer data) { + try { + MessageDigest digest = MessageDigest.getInstance("MD5"); + digest.update(data.duplicate()); + return new BigInteger(digest.digest()).abs(); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("MD5 doesn't seem to be available on this JVM", e); + } + } + + @Override + RPToken fromString(String tokenStr) { + return new RPToken(new BigInteger(tokenStr)); + } + + @Override + DataType getTokenType() { + return DataType.varint(); + } + + @Override + Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion) { + return new RPToken((BigInteger)getTokenType().deserialize(buffer, protocolVersion)); + } + + @Override + Token minToken() { + return MIN_TOKEN; + } + + @Override + RPToken hash(ByteBuffer partitionKey) { + return new RPToken(md5(partitionKey)); + } + + @Override + List split(Token startToken, Token endToken, int numberOfSplits) { + // edge case: ]min, min] means the whole ring + if (startToken.equals(endToken) && startToken.equals(MIN_TOKEN)) + endToken = MAX_TOKEN; + + BigInteger start = ((RPToken)startToken).value; + BigInteger end = ((RPToken)endToken).value; + + BigInteger range = end.subtract(start); + if (range.compareTo(BigInteger.ZERO) < 0) + range = range.add(RING_LENGTH); + + List values = super.split(start, range, + MAX_VALUE, RING_LENGTH, + numberOfSplits); 
+ List tokens = Lists.newArrayListWithExpectedSize(values.size()); + for (BigInteger value : values) + tokens.add(new RPToken(value)); + return tokens; + } + } + + private RPToken(BigInteger value) { + this.value = value; + } + + @Override + public DataType getType() { + return FACTORY.getTokenType(); + } + + @Override + public Object getValue() { + return value; + } + + @Override + public int compareTo(Token other) { + assert other instanceof RPToken; + return value.compareTo(((RPToken)other).value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null || this.getClass() != obj.getClass()) + return false; + + return value.equals(((RPToken)obj).value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } + + @Override + public String toString() { + return value.toString(); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/TokenRange.java b/driver-core/src/main/java/com/datastax/driver/core/TokenRange.java new file mode 100644 index 00000000000..ac5895e0d8d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/TokenRange.java @@ -0,0 +1,325 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Objects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +/** + * A range of tokens on the Cassandra ring. + *

+ * A range is start-exclusive and end-inclusive. It is empty when start and end are the same token, except if that is the minimum + * token, in which case the range covers the whole ring (this is consistent with the behavior of CQL range queries). + *

+ * Note that CQL does not handle wrapping. To query all partitions in a range, see {@link #unwrap()}. + */ +public final class TokenRange implements Comparable { + private final Token start; + private final Token end; + @VisibleForTesting + final Token.Factory factory; + + TokenRange(Token start, Token end, Token.Factory factory) { + this.start = start; + this.end = end; + this.factory = factory; + } + + /** + * Return the start of the range. + * + * @return the start of the range (exclusive). + */ + public Token getStart() { + return start; + } + + /** + * Return the end of the range. + * + * @return the end of the range (inclusive). + */ + public Token getEnd() { + return end; + } + + /** + * Splits this range into a number of smaller ranges of equal "size" (referring to the number of tokens, not the actual amount of data). + *

+ * Splitting an empty range is not permitted. But note that, in edge cases, splitting a range might produce one or more empty ranges. + * + * @param numberOfSplits the number of splits to create. + * @return the splits. + * + * @throws IllegalArgumentException if the range is empty or if numberOfSplits < 1. + */ + public List splitEvenly(int numberOfSplits) { + if (numberOfSplits < 1) + throw new IllegalArgumentException(String.format("numberOfSplits (%d) must be greater than 0.", numberOfSplits)); + if (isEmpty()) + throw new IllegalArgumentException("Can't split empty range " + this); + + List tokenRanges = new ArrayList(); + List splitPoints = factory.split(start, end, numberOfSplits); + Token splitStart = start; + for (Token splitEnd : splitPoints) { + tokenRanges.add(new TokenRange(splitStart, splitEnd, factory)); + splitStart = splitEnd; + } + tokenRanges.add(new TokenRange(splitStart, end, factory)); + return tokenRanges; + } + + /** + * Returns whether this range is empty. + *

+ * A range is empty when start and end are the same token, except if that is the minimum token, + * in which case the range covers the whole ring (this is consistent with the behavior of CQL + * range queries). + * + * @return whether the range is empty. + */ + public boolean isEmpty() { + return start.equals(end) && !start.equals(factory.minToken()); + } + + /** + * Returns whether this range wraps around the end of the ring. + * + * @return whether this range wraps around. + */ + public boolean isWrappedAround() { + return start.compareTo(end) > 0 && !end.equals(factory.minToken()); + } + + /** + * Splits this range into a list of two non-wrapping ranges. This will return the range itself if it is + * non-wrapping, or two ranges otherwise. + *

+ * For example: + *

    + *
  • {@code ]1,10]} unwraps to itself;
  • + *
  • {@code ]10,1]} unwraps to {@code ]10,min_token]} and {@code ]min_token,1]}.
  • + *
+ *

+ * This is useful for CQL range queries, which do not handle wrapping: + *

+     * {@code
+     * List rows = new ArrayList();
+     * for (TokenRange subRange : range.unwrap()) {
+     *     ResultSet rs = session.execute("SELECT * FROM mytable WHERE token(pk) > ? and token(pk) <= ?",
+     *                                    subRange.getStart(), subRange.getEnd());
+     *     rows.addAll(rs.all());
+     * }
+     * }
+ * + * @return the list of non-wrapping ranges. + */ + public List unwrap() { + if (isWrappedAround()) { + return ImmutableList.of( + new TokenRange(start, factory.minToken(), factory), + new TokenRange(factory.minToken(), end, factory)); + } else { + return ImmutableList.of(this); + } + } + + /** + * Returns whether this range intersects another one. + *

+ * For example: + *

    + *
  • {@code ]3,5]} intersects {@code ]1,4]}, {@code ]4,5]}...
  • + *
  • {@code ]3,5]} does not intersect {@code ]1,2]}, {@code ]2,3]}, {@code ]5,7]}...
  • + *
+ * + * @param that the other range. + * @return whether they intersect. + */ + public boolean intersects(TokenRange that) { + // Empty ranges never intersect any other range + if (this.isEmpty() || that.isEmpty()) + return false; + + return this.contains(that.start, true) + || this.contains(that.end, false) + || that.contains(this.start, true) + || that.contains(this.end, false); + } + + /** + * Computes the intersection of this range with another one. + *

+ * If either of these ranges overlap the the ring, they are unwrapped and the unwrapped + * tokens are compared with one another. + *

+ * This call will fail if the two ranges do not intersect, you must check by calling + * {@link #intersects(TokenRange)} beforehand. + * + * @param that the other range. + * @return the range(s) resulting from the intersection. + * @throws IllegalArgumentException if the ranges do not intersect. + */ + public List intersectWith(TokenRange that) { + if (!this.intersects(that)) + throw new IllegalArgumentException("The two ranges do not intersect, use intersects() before calling this method"); + + List intersected = Lists.newArrayList(); + + // Compare the unwrapped ranges to one another. + List unwrappedForThis = this.unwrap(); + List unwrappedForThat = that.unwrap(); + for(TokenRange t1 : unwrappedForThis) { + for(TokenRange t2 : unwrappedForThat) { + if(t1.intersects(t2)) { + intersected.add(new TokenRange( + (t1.contains(t2.start, true)) ? t2.start : t1.start, + (t1.contains(t2.end, false)) ? t2.end : t1.end, + factory)); + } + } + } + + // If two intersecting ranges were produced, merge them if they are adjacent. + // This could happen in the case that two wrapped ranges intersected. + if(intersected.size() == 2) { + TokenRange t1 = intersected.get(0); + TokenRange t2 = intersected.get(1); + if (t1.end.equals(t2.start) || t2.end.equals(t1.start)) { + return ImmutableList.of(t1.mergeWith(t2)); + } + } + + return intersected; + } + + /** + * Checks whether this range contains a given token. + * + * @param token the token to check for. + * @return whether this range contains the token, i.e. {@code range.start < token <= range.end}. + */ + public boolean contains(Token token) { + return contains(token, false); + } + + // isStart handles the case where the token is the start of another range, for example: + // * ]1,2] contains 2, but it does not contain the start of ]2,3] + // * ]1,2] does not contain 1, but it contains the start of ]1,3] + private boolean contains(Token token, boolean isStart) { + boolean isAfterStart = isStart ? 
token.compareTo(start) >= 0 : token.compareTo(start) > 0; + boolean isBeforeEnd = end.equals(factory.minToken()) || + (isStart ? token.compareTo(end) < 0 : token.compareTo(end) <= 0); + return isWrappedAround() + ? isAfterStart || isBeforeEnd + : isAfterStart && isBeforeEnd; + } + + /** + * Merges this range with another one. + *

+ * The two ranges should either intersect or be adjacent; in other words, the merged range + * should not include tokens that are in neither of the original ranges. + *

+ * For example: + *

    + *
  • merging {@code ]3,5]} with {@code ]4,7]} produces {@code ]3,7]};
  • + *
  • merging {@code ]3,5]} with {@code ]4,5]} produces {@code ]3,5]};
  • + *
  • merging {@code ]3,5]} with {@code ]5,8]} produces {@code ]3,8]};
  • + *
  • merging {@code ]3,5]} with {@code ]6,8]} fails.
  • + *
+ * + * @param that the other range. + * @return the resulting range. + * + * @throws IllegalArgumentException if the ranges neither intersect nor are adjacent. + */ + public TokenRange mergeWith(TokenRange that) { + if (this.equals(that)) + return this; + + if (!(this.intersects(that) || this.end.equals(that.start) || that.end.equals(this.start))) + throw new IllegalArgumentException(String.format( + "Can't merge %s with %s because they neither intersect nor are adjacent", + this, that)); + + if (this.isEmpty()) + return that; + + if (that.isEmpty()) + return this; + + // That's actually "starts in or is adjacent to the end of" + boolean thisStartsInThat = that.contains(this.start, true) || this.start.equals(that.end); + boolean thatStartsInThis = this.contains(that.start, true) || that.start.equals(this.end); + + // This takes care of all the cases that return the full ring, so that we don't have to worry about them below + if (thisStartsInThat && thatStartsInThis) + return fullRing(); + + // Starting at this.start, see how far we can go while staying in at least one of the ranges. + Token mergedEnd = (thatStartsInThis && !this.contains(that.end, false)) + ? that.end + : this.end; + + // Repeat in the other direction. + Token mergedStart = thisStartsInThat ? 
that.start : this.start; + + return new TokenRange(mergedStart, mergedEnd, factory); + } + + private TokenRange fullRing() { + return new TokenRange(factory.minToken(), factory.minToken(), factory); + } + + @Override + public boolean equals(Object other) { + if (other == this) + return true; + if (other instanceof TokenRange) { + TokenRange that = (TokenRange)other; + return Objects.equal(this.start, that.start) && + Objects.equal(this.end, that.end); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hashCode(start, end); + } + + @Override + public String toString() { + return String.format("]%s, %s]", start, end); + } + + @Override public int compareTo(TokenRange other) { + if(this.equals(other)) { + return 0; + } else { + int compareStart = this.start.compareTo(other.start); + return compareStart != 0 ? compareStart : this.end.compareTo(other.end); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/TransportException.java b/driver-core/src/main/java/com/datastax/driver/core/TransportException.java new file mode 100644 index 00000000000..f261b20d365 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/TransportException.java @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; + +/** + * A connection exception that has to do with the transport itself, i.e. that + * suggests the node is down. + */ +class TransportException extends ConnectionException +{ + private static final long serialVersionUID = 0; + + public TransportException(InetSocketAddress address, String msg, Throwable cause) + { + super(address, msg, cause); + } + + public TransportException(InetSocketAddress address, String msg) + { + super(address, msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/TupleType.java b/driver-core/src/main/java/com/datastax/driver/core/TupleType.java new file mode 100644 index 00000000000..71dbbf2955a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/TupleType.java @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.*; + +import com.google.common.collect.ImmutableList; +import com.google.common.reflect.TypeToken; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * A tuple type. + *

+ * A tuple type is a essentially a list of types. + */ +public class TupleType extends DataType { + + private final List types; + + TupleType(List types) { + super(DataType.Name.TUPLE); + this.types = ImmutableList.copyOf(types); + } + + @SuppressWarnings("unchecked") + @Override + TypeCodec codec(ProtocolVersion protocolVersion) { + return (TypeCodec)TypeCodec.tupleOf(this); + } + + /** + * Creates a tuple type given a list of types. + * + * @param types the types for the tuple type. + * @return the newly created tuple type. + */ + public static TupleType of(DataType... types) { + return new TupleType(Arrays.asList(types)); + } + + /** + * The (immutable) list of types composing this tuple type. + * + * @return the (immutable) list of types composing this tuple type. + */ + public List getComponentTypes() { + return types; + } + + /** + * Returns a new empty value for this tuple type. + * + * @return an empty (with all component to {@code null}) value for this + * user type definition. + */ + public TupleValue newValue() { + return new TupleValue(this); + } + + /** + * Returns a new value for this tuple type that uses the provided values + * for the components. + *

+ * The numbers of values passed to this method must correspond to the + * number of components in this tuple type. The {@code i}th parameter + * value will then be assigned to the {@code i}th component of the resulting + * tuple value. + * + * @param values the values to use for the component of the resulting + * tuple. + * @return a new tuple values based on the provided values. + * + * @throws IllegalArgumentException if the number of {@code values} + * provided does not correspond to the number of components in this tuple + * type. + * @throws InvalidTypeException if any of the provided value is not of + * the correct type for the component. + */ + public TupleValue newValue(Object... values) { + if (values.length != types.size()) + throw new IllegalArgumentException(String.format("Invalid number of values. Expecting %d but got %d", types.size(), values.length)); + + TupleValue t = newValue(); + for (int i = 0; i < values.length; i++) + t.setValue(i, values[i] == null ? null : types.get(i).serialize(values[i], ProtocolVersion.V3)); + return t; + } + + @Override + public boolean isFrozen() { + return true; + } + + @Override + boolean canBeDeserializedAs(TypeToken typeToken) { + return typeToken.isAssignableFrom(getName().javaType); + } + + @Override + public int hashCode() { + return Arrays.hashCode(new Object[]{ name, types }); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof TupleType)) + return false; + + TupleType d = (TupleType)o; + return name == d.name && types.equals(d.types); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + for (DataType type : types) { + sb.append(sb.length() == 0 ? 
"frozen>").toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/TupleValue.java b/driver-core/src/main/java/com/datastax/driver/core/TupleValue.java new file mode 100644 index 00000000000..da9a19fb605 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/TupleValue.java @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.Arrays; +import java.util.List; + +/** + * A value for a Tuple. + */ +public class TupleValue extends AbstractAddressableByIndexData { + + private final TupleType type; + + /** + * Builds a new value for a tuple. + * + * @param types the types of the tuple's components. + */ + TupleValue(TupleType type) { + // All things in a tuple are encoded with the protocol v3 + super(ProtocolVersion.V3, type.getComponentTypes().size()); + this.type = type; + } + + protected DataType getType(int i) { + return type.getComponentTypes().get(i); + } + + @Override + protected String getName(int i) { + // This is used for error messages + return "component " + i; + } + + /** + * The tuple type this is a value of. + * + * @return The tuple type this is a value of. 
+ */ + public TupleType getType() { + return type; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof TupleValue)) + return false; + + TupleValue that = (TupleValue)o; + if (!type.equals(that.type)) + return false; + + return super.equals(o); + } + + @Override + public int hashCode() { + return super.hashCode(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("("); + for (int i = 0; i < values.length; i++) { + if (i > 0) + sb.append(", "); + + DataType dt = getType(i); + sb.append(values[i] == null ? "null" : dt.format(dt.deserialize(values[i], ProtocolVersion.V3))); + } + sb.append(")"); + return sb.toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/TypeCodec.java b/driver-core/src/main/java/com/datastax/driver/core/TypeCodec.java new file mode 100644 index 00000000000..1d41048d412 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/TypeCodec.java @@ -0,0 +1,1325 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.*; +import java.nio.charset.*; +import java.text.*; +import java.util.*; +import java.util.regex.Pattern; + +import com.datastax.driver.core.exceptions.InvalidTypeException; +import com.datastax.driver.core.utils.Bytes; + +abstract class TypeCodec { + + // Somehow those don't seem to get properly initialized if they're not here. The reason + // escape me right now so let's just leave it here for now + public static final StringCodec utf8Instance = new StringCodec(Charset.forName("UTF-8")); + public static final StringCodec asciiInstance = new StringCodec(Charset.forName("US-ASCII")); + + private static final Map> primitiveCodecs = new EnumMap>(DataType.Name.class); + static { + primitiveCodecs.put(DataType.Name.ASCII, StringCodec.asciiInstance); + primitiveCodecs.put(DataType.Name.BIGINT, LongCodec.instance); + primitiveCodecs.put(DataType.Name.BLOB, BytesCodec.instance); + primitiveCodecs.put(DataType.Name.BOOLEAN, BooleanCodec.instance); + primitiveCodecs.put(DataType.Name.COUNTER, LongCodec.instance); + primitiveCodecs.put(DataType.Name.DECIMAL, DecimalCodec.instance); + primitiveCodecs.put(DataType.Name.DOUBLE, DoubleCodec.instance); + primitiveCodecs.put(DataType.Name.FLOAT, FloatCodec.instance); + primitiveCodecs.put(DataType.Name.INET, InetCodec.instance); + primitiveCodecs.put(DataType.Name.INT, IntCodec.instance); + primitiveCodecs.put(DataType.Name.TEXT, StringCodec.utf8Instance); + primitiveCodecs.put(DataType.Name.TIMESTAMP, DateCodec.instance); + primitiveCodecs.put(DataType.Name.UUID, UUIDCodec.instance); + primitiveCodecs.put(DataType.Name.VARCHAR, StringCodec.utf8Instance); + primitiveCodecs.put(DataType.Name.VARINT, BigIntegerCodec.instance); + primitiveCodecs.put(DataType.Name.TIMEUUID, TimeUUIDCodec.instance); + primitiveCodecs.put(DataType.Name.CUSTOM, 
BytesCodec.instance); + } + + private static class PrimitiveCollectionCodecs + { + public final Map>> primitiveListsCodecs = new EnumMap>>(DataType.Name.class); + public final Map>> primitiveSetsCodecs = new EnumMap>>(DataType.Name.class); + public final Map>>> primitiveMapsCodecs = new EnumMap>>>(DataType.Name.class); + + @SuppressWarnings({"unchecked", "rawtypes"}) + public PrimitiveCollectionCodecs(ProtocolVersion protocolVersion) + { + for (Map.Entry> entry : primitiveCodecs.entrySet()) { + DataType.Name type = entry.getKey(); + TypeCodec codec = entry.getValue(); + primitiveListsCodecs.put(type, new ListCodec(codec, protocolVersion)); + primitiveSetsCodecs.put(type, new SetCodec(codec, protocolVersion)); + Map>> valueMap = new EnumMap>>(DataType.Name.class); + for (Map.Entry> valueEntry : primitiveCodecs.entrySet()) + valueMap.put(valueEntry.getKey(), new MapCodec(codec, valueEntry.getValue(), protocolVersion)); + primitiveMapsCodecs.put(type, valueMap); + } + } + + private static final PrimitiveCollectionCodecs primitiveCollectionCodecsV2 = new PrimitiveCollectionCodecs(ProtocolVersion.V2); + private static final PrimitiveCollectionCodecs primitiveCollectionCodecsV3 = new PrimitiveCollectionCodecs(ProtocolVersion.V3); + + static PrimitiveCollectionCodecs forVersion(ProtocolVersion version) { + // This happens during protocol negociation, when the version is not known yet. + // Use the smallest supported version, which is enough for what we need to do at this stage. 
+ if (version == null) + version = ProtocolVersion.V1; + + switch (version) { + case V1: + case V2: + return primitiveCollectionCodecsV2; + case V3: + return primitiveCollectionCodecsV3; + default: + throw version.unsupported(); + } + } + } + + + private TypeCodec() {} + + public abstract T parse(String value); + public abstract String format(T value); + + public abstract ByteBuffer serialize(T value); + public abstract T deserialize(ByteBuffer bytes); + + @SuppressWarnings("unchecked") + static TypeCodec createFor(DataType.Name name) { + assert !name.isCollection(); + return (TypeCodec)primitiveCodecs.get(name); + } + + @SuppressWarnings("unchecked") + static TypeCodec> listOf(DataType arg, ProtocolVersion protocolVersion) { + PrimitiveCollectionCodecs codecs = PrimitiveCollectionCodecs.forVersion(protocolVersion); + TypeCodec> codec = codecs.primitiveListsCodecs.get(arg.getName()); + return codec != null ? (TypeCodec)codec : new ListCodec(arg.codec(protocolVersion), protocolVersion); + } + + @SuppressWarnings("unchecked") + static TypeCodec> setOf(DataType arg, ProtocolVersion protocolVersion) { + PrimitiveCollectionCodecs codecs = PrimitiveCollectionCodecs.forVersion(protocolVersion); + TypeCodec> codec = codecs.primitiveSetsCodecs.get(arg.getName()); + return codec != null ? (TypeCodec)codec : new SetCodec(arg.codec(protocolVersion), protocolVersion); + } + + @SuppressWarnings("unchecked") + static TypeCodec> mapOf(DataType keys, DataType values, ProtocolVersion protocolVersion) { + PrimitiveCollectionCodecs codecs = PrimitiveCollectionCodecs.forVersion(protocolVersion); + Map>> valueCodecs = codecs.primitiveMapsCodecs.get(keys.getName()); + TypeCodec> codec = valueCodecs == null ? null : valueCodecs.get(values.getName()); + return codec != null ? 
(TypeCodec)codec : new MapCodec(keys.codec(protocolVersion), values.codec(protocolVersion), protocolVersion); + } + + static UDTCodec udtOf(UserType definition) { + return new UDTCodec(definition); + } + + static TupleCodec tupleOf(TupleType type) { + return new TupleCodec(type); + } + + /* This is ugly, but not sure how we can do much better/faster + * Returns null if it's doesn't correspond to a known type. + * + * Also, note that this only a dataType that is fit for the value, + * but for instance, for a UUID, this will return DataType.uuid() but + * never DataType.timeuuid(). Also, provided an empty list, this will return + * DataType.list(DataType.blob()), which is semi-random. This is ok if all + * we want is serialize the value, but that's probably all we should do with + * the return of this method. + */ + static DataType getDataTypeFor(Object value) { + // Starts with ByteBuffer, so that if already serialized value are provided, we don't have the + // cost of testing a bunch of other types first + if (value instanceof ByteBuffer) + return DataType.blob(); + + if (value instanceof Number) { + if (value instanceof Integer) + return DataType.cint(); + if (value instanceof Long) + return DataType.bigint(); + if (value instanceof Float) + return DataType.cfloat(); + if (value instanceof Double) + return DataType.cdouble(); + if (value instanceof BigDecimal) + return DataType.decimal(); + if (value instanceof BigInteger) + return DataType.varint(); + return null; + } + + if (value instanceof String) + return DataType.text(); + + if (value instanceof Boolean) + return DataType.cboolean(); + + if (value instanceof InetAddress) + return DataType.inet(); + + if (value instanceof Date) + return DataType.timestamp(); + + if (value instanceof UUID) + return DataType.uuid(); + + if (value instanceof List) { + List l = (List)value; + if (l.isEmpty()) + return DataType.list(DataType.blob()); + DataType eltType = getDataTypeFor(l.get(0)); + return eltType == null ? 
null : DataType.list(eltType); + } + + if (value instanceof Set) { + Set s = (Set)value; + if (s.isEmpty()) + return DataType.set(DataType.blob()); + DataType eltType = getDataTypeFor(s.iterator().next()); + return eltType == null ? null : DataType.set(eltType); + } + + if (value instanceof Map) { + Map m = (Map)value; + if (m.isEmpty()) + return DataType.map(DataType.blob(), DataType.blob()); + Map.Entry e = m.entrySet().iterator().next(); + DataType keyType = getDataTypeFor(e.getKey()); + DataType valueType = getDataTypeFor(e.getValue()); + return keyType == null || valueType == null + ? null + : DataType.map(keyType, valueType); + } + + if (value instanceof UDTValue) { + return ((UDTValue) value).getType(); + } + + if (value instanceof TupleValue) { + return ((TupleValue) value).getType(); + } + + return null; + } + + private static ByteBuffer pack(List buffers, int elements, ProtocolVersion version) { + int size = 0; + for (ByteBuffer bb : buffers) { + int elemSize = sizeOfValue(bb, version); + size += elemSize; + } + + ByteBuffer result = ByteBuffer.allocate(sizeOfCollectionSize(elements, version) + size); + writeCollectionSize(result, elements, version); + for (ByteBuffer bb : buffers) + writeCollectionValue(result, bb, version); + return (ByteBuffer)result.flip(); + } + + private static void writeCollectionSize(ByteBuffer output, int elements, ProtocolVersion version) { + switch (version) { + case V1: + case V2: + if (elements > 65535) + throw new IllegalArgumentException("Native protocol version 2 supports up to 65535 elements in any collection - but collection contains " + elements + " elements"); + output.putShort((short)elements); + break; + case V3: + output.putInt(elements); + break; + default: + throw version.unsupported(); + } + } + + private static int getUnsignedShort(ByteBuffer bb) { + int length = (bb.get() & 0xFF) << 8; + return length | (bb.get() & 0xFF); + } + + private static int readCollectionSize(ByteBuffer input, ProtocolVersion version) { 
+ switch (version) { + case V1: + case V2: + return getUnsignedShort(input); + case V3: + return input.getInt(); + default: + throw version.unsupported(); + } + } + + private static int sizeOfCollectionSize(int elements, ProtocolVersion version) { + switch (version) { + case V1: + case V2: + return 2; + case V3: + return 4; + default: + throw version.unsupported(); + } + } + + private static void writeCollectionValue(ByteBuffer output, ByteBuffer value, ProtocolVersion version) { + switch (version) { + case V1: + case V2: + assert value != null; + output.putShort((short)value.remaining()); + output.put(value.duplicate()); + break; + case V3: + if (value == null) { + output.putInt(-1); + } else { + output.putInt(value.remaining()); + output.put(value.duplicate()); + } + break; + default: + throw version.unsupported(); + } + } + + private static ByteBuffer readBytes(ByteBuffer bb, int length) { + ByteBuffer copy = bb.duplicate(); + copy.limit(copy.position() + length); + bb.position(bb.position() + length); + return copy; + } + + private static ByteBuffer readCollectionValue(ByteBuffer input, ProtocolVersion version) { + int size; + switch (version) { + case V1: + case V2: + size = getUnsignedShort(input); + break; + case V3: + size = input.getInt(); + break; + default: + throw version.unsupported(); + } + return size < 0 ? null : readBytes(input, size); + } + + private static int sizeOfValue(ByteBuffer value, ProtocolVersion version) { + switch (version) { + case V1: + case V2: + int elemSize = value.remaining(); + if (elemSize > 65535) + throw new IllegalArgumentException("Native protocol version 2 supports only elements with size up to 65535 bytes - but element size is " + elemSize + " bytes"); + return 2 + elemSize; + case V3: + return value == null ? 
4 : 4 + value.remaining(); + default: + throw version.unsupported(); + } + } + + static class StringCodec extends TypeCodec { + + private final Charset charset; + + private StringCodec(Charset charset) { + this.charset = charset; + } + + @Override + public String parse(String value) { + if (value.charAt(0) != '\'' || value.charAt(value.length() - 1) != '\'') + throw new InvalidTypeException("text values must enclosed by a single quotes"); + + return value.substring(1, value.length() - 1).replace("''", "'"); + } + + @Override + public String format(String value) { + return '\'' + replace(value, '\'', "''") + '\''; + } + + // Simple method to replace a single character. String.replace is a bit too + // inefficient (see JAVA-67) + static String replace(String text, char search, String replacement) { + if (text == null || text.isEmpty()) + return text; + + int nbMatch = 0; + int start = -1; + do { + start = text.indexOf(search, start+1); + if (start != -1) + ++nbMatch; + } while (start != -1); + + if (nbMatch == 0) + return text; + + int newLength = text.length() + nbMatch * (replacement.length() - 1); + char[] result = new char[newLength]; + int newIdx = 0; + for (int i = 0; i < text.length(); i++) { + char c = text.charAt(i); + if (c == search) { + for (int r = 0; r < replacement.length(); r++) + result[newIdx++] = replacement.charAt(r); + } else { + result[newIdx++] = c; + } + } + return new String(result); + } + + @Override + public ByteBuffer serialize(String value) { + return ByteBuffer.wrap(value.getBytes(charset)); + } + + @Override + public String deserialize(ByteBuffer bytes) { + return new String(Bytes.getArray(bytes), charset); + } + } + + static class LongCodec extends TypeCodec { + + public static final LongCodec instance = new LongCodec(); + + private LongCodec() {} + + @Override + public Long parse(String value) { + try { + return Long.parseLong(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException(String.format("Cannot parse 
64-bits long value from \"%s\"", value)); + } + } + + @Override + public String format(Long value) { + return Long.toString(value); + } + + @Override + public ByteBuffer serialize(Long value) { + return serializeNoBoxing(value); + } + + public ByteBuffer serializeNoBoxing(long value) { + ByteBuffer bb = ByteBuffer.allocate(8); + bb.putLong(0, value); + return bb; + } + + @Override + public Long deserialize(ByteBuffer bytes) { + return deserializeNoBoxing(bytes); + } + + public long deserializeNoBoxing(ByteBuffer bytes) { + if (bytes.remaining() != 8) + throw new InvalidTypeException("Invalid 64-bits long value, expecting 8 bytes but got " + bytes.remaining()); + + return bytes.getLong(bytes.position()); + } + } + + static class BytesCodec extends TypeCodec { + + public static final BytesCodec instance = new BytesCodec(); + + private BytesCodec() {} + + @Override + public ByteBuffer parse(String value) { + return Bytes.fromHexString(value); + } + + @Override + public String format(ByteBuffer value) { + return Bytes.toHexString(value); + } + + @Override + public ByteBuffer serialize(ByteBuffer value) { + return value.duplicate(); + } + + @Override + public ByteBuffer deserialize(ByteBuffer bytes) { + return bytes.duplicate(); + } + } + + static class BooleanCodec extends TypeCodec { + private static final ByteBuffer TRUE = ByteBuffer.wrap(new byte[]{1}); + private static final ByteBuffer FALSE = ByteBuffer.wrap(new byte[]{0}); + + public static final BooleanCodec instance = new BooleanCodec(); + + private BooleanCodec() {} + + @Override + public Boolean parse(String value) { + if (value.equalsIgnoreCase(Boolean.FALSE.toString())) + return false; + if (value.equalsIgnoreCase(Boolean.TRUE.toString())) + return true; + + throw new InvalidTypeException(String.format("Cannot parse boolean value from \"%s\"", value)); + } + + @Override + public String format(Boolean value) { + return value ? 
"true" : "false"; + } + + @Override + public ByteBuffer serialize(Boolean value) { + return serializeNoBoxing(value); + } + + public ByteBuffer serializeNoBoxing(boolean value) { + return value ? TRUE.duplicate() : FALSE.duplicate(); + } + + @Override + public Boolean deserialize(ByteBuffer bytes) { + return deserializeNoBoxing(bytes); + } + + public boolean deserializeNoBoxing(ByteBuffer bytes) { + if (bytes.remaining() != 1) + throw new InvalidTypeException("Invalid boolean value, expecting 1 byte but got " + bytes.remaining()); + + return bytes.get(bytes.position()) != 0; + } + } + + static class DecimalCodec extends TypeCodec { + + public static final DecimalCodec instance = new DecimalCodec(); + + private DecimalCodec() {} + + @Override + public BigDecimal parse(String value) { + try { + return new BigDecimal(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException(String.format("Cannot parse decimal value from \"%s\"", value)); + } + } + + @Override + public String format(BigDecimal value) { + return value.toString(); + } + + @Override + public ByteBuffer serialize(BigDecimal value) { + BigInteger bi = value.unscaledValue(); + int scale = value.scale(); + byte[] bibytes = bi.toByteArray(); + + ByteBuffer bytes = ByteBuffer.allocate(4 + bibytes.length); + bytes.putInt(scale); + bytes.put(bibytes); + bytes.rewind(); + return bytes; + } + + @Override + public BigDecimal deserialize(ByteBuffer bytes) { + if (bytes.remaining() < 4) + throw new InvalidTypeException("Invalid decimal value, expecting at least 4 bytes but got " + bytes.remaining()); + + bytes = bytes.duplicate(); + int scale = bytes.getInt(); + byte[] bibytes = new byte[bytes.remaining()]; + bytes.get(bibytes); + + BigInteger bi = new BigInteger(bibytes); + return new BigDecimal(bi, scale); + } + } + + static class DoubleCodec extends TypeCodec { + + public static final DoubleCodec instance = new DoubleCodec(); + + private DoubleCodec() {} + + @Override + public Double 
parse(String value) { + try { + return Double.parseDouble(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException(String.format("Cannot parse 64-bits double value from \"%s\"", value)); + } + } + + @Override + public String format(Double value) { + return Double.toString(value); + } + + @Override + public ByteBuffer serialize(Double value) { + return serializeNoBoxing(value); + } + + public ByteBuffer serializeNoBoxing(double value) { + ByteBuffer bb = ByteBuffer.allocate(8); + bb.putDouble(0, value); + return bb; + } + + @Override + public Double deserialize(ByteBuffer bytes) { + return deserializeNoBoxing(bytes); + } + + public double deserializeNoBoxing(ByteBuffer bytes) { + if (bytes.remaining() != 8) + throw new InvalidTypeException("Invalid 64-bits double value, expecting 8 bytes but got " + bytes.remaining()); + + return bytes.getDouble(bytes.position()); + } + } + + static class FloatCodec extends TypeCodec { + + public static final FloatCodec instance = new FloatCodec(); + + private FloatCodec() {} + + @Override + public Float parse(String value) { + try { + return Float.parseFloat(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException(String.format("Cannot parse 32-bits float value from \"%s\"", value)); + } + } + + @Override + public String format(Float value) { + return Float.toString(value); + } + + @Override + public ByteBuffer serialize(Float value) { + return serializeNoBoxing(value); + } + + public ByteBuffer serializeNoBoxing(float value) { + ByteBuffer bb = ByteBuffer.allocate(4); + bb.putFloat(0, value); + return bb; + } + + @Override + public Float deserialize(ByteBuffer bytes) { + return deserializeNoBoxing(bytes); + } + + public float deserializeNoBoxing(ByteBuffer bytes) { + if (bytes.remaining() != 4) + throw new InvalidTypeException("Invalid 32-bits float value, expecting 4 bytes but got " + bytes.remaining()); + + return bytes.getFloat(bytes.position()); + } + } + + static class InetCodec 
extends TypeCodec { + + public static final InetCodec instance = new InetCodec(); + + private InetCodec() {} + + @Override + public InetAddress parse(String value) { + value = value.trim(); + if (value.charAt(0) != '\'' || value.charAt(value.length() - 1) != '\'') + throw new InvalidTypeException(String.format("inet values must be enclosed in single quotes (\"%s\")", value)); + try { + return InetAddress.getByName(value.substring(1, value.length() - 1)); + } catch (Exception e) { + throw new InvalidTypeException(String.format("Cannot parse inet value from \"%s\"", value)); + } + } + + @Override + public String format(InetAddress value) { + return "'" + value.getHostAddress() + "'"; + } + + @Override + public ByteBuffer serialize(InetAddress value) { + return ByteBuffer.wrap(value.getAddress()); + } + + @Override + public InetAddress deserialize(ByteBuffer bytes) { + try { + return InetAddress.getByAddress(Bytes.getArray(bytes)); + } catch (UnknownHostException e) { + throw new InvalidTypeException("Invalid bytes for inet value, got " + bytes.remaining() + " bytes"); + } + } + } + + static class IntCodec extends TypeCodec { + + public static final IntCodec instance = new IntCodec(); + + private IntCodec() {} + + @Override + public Integer parse(String value) { + try { + return Integer.parseInt(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException(String.format("Cannot parse 32-bits int value from \"%s\"", value)); + } + } + + @Override + public String format(Integer value) { + return Integer.toString(value); + } + + @Override + public ByteBuffer serialize(Integer value) { + return serializeNoBoxing(value); + } + + public ByteBuffer serializeNoBoxing(int value) { + ByteBuffer bb = ByteBuffer.allocate(4); + bb.putInt(0, value); + return bb; + } + + @Override + public Integer deserialize(ByteBuffer bytes) { + return deserializeNoBoxing(bytes); + } + + public int deserializeNoBoxing(ByteBuffer bytes) { + if (bytes.remaining() != 4) + throw new 
InvalidTypeException("Invalid 32-bits integer value, expecting 4 bytes but got " + bytes.remaining()); + + return bytes.getInt(bytes.position()); + } + } + + static class DateCodec extends TypeCodec { + + private static final String[] iso8601Patterns = new String[] { + "yyyy-MM-dd HH:mm", + "yyyy-MM-dd HH:mm:ss", + "yyyy-MM-dd HH:mmZ", + "yyyy-MM-dd HH:mm:ssZ", + "yyyy-MM-dd HH:mm:ss.SSS", + "yyyy-MM-dd HH:mm:ss.SSSZ", + "yyyy-MM-dd'T'HH:mm", + "yyyy-MM-dd'T'HH:mmZ", + "yyyy-MM-dd'T'HH:mm:ss", + "yyyy-MM-dd'T'HH:mm:ssZ", + "yyyy-MM-dd'T'HH:mm:ss.SSS", + "yyyy-MM-dd'T'HH:mm:ss.SSSZ", + "yyyy-MM-dd", + "yyyy-MM-ddZ" + }; + + public static final DateCodec instance = new DateCodec(); + private static final Pattern IS_LONG_PATTERN = Pattern.compile("^-?\\d+$"); + + private DateCodec() {} + + /* + * Copied and adapted from apache commons DateUtils.parseStrictly method (that is used Cassandra side + * to parse date strings). It is copied here so as to not create a dependency on apache commons "just + * for this". 
+ */ + private static Date parseDate(String str, final String[] parsePatterns) throws ParseException { + SimpleDateFormat parser = new SimpleDateFormat(); + parser.setLenient(false); + + ParsePosition pos = new ParsePosition(0); + for (String parsePattern : parsePatterns) { + String pattern = parsePattern; + + parser.applyPattern(pattern); + pos.setIndex(0); + + String str2 = str; + Date date = parser.parse(str2, pos); + if (date != null && pos.getIndex() == str2.length()) { + return date; + } + } + throw new ParseException("Unable to parse the date: " + str, -1); + } + + @Override + public Date parse(String value) { + if (IS_LONG_PATTERN.matcher(value).matches()) { + try { + return new Date(Long.parseLong(value)); + } catch (NumberFormatException e) { + throw new InvalidTypeException(String.format("Cannot parse timestamp value from \"%s\"", value)); + } + } + + try { + return parseDate(value, iso8601Patterns); + } catch (ParseException e) { + throw new InvalidTypeException(String.format("Cannot parse date value from \"%s\"", value)); + } + } + + @Override + public String format(Date value) { + return Long.toString(value.getTime()); + } + + @Override + public ByteBuffer serialize(Date value) { + return LongCodec.instance.serializeNoBoxing(value.getTime()); + } + + @Override + public Date deserialize(ByteBuffer bytes) { + return new Date(LongCodec.instance.deserializeNoBoxing(bytes)); + } + } + + static class UUIDCodec extends TypeCodec { + + public static final UUIDCodec instance = new UUIDCodec(); + + protected UUIDCodec() {} + + @Override + public UUID parse(String value) { + try { + return UUID.fromString(value); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException(String.format("Cannot parse UUID value from \"%s\"", value)); + } + } + + @Override + public String format(UUID value) { + return value.toString(); + } + + @Override + public ByteBuffer serialize(UUID value) { + ByteBuffer bb = ByteBuffer.allocate(16); + bb.putLong(0, 
value.getMostSignificantBits()); + bb.putLong(8, value.getLeastSignificantBits()); + return bb; + } + + @Override + public UUID deserialize(ByteBuffer bytes) { + return new UUID(bytes.getLong(bytes.position() + 0), bytes.getLong(bytes.position() + 8)); + } + } + + static class TimeUUIDCodec extends UUIDCodec { + + public static final TimeUUIDCodec instance = new TimeUUIDCodec(); + + private TimeUUIDCodec() {} + + @Override + public UUID parse(String value) { + UUID id = super.parse(value); + if (id.version() != 1) + throw new InvalidTypeException(String.format("Cannot parse type 1 UUID value from \"%s\": represents a type %d UUID", value, id.version())); + return id; + } + + @Override + public UUID deserialize(ByteBuffer bytes) { + UUID id = super.deserialize(bytes); + if (id.version() != 1) + throw new InvalidTypeException(String.format("Error deserializing type 1 UUID: deserialized value %s represents a type %d UUID", id, id.version())); + return id; + } + } + + static class BigIntegerCodec extends TypeCodec { + + public static final BigIntegerCodec instance = new BigIntegerCodec(); + + private BigIntegerCodec() {} + + @Override + public BigInteger parse(String value) { + try { + return new BigInteger(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException(String.format("Cannot parse varint value from \"%s\"", value)); + } + } + + @Override + public String format(BigInteger value) { + return value.toString(); + } + + @Override + public ByteBuffer serialize(BigInteger value) { + return ByteBuffer.wrap(value.toByteArray()); + } + + @Override + public BigInteger deserialize(ByteBuffer bytes) { + return new BigInteger(Bytes.getArray(bytes)); + } + } + + static class ListCodec extends TypeCodec> { + + private final TypeCodec eltCodec; + private final ProtocolVersion protocolVersion; + + public ListCodec(TypeCodec eltCodec, ProtocolVersion protocolVersion) { + this.eltCodec = eltCodec; + this.protocolVersion = protocolVersion; + } + + @Override + 
public List parse(String value) { + int idx = ParseUtils.skipSpaces(value, 0); + if (value.charAt(idx++) != '[') + throw new InvalidTypeException(String.format("cannot parse list value from \"%s\", at character %d expecting '[' but got '%c'", value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + + if (value.charAt(idx) == ']') + return Collections.emptyList(); + + List l = new ArrayList(); + while (idx < value.length()) { + int n; + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException(String.format("Cannot parse list value from \"%s\", invalid CQL value at character %d", value, idx), e); + } + + l.add(eltCodec.parse(value.substring(idx, n))); + idx = n; + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx) == ']') + return l; + if (value.charAt(idx++) != ',') + throw new InvalidTypeException(String.format("Cannot parse list value from \"%s\", at character %d expecting ',' but got '%c'", value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + } + throw new InvalidTypeException(String.format("Malformed list value \"%s\", missing closing ']'", value)); + } + + @Override + public String format(List value) { + StringBuilder sb = new StringBuilder(); + sb.append("["); + for (int i = 0; i < value.size(); i++) { + if (i != 0) + sb.append(", "); + sb.append(eltCodec.format(value.get(i))); + } + sb.append("]"); + return sb.toString(); + } + + @Override + public ByteBuffer serialize(List value) { + List bbs = new ArrayList(value.size()); + for (T elt : value) + bbs.add(eltCodec.serialize(elt)); + + return pack(bbs, value.size(), protocolVersion); + } + + @Override + public List deserialize(ByteBuffer bytes) { + try { + ByteBuffer input = bytes.duplicate(); + int n = readCollectionSize(input, protocolVersion); + List l = new ArrayList(n); + for (int i = 0; i < n; i++) { + ByteBuffer databb = readCollectionValue(input, protocolVersion); + 
l.add(eltCodec.deserialize(databb)); + } + return l; + } catch (BufferUnderflowException e) { + throw new InvalidTypeException("Not enough bytes to deserialize list"); + } + } + } + + static class SetCodec extends TypeCodec> { + + private final TypeCodec eltCodec; + private final ProtocolVersion protocolVersion; + + public SetCodec(TypeCodec eltCodec, ProtocolVersion protocolVersion) { + this.eltCodec = eltCodec; + this.protocolVersion = protocolVersion; + } + + @Override + public Set parse(String value) { + int idx = ParseUtils.skipSpaces(value, 0); + if (value.charAt(idx++) != '{') + throw new InvalidTypeException(String.format("cannot parse set value from \"%s\", at character %d expecting '{' but got '%c'", value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + + if (value.charAt(idx) == '}') + return Collections.emptySet(); + + Set s = new HashSet(); + while (idx < value.length()) { + int n; + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException(String.format("Cannot parse set value from \"%s\", invalid CQL value at character %d", value, idx), e); + } + + s.add(eltCodec.parse(value.substring(idx, n))); + idx = n; + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx) == '}') + return s; + if (value.charAt(idx++) != ',') + throw new InvalidTypeException(String.format("Cannot parse set value from \"%s\", at character %d expecting ',' but got '%c'", value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + } + throw new InvalidTypeException(String.format("Malformed set value \"%s\", missing closing '}'", value)); + } + + @Override + public String format(Set value) { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + int i = 0; + for (T v : value) { + if (i++ != 0) + sb.append(", "); + sb.append(eltCodec.format(v)); + } + sb.append("}"); + return sb.toString(); + } + + @Override + public ByteBuffer serialize(Set value) { + 
List bbs = new ArrayList(value.size()); + for (T elt : value) + bbs.add(eltCodec.serialize(elt)); + + return pack(bbs, value.size(), protocolVersion); + } + + @Override + public Set deserialize(ByteBuffer bytes) { + try { + ByteBuffer input = bytes.duplicate(); + int n = readCollectionSize(input, protocolVersion); + Set l = new LinkedHashSet(n); + for (int i = 0; i < n; i++) { + ByteBuffer databb = readCollectionValue(input, protocolVersion); + l.add(eltCodec.deserialize(databb)); + } + return l; + } catch (BufferUnderflowException e) { + throw new InvalidTypeException("Not enough bytes to deserialize a set"); + } + } + } + + static class MapCodec extends TypeCodec> { + + private final TypeCodec keyCodec; + private final TypeCodec valueCodec; + private final ProtocolVersion protocolVersion; + + public MapCodec(TypeCodec keyCodec, TypeCodec valueCodec, ProtocolVersion protocolVersion) { + this.keyCodec = keyCodec; + this.valueCodec = valueCodec; + this.protocolVersion = protocolVersion; + } + + @Override + public Map parse(String value) { + int idx = ParseUtils.skipSpaces(value, 0); + if (value.charAt(idx++) != '{') + throw new InvalidTypeException(String.format("cannot parse map value from \"%s\", at character %d expecting '{' but got '%c'", value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + + if (value.charAt(idx) == '}') + return Collections.emptyMap(); + + Map m = new HashMap(); + while (idx < value.length()) { + int n; + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException(String.format("Cannot parse map value from \"%s\", invalid CQL value at character %d", value, idx), e); + } + + K k = keyCodec.parse(value.substring(idx, n)); + idx = n; + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx++) != ':') + throw new InvalidTypeException(String.format("Cannot parse map value from \"%s\", at character %d expecting ':' but got '%c'", value, idx, 
value.charAt(idx))); + idx = ParseUtils.skipSpaces(value, idx); + + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException(String.format("Cannot parse map value from \"%s\", invalid CQL value at character %d", value, idx), e); + } + + V v = valueCodec.parse(value.substring(idx, n)); + idx = n; + + m.put(k, v); + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx) == '}') + return m; + if (value.charAt(idx++) != ',') + throw new InvalidTypeException(String.format("Cannot parse map value from \"%s\", at character %d expecting ',' but got '%c'", value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + + } + throw new InvalidTypeException(String.format("Malformed map value \"%s\", missing closing '}'", value)); + } + + @Override + public String format(Map value) { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + int i = 0; + for (Map.Entry e : value.entrySet()) { + if (i++ != 0) + sb.append(", "); + sb.append(keyCodec.format(e.getKey())); + sb.append(":"); + sb.append(valueCodec.format(e.getValue())); + } + sb.append("}"); + return sb.toString(); + } + + @Override + public ByteBuffer serialize(Map value) { + List bbs = new ArrayList(2 * value.size()); + for (Map.Entry entry : value.entrySet()) { + bbs.add(keyCodec.serialize(entry.getKey())); + bbs.add(valueCodec.serialize(entry.getValue())); + } + return pack(bbs, value.size(), protocolVersion); + } + + @Override + public Map deserialize(ByteBuffer bytes) { + try { + ByteBuffer input = bytes.duplicate(); + int n = readCollectionSize(input, protocolVersion); + Map m = new LinkedHashMap(n); + for (int i = 0; i < n; i++) { + ByteBuffer kbb = readCollectionValue(input, protocolVersion); + ByteBuffer vbb = readCollectionValue(input, protocolVersion); + m.put(keyCodec.deserialize(kbb), valueCodec.deserialize(vbb)); + } + return m; + } catch (BufferUnderflowException e) { + throw new InvalidTypeException("Not 
enough bytes to deserialize a map"); + } + } + } + + static class UDTCodec extends TypeCodec { + + private final UserType definition; + + public UDTCodec(UserType definition) { + this.definition = definition; + } + + @Override + public UDTValue parse(String value) { + return definition.parseValue(value); + } + + @Override + public String format(UDTValue value) { + return value.toString(); + } + + @Override + public ByteBuffer serialize(UDTValue value) { + int size = 0; + for (ByteBuffer v : value.values) + size += 4 + (v == null ? 0 : v.remaining()); + + ByteBuffer result = ByteBuffer.allocate(size); + for (ByteBuffer bb : value.values) { + if (bb == null) { + result.putInt(-1); + } else { + result.putInt(bb.remaining()); + result.put(bb.duplicate()); + } + } + return (ByteBuffer)result.flip(); + } + + @Override + public UDTValue deserialize(ByteBuffer bytes) { + ByteBuffer input = bytes.duplicate(); + UDTValue value = definition.newValue(); + + int i = 0; + while (input.hasRemaining() && i < value.values.length) { + int n = input.getInt(); + value.values[i++] = n < 0 ? 
null : readBytes(input, n); + } + return value; + } + } + + static class TupleCodec extends TypeCodec { + + private final TupleType type; + + public TupleCodec(TupleType type) { + this.type = type; + } + + @Override + public TupleValue parse(String value) { + TupleValue v = type.newValue(); + + int idx = ParseUtils.skipSpaces(value, 0); + if (value.charAt(idx++) != '(') + throw new InvalidTypeException(String.format("Cannot parse tuple value from \"%s\", at character %d expecting '(' but got '%c'", value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + + if (value.charAt(idx) == ')') + return v; + + int i = 0; + while (idx < value.length()) { + int n; + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException(String.format("Cannot parse tuple value from \"%s\", invalid CQL value at character %d", value, idx), e); + } + + DataType dt = type.getComponentTypes().get(i); + v.setBytesUnsafe(i, dt.serialize(dt.parse(value.substring(idx, n)), ProtocolVersion.V3)); + idx = n; + i += 1; + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx) == ')') + return v; + if (value.charAt(idx) != ',') + throw new InvalidTypeException(String.format("Cannot parse tuple value from \"%s\", at character %d expecting ',' but got '%c'", value, idx, value.charAt(idx))); + ++idx; // skip ',' + + idx = ParseUtils.skipSpaces(value, idx); + } + throw new InvalidTypeException(String.format("Malformed tuple value \"%s\", missing closing ')'", value)); + } + + @Override + public String format(TupleValue value) { + return value.toString(); + } + + @Override + public ByteBuffer serialize(TupleValue value) { + int size = 0; + for (ByteBuffer v : value.values) + size += 4 + (v == null ? 
0 : v.remaining()); + + ByteBuffer result = ByteBuffer.allocate(size); + for (ByteBuffer bb : value.values) { + if (bb == null) { + result.putInt(-1); + } else { + result.putInt(bb.remaining()); + result.put(bb.duplicate()); + } + } + return (ByteBuffer)result.flip(); + } + + @Override + public TupleValue deserialize(ByteBuffer bytes) { + ByteBuffer input = bytes.duplicate(); + TupleValue value = type.newValue(); + + int i = 0; + while (input.hasRemaining() && i < value.values.length) { + int n = input.getInt(); + value.values[i++] = n < 0 ? null : readBytes(input, n); + } + return value; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/UDTValue.java b/driver-core/src/main/java/com/datastax/driver/core/UDTValue.java new file mode 100644 index 00000000000..09938f2529a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/UDTValue.java @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * A value for a User Defined Type. 
+ */ +public class UDTValue extends AbstractData { + + private final UserType definition; + + UDTValue(UserType definition) { + // All things in a UDT are encoded with the protocol v3 + super(ProtocolVersion.V3, definition.size()); + this.definition = definition; + } + + protected DataType getType(int i) { + return definition.byIdx[i].getType(); + } + + protected String getName(int i) { + return definition.byIdx[i].getName(); + } + + protected int[] getAllIndexesOf(String name) { + int[] indexes = definition.byName.get(Metadata.handleId(name)); + if (indexes == null) + throw new IllegalArgumentException(name + " is not a field defined in this UDT"); + return indexes; + } + + /** + * The UDT this is a value of. + * + * @return the UDT this is a value of. + */ + public UserType getType() { + return definition; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof UDTValue)) + return false; + + UDTValue that = (UDTValue)o; + if (!definition.equals(that.definition)) + return false; + + return super.equals(o); + } + + @Override + public int hashCode() { + return super.hashCode(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + for (int i = 0; i < values.length; i++) { + if (i > 0) + sb.append(", "); + + sb.append(getName(i)); + sb.append(":"); + DataType dt = getType(i); + sb.append(values[i] == null ? "null" : dt.format(dt.deserialize(values[i], ProtocolVersion.V3))); + } + sb.append("}"); + return sb.toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/UnsupportedProtocolVersionException.java b/driver-core/src/main/java/com/datastax/driver/core/UnsupportedProtocolVersionException.java new file mode 100644 index 00000000000..119ab587027 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/UnsupportedProtocolVersionException.java @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; + +/** + * Indicates that we've attempted to connect to a 1.2 C* node with version 2 of + * the protocol. + */ +class UnsupportedProtocolVersionException extends Exception { + + private static final long serialVersionUID = 0; + + public final InetSocketAddress address; + public final ProtocolVersion unsupportedVersion; + public final ProtocolVersion serverVersion; + + public UnsupportedProtocolVersionException(InetSocketAddress address, ProtocolVersion unsupportedVersion, ProtocolVersion serverVersion) + { + super(String.format("[%s] Host %s does not support protocol version %s but %s", address, address, unsupportedVersion, serverVersion)); + this.address = address; + this.unsupportedVersion = unsupportedVersion; + this.serverVersion = serverVersion; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/UserType.java b/driver-core/src/main/java/com/datastax/driver/core/UserType.java new file mode 100644 index 00000000000..df718204b98 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/UserType.java @@ -0,0 +1,351 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.*; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterators; +import com.google.common.reflect.TypeToken; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + +/** + * A User Defined Type (UDT). + *

+ * A UDT is a essentially a named collection of fields (with a name and a type). + */ +public class UserType extends DataType implements Iterable{ + + private static final String TYPE_NAME = "type_name"; + private static final String COLS_NAMES = "field_names"; + private static final String COLS_TYPES = "field_types"; + + private final String keyspace; + private final String typeName; + + // Note that we don't expose the order of fields, from an API perspective this is a map + // of String->Field, but internally we care about the order because the serialization format + // of UDT expects a particular order. + final Field[] byIdx; + // For a given name, we can only have one field with that name, so we don't need a int[] in + // practice. However, storing one element arrays save allocations in UDTValue.getAllIndexesOf + // implementation. + final Map byName; + + UserType(String keyspace, String typeName, Collection fields) { + super(DataType.Name.UDT); + + this.keyspace = keyspace; + this.typeName = typeName; + this.byIdx = fields.toArray(new Field[fields.size()]); + + ImmutableMap.Builder builder = new ImmutableMap.Builder(); + for (int i = 0; i < byIdx.length; i++) + builder.put(byIdx[i].getName(), new int[]{ i }); + this.byName = builder.build(); + } + + static UserType build(Row row) { + String keyspace = row.getString(KeyspaceMetadata.KS_NAME); + String name = row.getString(TYPE_NAME); + + List fieldNames = row.getList(COLS_NAMES, String.class); + List fieldTypes = row.getList(COLS_TYPES, String.class); + + List fields = new ArrayList(fieldNames.size()); + for (int i = 0; i < fieldNames.size(); i++) + fields.add(new Field(fieldNames.get(i), CassandraTypeParser.parseOne(fieldTypes.get(i)))); + + return new UserType(keyspace, name, fields); + } + + @SuppressWarnings("unchecked") + @Override + TypeCodec codec(ProtocolVersion protocolVersion) { + return (TypeCodec)TypeCodec.udtOf(this); + } + + /** + * Returns a new empty value for this user type definition. 
+ * + * @return an empty value for this user type definition. + */ + public UDTValue newValue() { + return new UDTValue(this); + } + + /** + * The name of the keyspace this UDT is part of. + * + * @return the name of the keyspace this UDT is part of. + */ + public String getKeyspace() { + return keyspace; + } + + /** + * The name of this user type. + * + * @return the name of this user type. + */ + public String getTypeName() { + return typeName; + } + + /** + * Returns the number of fields in this UDT. + * + * @return the number of fields in this UDT. + */ + public int size() { + return byIdx.length; + } + + /** + * Returns whether this UDT contains a given field. + * + * @param name the name to check. Note that {@code name} obey the usual + * CQL identifier rules: it should be quoted if it denotes a case sensitive + * identifier (you can use {@link Metadata#quote} for the quoting). + * @return {@code true} if this UDT contains a field named {@code name}, + * {@code false} otherwise. + */ + public boolean contains(String name) { + return byName.containsKey(Metadata.handleId(name)); + } + + /** + * Returns an iterator over the fields of this UDT. + * + * @return an iterator over the fields of this UDT. + */ + @Override + public Iterator iterator() { + return Iterators.forArray(byIdx); + } + + /** + * Returns the names of the fields of this UDT. + * + * @return the names of the fields of this UDT as a collection. + */ + public Collection getFieldNames() { + return byName.keySet(); + } + + /** + * Returns the type of a given field. + * + * @param name the name of the field. Note that {@code name} obey the usual + * CQL identifier rules: it should be quoted if it denotes a case sensitive + * identifier (you can use {@link Metadata#quote} for the quoting). + * @return the type of field {@code name} if this UDT has a field of this + * name, {@code null} otherwise. + * + * @throws IllegalArgumentException if {@code name} is not a field of this + * UDT definition. 
+ */ + public DataType getFieldType(String name) { + int[] idx = byName.get(Metadata.handleId(name)); + if (idx == null) + throw new IllegalArgumentException(name + " is not a field defined in this definition"); + + return byIdx[idx[0]].getType(); + } + + @Override + public boolean isFrozen() { + return true; + } + + @Override + boolean canBeDeserializedAs(TypeToken typeToken) { + return typeToken.isAssignableFrom(getName().javaType); + } + + @Override + public final int hashCode() { + return Arrays.hashCode(new Object[]{ name, keyspace, typeName, byIdx }); + } + + @Override + public final boolean equals(Object o) { + if(!(o instanceof UserType)) + return false; + + UserType other = (UserType)o; + + // Note: we don't test byName because it's redundant with byIdx in practice, + // but also because the map holds 'int[]' which don't have proper equal. + return keyspace.equals(other.keyspace) + && typeName.equals(other.typeName) + && Arrays.equals(byIdx, other.byIdx); + } + + /** + * Returns a CQL query representing this user type in human readable form. + *

+ * This method is equivalent to {@link #asCQLQuery} but the ouptut is + * formatted to be human readable (for some definition of human readable). + * + * @return the CQL query representing this user type. + */ + public String exportAsString() { + return asCQLQuery(true); + } + + /** + * Returns a CQL query representing this user type. + *

+ * This method returns a single 'CREATE TYPE' query corresponding + * to this UDT definition. + *

+ * Note that the returned string is a single line; the returned query + * is not formatted in any way. + * + * @return the 'CREATE TYPE' query corresponding to this user type. + * @see #exportAsString + */ + public String asCQLQuery() { + return asCQLQuery(false); + } + + private String asCQLQuery(boolean formatted) { + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE TYPE ").append(Metadata.escapeId(keyspace)).append('.').append(Metadata.escapeId(typeName)).append(" ("); + TableMetadata.newLine(sb, formatted); + for (int i = 0; i < byIdx.length; i++) { + sb.append(TableMetadata.spaces(4, formatted)).append(byIdx[i]); + if (i < byIdx.length - 1) + sb.append(','); + TableMetadata.newLine(sb, formatted); + } + + return sb.append(");").toString(); + } + + @Override + public String toString() { + return "frozen<" + Metadata.escapeId(getKeyspace()) + '.' + Metadata.escapeId(getTypeName()) + ">"; + } + + // We don't want to expose that, it's already exposed through DataType.parse + UDTValue parseValue(String value) { + UDTValue v = newValue(); + + int idx = ParseUtils.skipSpaces(value, 0); + if (value.charAt(idx++) != '{') + throw new InvalidTypeException(String.format("Cannot parse UDT value from \"%s\", at character %d expecting '{' but got '%c'", value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + + if (value.charAt(idx) == '}') + return v; + + while (idx < value.length()) { + + int n; + try { + n = ParseUtils.skipCQLId(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException(String.format("Cannot parse UDT value from \"%s\", cannot parse a CQL identifier at character %d", value, idx), e); + } + String name = value.substring(idx, n); + idx = n; + + if (!contains(name)) + throw new InvalidTypeException(String.format("Unknown field %s in value \"%s\"", name, value)); + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx++) != ':') + throw new InvalidTypeException(String.format("Cannot 
parse UDT value from \"%s\", at character %d expecting ':' but got '%c'", value, idx, value.charAt(idx))); + idx = ParseUtils.skipSpaces(value, idx); + + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException(String.format("Cannot parse UDT value from \"%s\", invalid CQL value at character %d", value, idx), e); + } + + DataType dt = getFieldType(name); + v.setBytesUnsafe(name, dt.serialize(dt.parse(value.substring(idx, n)), ProtocolVersion.V3)); + idx = n; + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx) == '}') + return v; + if (value.charAt(idx) != ',') + throw new InvalidTypeException(String.format("Cannot parse UDT value from \"%s\", at character %d expecting ',' but got '%c'", value, idx, value.charAt(idx))); + ++idx; // skip ',' + + idx = ParseUtils.skipSpaces(value, idx); + } + throw new InvalidTypeException(String.format("Malformed UDT value \"%s\", missing closing '}'", value)); + } + + /** + * A UDT field. + */ + public static class Field { + private final String name; + private final DataType type; + + Field(String name, DataType type) { + this.name = name; + this.type = type; + } + + /** + * Returns the name of the field. + * + * @return the name of the field. + */ + public String getName() { + return name; + } + + /** + * Returns the type of the field. + * + * @return the type of the field. 
+ */ + public DataType getType() { + return type; + } + + @Override + public final int hashCode() { + return Arrays.hashCode(new Object[]{ name, type }); + } + + @Override + public final boolean equals(Object o) { + if(!(o instanceof Field)) + return false; + + Field other = (Field)o; + return name.equals(other.name) + && type.equals(other.type); + } + + @Override + public String toString() { + return Metadata.escapeId(name) + ' ' + type; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java b/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java new file mode 100644 index 00000000000..50568db1cc4 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java @@ -0,0 +1,254 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.regex.Pattern; +import java.util.regex.Matcher; + +import com.google.common.base.Objects; + +/** + * A version number in the form X.Y.Z with optional pre-release labels and build metadata. + *

+ * Version numbers compare the usual way, the major number (X) is compared first, then the minor + * one (Y) and then the patch level one (Z). Lastly, versions with pre-release sorts before the + * versions that don't have one, and labels are sorted alphabetically if necessary. Build metadata + * are ignored for sorting versions. The versions supported loosely correspond to what + * http://semver.org/ defines though it does not adhere to it strictly. + */ +public class VersionNumber implements Comparable { + + private static final String VERSION_REGEXP = "(\\d+)\\.(\\d+)(\\.\\d+)?(\\.\\d+)?([~\\-]\\w[.\\w]*(?:\\-\\w[.\\w]*)*)?(\\+[.\\w]+)?"; + private static final Pattern pattern = Pattern.compile(VERSION_REGEXP); + + private final int major; + private final int minor; + private final int patch; + private final int dsePatch; + + private final String[] preReleases; + private final String build; + + private VersionNumber(int major, int minor, int patch, int dsePatch, String[] preReleases, String build) { + this.major = major; + this.minor = minor; + this.patch = patch; + this.dsePatch = dsePatch; + this.preReleases = preReleases; + this.build = build; + } + + /** + * Parse a version from a string. + *

+ * The version string should have primarily the form X.Y.Z to which can be appended + * one or more pre-release label after dashes (2.0.1-beta1, 2.1.4-rc1-SNAPSHOT) + * and an optional build label (2.1.0-beta1+a20ba.sha). Out of convenience, the + * "patch" version number, Z, can be ommitted, in which case it is assumed to be 0. + * + * @param version the string to parse + * @return the parsed version number. + * + * @throws IllegalArgumentException if the provided string does not + * represent a valid version. + */ + public static VersionNumber parse(String version) { + if (version == null) + return null; + + Matcher matcher = pattern.matcher(version); + if (!matcher.matches()) + throw new IllegalArgumentException("Invalid version number: " + version); + + try { + int major = Integer.parseInt(matcher.group(1)); + int minor = Integer.parseInt(matcher.group(2)); + + String pa = matcher.group(3); + int patch = pa == null || pa.isEmpty() ? 0 : Integer.parseInt(pa.substring(1)); // dropping the initial '.' since it's included this time + + String dse = matcher.group(4); + int dsePatch = dse == null || dse.isEmpty() ? -1 : Integer.parseInt(dse.substring(1)); // dropping the initial '.' since it's included this time + + String pr = matcher.group(5); + String[] preReleases = pr == null || pr.isEmpty() ? null : pr.substring(1).split("\\-"); // drop initial '-' or '~' then split on the remaining ones + + String bl = matcher.group(6); + String build = bl == null || bl.isEmpty() ? null : bl.substring(1); // drop the initial '+' + + return new VersionNumber(major, minor, patch, dsePatch, preReleases, build); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid version number: " + version); + } + } + + /** + * The major version number. + * + * @return the major version number, i.e. X in X.Y.Z. + */ + public int getMajor() { + return major; + } + + /** + * The minor version number. + * + * @return the minor version number, i.e. Y in X.Y.Z. 
+ */ + public int getMinor() { + return minor; + } + + /** + * The patch version number. + * + * @return the patch version number, i.e. Z in X.Y.Z. + */ + public int getPatch() { + return patch; + } + + /** + * The DSE patch version number (will only be present for version of Cassandra in DSE). + *

+ * DataStax Entreprise (DSE) adds a fourth number to the version number to track potential + * hot fixes and/or DSE specific patches that may have been applied to the Cassandra version. + * In that case, this method return that fourth number. + * + * @return the DSE patch version number, i.e. D in X.Y.Z.D, or -1 if the version number is + * not from DSE. + */ + public int getDSEPatch() { + return dsePatch; + } + + /** + * The pre-release labels if relevants, i.e. label1 and label2 in X.Y.Z-label1-lable2. + * + * @return the pre-releases labels. The return list will be {@code null} if the version number + * doesn't have one. + */ + public List getPreReleaseLabels() { + return Collections.unmodifiableList(Arrays.asList(preReleases)); + } + + /** + * The build label if there is one. + * + * @return the build label or {@code null} if the version number + * doesn't have one. + */ + public String getBuildLabel() { + return build; + } + + /** + * The next stable version, i.e. the version stripped of its pre-release labels and build metadata. + *

+ * This is mostly used during our development stage, where we test the driver against pre-release + * versions of Cassandra like 2.1.0-rc7-SNAPSHOT, but need to compare to the stable version 2.1.0 + * when testing for native protocol compatibility, etc. + * + * @return the next stable version. + */ + public VersionNumber nextStable() { + return new VersionNumber(major, minor, patch, dsePatch, null, null); + } + + public int compareTo(VersionNumber other) { + if (major < other.major) + return -1; + if (major > other.major) + return 1; + + if (minor < other.minor) + return -1; + if (minor > other.minor) + return 1; + + if (patch < other.patch) + return -1; + if (patch > other.patch) + return 1; + + if (dsePatch < 0) { + if (other.dsePatch >= 0) + return -1; + } else { + if (other.dsePatch < 0) + return 1; + + // Both are >= 0 + if (dsePatch < other.dsePatch) + return -1; + if (dsePatch > other.dsePatch) + return 1; + } + + if (preReleases == null) + return other.preReleases == null ? 0 : 1; + if (other.preReleases == null) + return -1; + + for (int i = 0; i < Math.min(preReleases.length, other.preReleases.length); i++) { + int cmp = preReleases[i].compareTo(other.preReleases[i]); + if (cmp != 0) + return cmp; + } + + return preReleases.length == other.preReleases.length ? 0 : (preReleases.length < other.preReleases.length ? -1 : 1); + } + + @Override + public boolean equals(Object o) { + if(!(o instanceof VersionNumber)) + return false; + VersionNumber that = (VersionNumber)o; + if (major != that.major || minor != that.minor || patch != that.patch) + return false; + + return major == that.major + && minor == that.minor + && patch == that.patch + && dsePatch == that.dsePatch + && (preReleases == null ? 
that.preReleases == null : Arrays.equals(preReleases, that.preReleases)) + && Objects.equal(build, that.build); + } + + @Override + public int hashCode() { + return Objects.hashCode(major, minor, patch, dsePatch, preReleases, build); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(major).append('.').append(minor).append('.').append(patch); + if (dsePatch >= 0) + sb.append('.').append(dsePatch); + if (preReleases != null) { + for (String preRelease : preReleases) + sb.append('-').append(preRelease); + } + if (build != null) + sb.append('+').append(build); + return sb.toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/WriteType.java b/driver-core/src/main/java/com/datastax/driver/core/WriteType.java new file mode 100644 index 00000000000..c8064e82801 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/WriteType.java @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * The type of a Cassandra write query. + *

/**
 * The type of a Cassandra write query.
 * <p>
 * This information is returned by Cassandra when a write timeout is raised to
 * indicate what type of write timed out. This information is useful to decide
 * which retry policy to adopt.
 */
public enum WriteType {
    /** A write to a single partition key. Such writes are guaranteed to be atomic and isolated. */
    SIMPLE,
    /**
     * A write to multiple partition keys that used the distributed batch log to ensure atomicity
     * (atomicity meaning that if any statement in the batch succeeds, all will eventually succeed).
     */
    BATCH,
    /** A write to multiple partition keys that doesn't use the distributed batch log. Atomicity for such writes is not guaranteed. */
    UNLOGGED_BATCH,
    /** A counter write (that can be for one or multiple partition keys). Such writes should not be replayed to avoid over-counting. */
    COUNTER,
    /** The initial write to the distributed batch log that Cassandra performs internally before a BATCH write. */
    BATCH_LOG,
    /**
     * A conditional write. If a timeout has this {@code WriteType}, the timeout has happened while doing the compare-and-swap for
     * a conditional update. In this case, the update may or may not have been applied.
     */
    CAS
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * Exception thrown when a query attempts to create a keyspace or table that already exists. + */ +public class AlreadyExistsException extends QueryValidationException { + + private static final long serialVersionUID = 0; + + private final String keyspace; + private final String table; + + public AlreadyExistsException(String keyspace, String table) { + super(makeMsg(keyspace, table)); + this.keyspace = keyspace; + this.table = table; + } + + private AlreadyExistsException(String msg, Throwable cause, String keyspace, String table) { + super(msg, cause); + this.keyspace = keyspace; + this.table = table; + } + + private static String makeMsg(String keyspace, String table) { + if (table.isEmpty()) + return String.format("Keyspace %s already exists", keyspace); + else + return String.format("Table %s.%s already exists", keyspace, table); + } + + /** + * Returns whether the query yielding this exception was a table creation + * attempt. + * + * @return {@code true} if this exception is raised following a table + * creation attempt, {@code false} if it was a keyspace creation attempt. + */ + public boolean wasTableCreation() { + return !table.isEmpty(); + } + + /** + * The name of keyspace that either already exists or is home to the table + * that already exists. 
+ * + * @return a keyspace name that is either the keyspace whose creation + * attempt failed because a keyspace of the same name already exists (in + * that case, {@link #table} will return {@code null}), or the keyspace of + * the table creation attempt (in which case {@link #table} will return the + * name of said table). + */ + public String getKeyspace() { + return keyspace; + } + + /** + * If the failed creation was a table creation, the name of the table that already exists. + * + * @return the name of table whose creation attempt failed because a table + * of this name already exists, or {@code null} if the query was a keyspace + * creation query. + */ + public String getTable() { + return table.isEmpty() ? null : table; + } + + @Override + public DriverException copy() { + return new AlreadyExistsException(getMessage(), this, keyspace, table); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java new file mode 100644 index 00000000000..9e9389a5dab --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.exceptions; + +import java.net.InetAddress; +import java.net.InetSocketAddress; + +/** + * Indicates an error during the authentication phase while connecting to a node. + */ +public class AuthenticationException extends DriverException { + + private static final long serialVersionUID = 0; + + private final InetSocketAddress address; + + public AuthenticationException(InetSocketAddress address, String message) { + super(String.format("Authentication error on host %s: %s", address, message)); + this.address = address; + } + + private AuthenticationException(String message, Throwable cause, InetSocketAddress address) + { + super(message, cause); + this.address = address; + } + + /** + * The host for which the authentication failed. + *

+ * This is a shortcut for {@code getAddress().getAddress()}. + * + * @return the host for which the authentication failed. + */ + public InetAddress getHost() { + return address.getAddress(); + } + + /** + * The full address of the host for which the authentication failed. + * + * @return the host for which the authentication failed. + */ + public InetSocketAddress getAddress() { + return address; + } + + @Override + public DriverException copy() { + return new AuthenticationException(getMessage(), this, address); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/BootstrappingException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/BootstrappingException.java new file mode 100644 index 00000000000..1ea96982972 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/BootstrappingException.java @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import java.net.InetSocketAddress; + +/** + * Indicates that the contacted host was bootstrapping. + * This class is mainly intended for internal use; + * client applications are not expected to deal with this exception directly, + * because the driver would transparently retry the same query on another host; + * but such exceptions are likely to appear occasionally in the driver logs. 
+ */ +public class BootstrappingException extends DriverInternalError { + + private static final long serialVersionUID = 0; + + private final InetSocketAddress address; + + public BootstrappingException(InetSocketAddress address, String message) { + super(String.format("Queried host (%s) was bootstrapping: %s", address, message)); + this.address = address; + } + + /** + * Private constructor used solely when copying exceptions. + */ + private BootstrappingException(InetSocketAddress address, String message, BootstrappingException cause) { + super(message, cause); + this.address = address; + } + + /** + * The full address of the host that was bootstrapping. + * + * @return The full address of the host that was bootstrapping. + */ + public InetSocketAddress getAddress() { + return address; + } + + @Override + public BootstrappingException copy() { + return new BootstrappingException(address, getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java new file mode 100644 index 00000000000..3127cfa6c52 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Top level class for exceptions thrown by the driver.
 */
public class DriverException extends RuntimeException {

    private static final long serialVersionUID = 0;

    DriverException() {
        super();
    }

    public DriverException(String message) {
        super(message);
    }

    public DriverException(Throwable cause) {
        super(cause);
    }

    public DriverException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Copy the exception.
     * <p>
     * This returns a new exception, equivalent to the original one, except that
     * because a new object is created in the current thread, the top-most
     * element in the stacktrace of the exception will refer to the current
     * thread (this is mainly used internally by the driver). The cause of
     * the copied exception will be the original exception.
     *
     * @return a copy/clone of this exception.
     */
    public DriverException copy() {
        return new DriverException(getMessage(), this);
    }
}
+ */ +public class DriverInternalError extends DriverException { + + private static final long serialVersionUID = 0; + + public DriverInternalError(String message) { + super(message); + } + + public DriverInternalError(Throwable cause) { + super(cause); + } + + public DriverInternalError(String message, Throwable cause) { + super(message, cause); + } + + @Override + public DriverInternalError copy() { + return new DriverInternalError(getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java new file mode 100644 index 00000000000..a1e5553bfc9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * A specific invalid query exception that indicates that the query is invalid + * because of some configuration problem. + *

+ * This is generally throw by query that manipulate the schema (CREATE and + * ALTER) when the required configuration options are invalid. + */ +public class InvalidConfigurationInQueryException extends InvalidQueryException { + + private static final long serialVersionUID = 0; + + public InvalidConfigurationInQueryException(String msg) { + super(msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java new file mode 100644 index 00000000000..6e61dd6bb6f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * Indicates a syntactically correct but invalid query. 
+ */ +public class InvalidQueryException extends QueryValidationException { + + private static final long serialVersionUID = 0; + + public InvalidQueryException(String msg) { + super(msg); + } + + private InvalidQueryException(String msg, Throwable cause) { + super(msg, cause); + } + + @Override + public DriverException copy() { + return new InvalidQueryException(getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java new file mode 100644 index 00000000000..b50f8f2553a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.exceptions; + +public class InvalidTypeException extends DriverException { + + private static final long serialVersionUID = 0; + + public InvalidTypeException(String msg) { + super(msg); + } + + public InvalidTypeException(String msg, Throwable cause) { + super(msg, cause); + } + + @Override + public DriverException copy() { + return new InvalidTypeException(getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java new file mode 100644 index 00000000000..51f7a3e4b3a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import java.io.PrintWriter; +import java.io.StringWriter; +import java.net.InetSocketAddress; +import java.util.HashMap; +import java.util.Map; + +/** + * Exception thrown when a query cannot be performed because no host are + * available. + * + * This exception is thrown if + *

    + *
  • either there is no host live in the cluster at the moment of the query
  • + *
  • all host that have been tried have failed due to a connection problem
  • + *
+ * + * For debugging purpose, the list of hosts that have been tried along with the + * failure cause can be retrieved using the {@link #errors} method. + */ +public class NoHostAvailableException extends DriverException { + + private static final long serialVersionUID = 0; + + private static final int MAX_ERRORS_IN_DEFAULT_MESSAGE = 3; + + private final Map errors; + + public NoHostAvailableException(Map errors) { + super(makeMessage(errors, MAX_ERRORS_IN_DEFAULT_MESSAGE, false, false)); + this.errors = errors; + } + + private NoHostAvailableException(String message, Throwable cause, Map errors) { + super(message, cause); + this.errors = errors; + } + + /** + * Return the hosts tried along with the error encountered while trying + * them. + * + * @return a map containing for each tried host the error triggered when + * trying it. + */ + public Map getErrors() { + return new HashMap(errors); + } + + /** + * Builds a custom message for this exception. + * + * @param maxErrors the maximum number of errors displayed (useful to limit the size of the message for big clusters). Beyond this limit, + * host names are still displayed, but not the associated errors. Set to {@code Integer.MAX_VALUE} to display all hosts. + * @param formatted whether to format the output (line break between each host). + * @param includeStackTraces whether to include the full stacktrace of each host error. Note that this automatically implies + * {@code formatted}. + * @return the message. 
+ */ + public String getCustomMessage(int maxErrors, boolean formatted, boolean includeStackTraces) { + if (includeStackTraces) + formatted = true; + return makeMessage(errors, maxErrors, formatted, includeStackTraces); + } + + @Override + public DriverException copy() { + return new NoHostAvailableException(getMessage(), this, errors); + } + + private static String makeMessage(Map errors, int maxErrorsInMessage, boolean formatted, boolean includeStackTraces) { + if (errors.size() == 0) + return "All host(s) tried for query failed (no host was tried)"; + + StringWriter stringWriter = new StringWriter(); + PrintWriter out = new PrintWriter(stringWriter); + + out.print("All host(s) tried for query failed (tried:"); + out.print(formatted ? "\n" : " "); + + int n = 0; + boolean truncated = false; + for (Map.Entry entry : errors.entrySet()) + { + if (n > 0) out.print(formatted ? "\n" : ", "); + out.print(entry.getKey()); + if (n < maxErrorsInMessage) { + if (includeStackTraces) { + out.print("\n"); + entry.getValue().printStackTrace(out); + out.print("\n"); + } else { + out.printf(" (%s)", entry.getValue()); + } + } else { + truncated = true; + } + n += 1; + } + if (truncated) { + out.print(formatted ? "\n" : " "); + out.printf("[only showing errors of first %d hosts, use getErrors() for more details]", maxErrorsInMessage); + } + if (formatted && !includeStackTraces) + out.print("\n"); + out.print(")"); + out.close(); + return stringWriter.toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/OverloadedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/OverloadedException.java new file mode 100644 index 00000000000..276774c0a5b --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/OverloadedException.java @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import java.net.InetSocketAddress; + +/** + * Indicates that the contacted host reported itself being overloaded. + * This class is mainly intended for internal use; + * client applications are not expected to deal with this exception directly, + * because the driver would transparently retry the same query on another host; + * but such exceptions are likely to appear occasionally in the driver logs. + */ +public class OverloadedException extends DriverInternalError { + + private static final long serialVersionUID = 0; + + private final InetSocketAddress address; + + public OverloadedException(InetSocketAddress address, String message) { + super(String.format("Queried host (%s) was overloaded: %s", address, message)); + this.address = address; + } + + /** + * Private constructor used solely when copying exceptions. + */ + private OverloadedException(InetSocketAddress address, String message, OverloadedException cause) { + super(message, cause); + this.address = address; + } + + /** + * The full address of the host that reported itself being overloaded. + * + * @return The full address of the host that reported itself being overloaded. 
+ */ + public InetSocketAddress getAddress() { + return address; + } + + @Override + public OverloadedException copy() { + return new OverloadedException(address, getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/PagingStateException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/PagingStateException.java new file mode 100644 index 00000000000..1c16e0b15cd --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/PagingStateException.java @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +public class PagingStateException extends DriverException { + + private static final long serialVersionUID = 0; + + public PagingStateException(String msg) { + super(msg); + } + + public PagingStateException(String msg, Throwable cause) { + super(msg, cause); + } + +} \ No newline at end of file diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java new file mode 100644 index 00000000000..93d31ff4e8e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * Exception related to the execution of a query. + * + * This correspond to the exception that Cassandra throw when a (valid) query + * cannot be executed (TimeoutException, UnavailableException, ...). + */ +@SuppressWarnings("serial") +public abstract class QueryExecutionException extends DriverException { + + protected QueryExecutionException(String msg) { + super(msg); + } + + protected QueryExecutionException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java new file mode 100644 index 00000000000..cc386093e72 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; + +/** + * A Cassandra timeout during a query. + * + * Such an exception is returned when the query has been tried by Cassandra but + * cannot be achieved with the requested consistency level within the rpc + * timeout set for Cassandra. + */ +@SuppressWarnings("serial") +public abstract class QueryTimeoutException extends QueryExecutionException { + + private final ConsistencyLevel consistency; + private final int received; + private final int required; + + protected QueryTimeoutException(String msg, ConsistencyLevel consistency, int received, int required) { + super(msg); + this.consistency = consistency; + this.received = received; + this.required = required; + } + + protected QueryTimeoutException(String msg, Throwable cause, ConsistencyLevel consistency, int received, int required) { + super(msg, cause); + this.consistency = consistency; + this.received = received; + this.required = required; + } + + /** + * The consistency level of the operation that time outed. + * + * @return the consistency level of the operation that time outed. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } + + /** + * The number of replica that had acknowledged/responded to the operation + * before it time outed. + * + * @return the number of replica that had acknowledged/responded the + * operation before it time outed. + */ + public int getReceivedAcknowledgements() { + return received; + } + + /** + * The minimum number of replica acknowledgements/responses that were + * required to fulfill the operation. + * + * @return The minimum number of replica acknowledgements/response that + * were required to fulfill the operation. 
+ */ + public int getRequiredAcknowledgements() { + return required; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java new file mode 100644 index 00000000000..83d7277c216 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * An exception indicating that a query cannot be executed because it is + * syntactically incorrect, invalid, unauthorized or any other reason. + */ +@SuppressWarnings("serial") +public abstract class QueryValidationException extends DriverException { + + protected QueryValidationException(String msg) { + super(msg); + } + + protected QueryValidationException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java new file mode 100644 index 00000000000..f63b4f4cbf1 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; + +/** + * A Cassandra timeout during a read query. + */ +public class ReadTimeoutException extends QueryTimeoutException { + + private static final long serialVersionUID = 0; + + private final boolean dataPresent; + + public ReadTimeoutException(ConsistencyLevel consistency, int received, int required, boolean dataPresent) { + super(String.format("Cassandra timeout during read query at consistency %s (%s)", consistency, formatDetails(received, required, dataPresent)), + consistency, + received, + required); + this.dataPresent = dataPresent; + } + + private ReadTimeoutException(String msg, Throwable cause, ConsistencyLevel consistency, int received, int required, boolean dataPresent) { + super(msg, cause, consistency, received, required); + this.dataPresent = dataPresent; + } + + private static String formatDetails(int received, int required, boolean dataPresent) { + if (received < required) + return String.format("%d responses were required but only %d replica responded", required, received); + else if (!dataPresent) + return "the replica queried for data didn't respond"; + else + return "timeout while waiting for repair of inconsistent replica"; + } + + /** + * Whether the actual data was amongst the received replica responses. 
+ * + * During reads, Cassandra doesn't request data from every replica to + * minimize internal network traffic. Instead, some replica are only asked + * for a checksum of the data. A read timeout may occurred even if enough + * replica have responded to fulfill the consistency level if only checksum + * responses have been received. This method allow to detect that case. + * + * @return {@code true} if the data was amongst the received replica + * responses, {@code false} otherwise. + */ + public boolean wasDataRetrieved() { + return dataPresent; + } + + @Override + public DriverException copy() { + return new ReadTimeoutException(getMessage(), + this, + getConsistencyLevel(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements(), + wasDataRetrieved()); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java new file mode 100644 index 00000000000..08d6c19704c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * Indicates a syntax error in a query. 
+ */ +public class SyntaxError extends QueryValidationException { + + private static final long serialVersionUID = 0; + + public SyntaxError(String msg) { + super(msg); + } + + private SyntaxError(String msg, Throwable cause) { + super(msg, cause); + } + + @Override + public DriverException copy() { + return new SyntaxError(getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TraceRetrievalException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TraceRetrievalException.java new file mode 100644 index 00000000000..b95588ad136 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TraceRetrievalException.java @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * Exception thrown if a query trace cannot be retrieved. 
+ * + * @see com.datastax.driver.core.QueryTrace + */ +public class TraceRetrievalException extends DriverException { + + private static final long serialVersionUID = 0; + + public TraceRetrievalException(String message) { + super(message); + } + + public TraceRetrievalException(String message, Throwable cause) { + super(message, cause); + } + + @Override + public DriverException copy() { + return new TraceRetrievalException(getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java new file mode 100644 index 00000000000..1d3921ffae7 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * Error during a truncation operation. 
+ */ +public class TruncateException extends QueryExecutionException { + + private static final long serialVersionUID = 0; + + public TruncateException(String msg) { + super(msg); + } + + private TruncateException(String msg, Throwable cause) { + super(msg, cause); + } + + @Override + public DriverException copy() { + return new TruncateException(getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java new file mode 100644 index 00000000000..658b648c859 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * Indicates that a query cannot be performed due to the authorization + * restrictions of the logged user. 
+ */ +public class UnauthorizedException extends QueryValidationException { + + private static final long serialVersionUID = 0; + + public UnauthorizedException(String msg) { + super(msg); + } + + private UnauthorizedException(String msg, Throwable cause) { + super(msg, cause); + } + + @Override + public DriverException copy() { + return new UnauthorizedException(getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java new file mode 100644 index 00000000000..f73b1900e37 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; + +/** + * Exception thrown when the coordinator knows there is not enough replica + * alive to perform a query with the requested consistency level. 
+ */ +public class UnavailableException extends QueryExecutionException { + + private static final long serialVersionUID = 0; + + private final ConsistencyLevel consistency; + private final int required; + private final int alive; + + public UnavailableException(ConsistencyLevel consistency, int required, int alive) { + super(String.format("Not enough replica available for query at consistency %s (%d required but only %d alive)", consistency, required, alive)); + this.consistency = consistency; + this.required = required; + this.alive = alive; + } + + private UnavailableException(String message, Throwable cause, ConsistencyLevel consistency, int required, int alive) { + super(message, cause); + this.consistency = consistency; + this.required = required; + this.alive = alive; + } + + /** + * The consistency level of the operation triggering this unavailable exception. + * + * @return the consistency level of the operation triggering this unavailable exception. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } + + /** + * The number of replica acknowledgements/responses required to perform the + * operation (with its required consistency level). + * + * @return the number of replica acknowledgements/responses required to perform the + * operation. + */ + public int getRequiredReplicas() { + return required; + } + + /** + * The number of replica that were known to be alive by the Cassandra + * coordinator node when it tried to execute the operation. + * + * @return The number of replica that were known to be alive by the Cassandra + * coordinator node when it tried to execute the operation. 
+ */ + public int getAliveReplicas() { + return alive; + } + + @Override + public DriverException copy() { + return new UnavailableException(getMessage(), this, consistency, required, alive); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnpreparedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnpreparedException.java new file mode 100644 index 00000000000..4ab4450ee39 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnpreparedException.java @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import java.net.InetSocketAddress; + +/** + * Indicates that the contacted host replied with an UNPREPARED error code. + * This class is mainly intended for internal use; + * client applications are not expected to deal with this exception directly, + * because the driver would transparently prepare the query and execute it again; + * but such exceptions are likely to appear occasionally in the driver logs. 
+ */ +public class UnpreparedException extends DriverInternalError { + + private static final long serialVersionUID = 0; + + private final InetSocketAddress address; + + public UnpreparedException(InetSocketAddress address, String message) { + super(String.format("A prepared query was submitted on %s but was not known of that node: %s", address, message)); + this.address = address; + } + + /** + * Private constructor used solely when copying exceptions. + */ + private UnpreparedException(InetSocketAddress address, String message, UnpreparedException cause) { + super(message, cause); + this.address = address; + } + + /** + * The full address of the host that replied with an UNPREPARED error code. + * + * @return The full address of the host that replied with an UNPREPARED error code. + */ + public InetSocketAddress getAddress() { + return address; + } + + @Override + public UnpreparedException copy() { + return new UnpreparedException(address, getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java new file mode 100644 index 00000000000..151936596f7 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ProtocolVersion; + +/** + * Exception thrown when a feature is not supported by the native protocol + * currently in use. + */ +public class UnsupportedFeatureException extends DriverException { + + private static final long serialVersionUID = 0; + + public UnsupportedFeatureException(ProtocolVersion currentVersion, String msg) { + super("Unsupported feature with the native protocol " + currentVersion + " (which is currently in use): " + msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java new file mode 100644 index 00000000000..caaad71c438 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.WriteType; + +/** + * A Cassandra timeout during a write query. 
+ */ +public class WriteTimeoutException extends QueryTimeoutException { + + private static final long serialVersionUID = 0; + + private final WriteType writeType; + + public WriteTimeoutException(ConsistencyLevel consistency, WriteType writeType, int received, int required) { + super(String.format("Cassandra timeout during write query at consistency %s (%d replica were required but only %d acknowledged the write)", consistency, required, received), + consistency, + received, + required); + this.writeType = writeType; + } + + private WriteTimeoutException(String msg, Throwable cause, ConsistencyLevel consistency, WriteType writeType, int received, int required) { + super(msg, cause, consistency, received, required); + this.writeType = writeType; + } + + /** + * The type of the write for which a timeout was raised. + * + * @return the type of the write for which a timeout was raised. + */ + public WriteType getWriteType() { + return writeType; + } + + @Override + public DriverException copy() { + return new WriteTimeoutException(getMessage(), + this, + getConsistencyLevel(), + getWriteType(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements()); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java new file mode 100644 index 00000000000..53524d5da2d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Exceptions thrown by the DataStax Java driver for Cassandra. + */ +package com.datastax.driver.core.exceptions; diff --git a/driver-core/src/main/java/com/datastax/driver/core/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/package-info.java new file mode 100644 index 00000000000..30be9bd0dcb --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * The main package for the DataStax Java driver for Cassandra. + *

+ * The main entry for this package is the {@link com.datastax.driver.core.Cluster} class. + */ +package com.datastax.driver.core; diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/AddressTranslater.java b/driver-core/src/main/java/com/datastax/driver/core/policies/AddressTranslater.java new file mode 100644 index 00000000000..a21f66fed42 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/AddressTranslater.java @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import java.net.InetSocketAddress; + +import com.datastax.driver.core.Cluster; + +/** + * Translates IP addresses received from Cassandra nodes into locally queriable + * addresses. + *

+ * The driver auto-detect new Cassandra nodes added to the cluster through server + * side pushed notifications and through checking the system tables. For each + * node, the address the driver will receive will correspond to the address set as + * {@code rpc_address} in the node yaml file. In most case, this is the correct + * address to use by the driver and that is what is used by default. However, + * sometimes the addresses received through this mechanism will either not be + * reachable directly by the driver or should not be the prefered address to use + * to reach the node (for instance, the {@code rpc_address} set on Cassandra nodes + * might be a private IP, but some clients may have to use a public IP, or + * pass by a router to reach that node). This interface allows to deal with + * such cases, by allowing to translate an address as sent by a Cassandra node + * to another address to be used by the driver for connection. + *

+ * Please note that the contact points addresses provided while creating the + * {@link Cluster} instance are not "tanslated", only IP address retrieve from or sent + * by Cassandra nodes to the driver are. + */ +public interface AddressTranslater { + + /** + * Translates a Cassandra {@code rpc_address} to another address if necessary. + * + * @param address the address of a node as returned by Cassandra. Note that + * if the {@code rpc_address} of a node has been configured to {@code 0.0.0.0} + * server side, then the provided address will be the node {@code listen_address}, + * *not* {@code 0.0.0.0}. Also note that the port for {@code InetSocketAddress} + * will always be the one set at Cluster construction time (9042 by default). + * @return the address the driver should actually use to connect to the node + * designated by {@code address}. If the return is {@code null}, then {@code + * address} will be used by the driver (it is thus equivalent to returing + * {@code address} directly) + */ + public InetSocketAddress translate(InetSocketAddress address); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ChainableLoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ChainableLoadBalancingPolicy.java new file mode 100644 index 00000000000..96f697c3a10 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ChainableLoadBalancingPolicy.java @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +/** + * A load balancing policy that wraps another policy. + */ +public interface ChainableLoadBalancingPolicy extends LoadBalancingPolicy { + /** + * Returns the child policy. + * + * @return the child policy. + */ + LoadBalancingPolicy getChildPolicy(); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/CloseableAddressTranslater.java b/driver-core/src/main/java/com/datastax/driver/core/policies/CloseableAddressTranslater.java new file mode 100644 index 00000000000..37dc5d5e3fe --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/CloseableAddressTranslater.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import com.datastax.driver.core.Cluster; + +/** + * Extends {@link AddressTranslater} for implementations that need to free some resources + * at {@link Cluster} shutdown. + *

+ * Note: the only reason {@link #close()} was not added directly to {@code AddressTranslater} + * is backward-compatibility. + */ +public interface CloseableAddressTranslater extends AddressTranslater { + /** + * Called at {@link Cluster} shutdown. + */ + void close(); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicy.java new file mode 100644 index 00000000000..eee79d7bf30 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicy.java @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +/** + * A load balancing policy that wants to be notified at cluster shutdown. + * + * The only reason that this is separate from {@link LoadBalancingPolicy} is to avoid breaking binary compatibility at the + * time this was introduced (2.0.7 / 2.1.3). It might be merged with the parent interface in a future major version. + */ +public interface CloseableLoadBalancingPolicy extends LoadBalancingPolicy { + /** + * Gets invoked at cluster shutdown. + * + * This gives the policy the opportunity to perform some cleanup, for instance stop threads that it might have started. 
+ */ + void close(); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java new file mode 100644 index 00000000000..146283a9d7e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +/** + * A reconnection policy that waits a constant time between each reconnection attempt. + */ +public class ConstantReconnectionPolicy implements ReconnectionPolicy { + + private final long delayMs; + + /** + * Creates a reconnection policy that creates with the provided constant wait + * time between reconnection attempts. + * + * @param constantDelayMs the constant delay in milliseconds to use. + */ + public ConstantReconnectionPolicy(long constantDelayMs) { + if (constantDelayMs < 0) + throw new IllegalArgumentException(String.format("Invalid negative delay (got %d)", constantDelayMs)); + + this.delayMs = constantDelayMs; + } + + /** + * The constant delay used by this reconnection policy. + * + * @return the constant delay used by this reconnection policy. 
+ */ + public long getConstantDelayMs() { + return delayMs; + } + + /** + * A new schedule that uses a constant {@code getConstantDelayMs()} delay + * between reconnection attempt. + * + * @return the newly created schedule. + */ + @Override + public ReconnectionSchedule newSchedule() { + return new ConstantSchedule(); + } + + private class ConstantSchedule implements ReconnectionSchedule { + + @Override + public long nextDelayMs() { + return delayMs; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.java new file mode 100644 index 00000000000..b8cc6691b80 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.java @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import java.util.concurrent.atomic.AtomicInteger; + +import com.google.common.base.Preconditions; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.Statement; + +/** + * A {@link SpeculativeExecutionPolicy} that schedules a given number of speculative executions, separated by a fixed delay. 
+ */ +public class ConstantSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { + private final int maxSpeculativeExecutions; + private final long constantDelayMillis; + + /** + * Builds a new instance. + * + * @param constantDelayMillis the delay between each speculative execution. Must be strictly positive. + * @param maxSpeculativeExecutions the number of speculative executions. Must be strictly positive. + * + * @throws IllegalArgumentException if one of the arguments does not respect the preconditions above. + */ + public ConstantSpeculativeExecutionPolicy(final long constantDelayMillis, final int maxSpeculativeExecutions) { + Preconditions.checkArgument(constantDelayMillis > 0, + "delay must be strictly positive (was %d)", constantDelayMillis); + Preconditions.checkArgument(maxSpeculativeExecutions > 0, + "number of speculative executions must be strictly positive (was %d)", maxSpeculativeExecutions); + this.constantDelayMillis = constantDelayMillis; + this.maxSpeculativeExecutions = maxSpeculativeExecutions; + } + + @Override + public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { + return new SpeculativeExecutionPlan() { + private final AtomicInteger remaining = new AtomicInteger(maxSpeculativeExecutions); + + @Override + public long nextExecution(Host lastQueried) { + return (remaining.getAndDecrement() > 0) ? constantDelayMillis : -1; + } + }; + } + + @Override + public void init(Cluster cluster) { + // do nothing + } + + @Override + public void close() { + // do nothing + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java new file mode 100644 index 00000000000..f09c343cabf --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java @@ -0,0 +1,385 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
package com.datastax.driver.core.policies;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.AbstractIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Configuration;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.Statement;

/**
 * A data-center aware Round-robin load balancing policy.
 * <p>
 * This policy provides round-robin queries over the node of the local
 * data center. It also includes in the query plans returned a configurable
 * number of hosts in the remote data centers, but those are always tried
 * after the local nodes. In other words, this policy guarantees that no
 * host in a remote data center will be queried unless no host in the local
 * data center can be reached.
 * <p>
 * If used with a single data center, this policy is equivalent to the
 * {@code LoadBalancingPolicy.RoundRobin} policy, but its DC awareness
 * incurs a slight overhead so the {@code LoadBalancingPolicy.RoundRobin}
 * policy could be preferred to this policy in that case.
 */
public class DCAwareRoundRobinPolicy implements LoadBalancingPolicy, CloseableLoadBalancingPolicy {

    private static final Logger logger = LoggerFactory.getLogger(DCAwareRoundRobinPolicy.class);

    // Sentinel meaning "local DC not determined yet". Compared by reference (==)
    // on purpose so it cannot collide with a genuine empty datacenter name.
    private final String UNSET = "";

    // Live hosts, grouped by datacenter name.
    private final ConcurrentMap<String, CopyOnWriteArrayList<Host>> perDcLiveHosts = new ConcurrentHashMap<String, CopyOnWriteArrayList<Host>>();
    // Monotonically increasing counter used to rotate the round-robin starting point.
    private final AtomicInteger index = new AtomicInteger();

    @VisibleForTesting
    volatile String localDc;

    private final int usedHostsPerRemoteDc;
    private final boolean dontHopForLocalCL;

    private volatile Configuration configuration;

    /**
     * Creates a new datacenter aware round robin policy that auto-discover
     * the local data-center.
     * <p>
     * If this constructor is used, the data-center used as local will the
     * data-center of the first Cassandra node the driver connects to. This
     * will always be ok if all the contact points use at {@code Cluster}
     * creation are in the local data-center. If it's not the case, you should
     * provide the local data-center name yourself by using one of the other
     * constructor of this class.
     * <p>
     * This constructor is a shortcut for {@code new DCAwareRoundRobinPolicy(null)},
     * and as such will ignore all hosts in remote data-centers.
     */
    public DCAwareRoundRobinPolicy() {
        this(null, 0, false, true);
    }

    /**
     * Creates a new datacenter aware round robin policy given the name of
     * the local datacenter.
     * <p>
     * The name of the local datacenter provided must be the local
     * datacenter name as known by Cassandra.
     * <p>
     * The policy created will ignore all remote hosts. In other words,
     * this is equivalent to {@code new DCAwareRoundRobinPolicy(localDc, 0)}.
     *
     * @param localDc the name of the local datacenter (as known by
     * Cassandra). If this is {@code null}, the policy will default to the
     * data-center of the first node connected to.
     */
    public DCAwareRoundRobinPolicy(String localDc) {
        this(localDc, 0, false, false);
    }

    /**
     * Creates a new DCAwareRoundRobin policy given the name of the local
     * datacenter and that uses the provided number of host per remote
     * datacenter as failover for the local hosts.
     * <p>
     * The name of the local datacenter provided must be the local
     * datacenter name as known by Cassandra.
     * <p>
     * If {@code usedHostsPerRemoteDc > 0}, then if for a query no host
     * in the local datacenter can be reached and if the consistency
     * level of the query is not {@code LOCAL_ONE} or {@code LOCAL_QUORUM},
     * then up to {@code usedHostsPerRemoteDc} host per remote data-center
     * will be tried by the policy as a fallback. Please note that no
     * remote host will be used for {@code LOCAL_ONE} and {@code LOCAL_QUORUM}
     * since this would change the meaning of the consistency level (and
     * thus somewhat break the consistency contract).
     *
     * @param localDc the name of the local datacenter (as known by
     * Cassandra). If this is {@code null}, the policy will default to the
     * data-center of the first node connected to.
     * @param usedHostsPerRemoteDc the number of host per remote
     * datacenter that policies created by the returned factory should
     * consider. Created policies {@code distance} method will return a
     * {@code HostDistance.REMOTE} distance for only {@code
     * usedHostsPerRemoteDc} hosts per remote datacenter. Other hosts
     * of the remote datacenters will be ignored (and thus no
     * connections to them will be maintained).
     */
    public DCAwareRoundRobinPolicy(String localDc, int usedHostsPerRemoteDc) {
        this(localDc, usedHostsPerRemoteDc, false, false);
    }

    /**
     * Creates a new DCAwareRoundRobin policy given the name of the local
     * datacenter and that uses the provided number of host per remote
     * datacenter as failover for the local hosts.
     * <p>
     * This constructor is equivalent to {@link #DCAwareRoundRobinPolicy(String, int)}
     * but allows to override the policy of never using remote data-center
     * nodes for {@code LOCAL_ONE} and {@code LOCAL_QUORUM} queries. It is
     * however inadvisable to do so in almost all cases, as this would
     * potentially break consistency guarantees and if you are fine with that,
     * it's probably better to use a weaker consistency like {@code ONE}, {@code
     * TWO} or {@code THREE}. As such, this constructor should generally
     * be avoided in favor of {@link #DCAwareRoundRobinPolicy(String, int)}.
     * Use it only if you know and understand what you do.
     *
     * @param localDc the name of the local datacenter (as known by
     * Cassandra). If this is {@code null}, the policy will default to the
     * data-center of the first node connected to.
     * @param usedHostsPerRemoteDc the number of host per remote
     * datacenter that policies created by the returned factory should
     * consider. Created policies {@code distance} method will return a
     * {@code HostDistance.REMOTE} distance for only {@code
     * usedHostsPerRemoteDc} hosts per remote datacenter. Other hosts
     * of the remote datacenters will be ignored (and thus no
     * connections to them will be maintained).
     * @param allowRemoteDCsForLocalConsistencyLevel whether or not the
     * policy may return remote host when building query plan for query
     * having consistency {@code LOCAL_ONE} and {@code LOCAL_QUORUM}.
     */
    public DCAwareRoundRobinPolicy(String localDc, int usedHostsPerRemoteDc, boolean allowRemoteDCsForLocalConsistencyLevel) {
        this(localDc, usedHostsPerRemoteDc, allowRemoteDCsForLocalConsistencyLevel, false);
    }

    private DCAwareRoundRobinPolicy(String localDc, int usedHostsPerRemoteDc, boolean allowRemoteDCsForLocalConsistencyLevel, boolean allowEmptyLocalDc) {
        if (!allowEmptyLocalDc && Strings.isNullOrEmpty(localDc))
            throw new IllegalArgumentException("Null or empty data center specified for DC-aware policy");
        this.localDc = localDc == null ? UNSET : localDc;
        this.usedHostsPerRemoteDc = usedHostsPerRemoteDc;
        this.dontHopForLocalCL = !allowRemoteDCsForLocalConsistencyLevel;
    }

    @Override
    public void init(Cluster cluster, Collection<Host> hosts) {
        if (localDc != UNSET)
            logger.info("Using provided data-center name '{}' for DCAwareRoundRobinPolicy", localDc);

        this.configuration = cluster.getConfiguration();

        ArrayList<String> notInLocalDC = new ArrayList<String>();

        for (Host host : hosts) {
            String dc = dc(host);

            // If the localDC was in "auto-discover" mode and it's the first host for which we have a DC, use it.
            if (localDc == UNSET && dc != UNSET) {
                logger.info("Using data-center name '{}' for DCAwareRoundRobinPolicy (if this is incorrect, please provide the correct datacenter name with DCAwareRoundRobinPolicy constructor)", dc);
                localDc = dc;
            } else if (!dc.equals(localDc))
                // Fixed: the previous code contained a second, duplicated
                // "if (!dc.equals(localDc)) notInLocalDC.add(...)" right after this
                // branch, so every non-conforming contact point was listed twice in
                // the warning (the duplicate also used the nullable host.getDatacenter()
                // instead of the null-safe dc(host)).
                notInLocalDC.add(String.format("%s (%s)", host.toString(), dc));

            CopyOnWriteArrayList<Host> prev = perDcLiveHosts.get(dc);
            if (prev == null)
                perDcLiveHosts.put(dc, new CopyOnWriteArrayList<Host>(Collections.singletonList(host)));
            else
                prev.addIfAbsent(host);
        }

        if (notInLocalDC.size() > 0) {
            String nonLocalHosts = Joiner.on(",").join(notInLocalDC);
            logger.warn("Some contact points don't match local data center. Local DC = {}. Non-conforming contact points: {}", localDc, nonLocalHosts);
        }
    }

    // Datacenter of a host, falling back to the local DC when the host's DC is unknown.
    private String dc(Host host) {
        String dc = host.getDatacenter();
        return dc == null ? localDc : dc;
    }

    @SuppressWarnings("unchecked")
    private static CopyOnWriteArrayList<Host> cloneList(CopyOnWriteArrayList<Host> list) {
        return (CopyOnWriteArrayList<Host>)list.clone();
    }

    /**
     * Return the HostDistance for the provided host.
     * <p>
     * This policy consider nodes in the local datacenter as {@code LOCAL}.
     * For each remote datacenter, it considers a configurable number of
     * hosts as {@code REMOTE} and the rest is {@code IGNORED}.
     * <p>
     * To configure how many host in each remote datacenter is considered
     * {@code REMOTE}, see {@link #DCAwareRoundRobinPolicy(String, int)}.
     *
     * @param host the host of which to return the distance of.
     * @return the HostDistance to {@code host}.
     */
    @Override
    public HostDistance distance(Host host) {
        String dc = dc(host);
        if (dc == UNSET || dc.equals(localDc))
            return HostDistance.LOCAL;

        CopyOnWriteArrayList<Host> dcHosts = perDcLiveHosts.get(dc);
        if (dcHosts == null || usedHostsPerRemoteDc == 0)
            return HostDistance.IGNORED;

        // We need to clone, otherwise our subList call is not thread safe
        dcHosts = cloneList(dcHosts);
        return dcHosts.subList(0, Math.min(dcHosts.size(), usedHostsPerRemoteDc)).contains(host)
             ? HostDistance.REMOTE
             : HostDistance.IGNORED;
    }

    /**
     * Returns the hosts to use for a new query.
     * <p>
     * The returned plan will always try each known host in the local
     * datacenter first, and then, if none of the local host is reachable,
     * will try up to a configurable number of other host per remote datacenter.
     * The order of the local node in the returned query plan will follow a
     * Round-robin algorithm.
     *
     * @param loggedKeyspace the keyspace currently logged in on for this
     * query.
     * @param statement the query for which to build the plan.
     * @return a new query plan, i.e. an iterator indicating which host to
     * try first for querying, which one to use as failover, etc...
     */
    @Override
    public Iterator<Host> newQueryPlan(String loggedKeyspace, final Statement statement) {

        CopyOnWriteArrayList<Host> localLiveHosts = perDcLiveHosts.get(localDc);
        final List<Host> hosts = localLiveHosts == null ? Collections.<Host>emptyList() : cloneList(localLiveHosts);
        final int startIdx = index.getAndIncrement();

        return new AbstractIterator<Host>() {

            private int idx = startIdx;
            private int remainingLocal = hosts.size();

            // For remote Dcs
            private Iterator<String> remoteDcs;
            private List<Host> currentDcHosts;
            private int currentDcRemaining;

            @Override
            protected Host computeNext() {
                while (true) {
                    if (remainingLocal > 0) {
                        remainingLocal--;
                        int c = idx++ % hosts.size();
                        // idx may have wrapped around Integer.MAX_VALUE, making the modulo negative.
                        if (c < 0) {
                            c += hosts.size();
                        }
                        return hosts.get(c);
                    }

                    if (currentDcHosts != null && currentDcRemaining > 0) {
                        currentDcRemaining--;
                        int c = idx++ % currentDcHosts.size();
                        if (c < 0) {
                            c += currentDcHosts.size();
                        }
                        return currentDcHosts.get(c);
                    }

                    ConsistencyLevel cl = statement.getConsistencyLevel() == null
                                        ? configuration.getQueryOptions().getConsistencyLevel()
                                        : statement.getConsistencyLevel();

                    // Never hop to a remote DC for DC-local consistency levels unless explicitly allowed.
                    if (dontHopForLocalCL && cl.isDCLocal())
                        return endOfData();

                    if (remoteDcs == null) {
                        Set<String> copy = new HashSet<String>(perDcLiveHosts.keySet());
                        copy.remove(localDc);
                        remoteDcs = copy.iterator();
                    }

                    if (!remoteDcs.hasNext())
                        break;

                    String nextRemoteDc = remoteDcs.next();
                    CopyOnWriteArrayList<Host> nextDcHosts = perDcLiveHosts.get(nextRemoteDc);
                    if (nextDcHosts != null) {
                        // Clone for thread safety
                        List<Host> dcHosts = cloneList(nextDcHosts);
                        currentDcHosts = dcHosts.subList(0, Math.min(dcHosts.size(), usedHostsPerRemoteDc));
                        currentDcRemaining = currentDcHosts.size();
                    }
                }
                return endOfData();
            }
        };
    }

    @Override
    public void onUp(Host host) {
        String dc = dc(host);

        // If the localDC was in "auto-discover" mode and it's the first host for which we have a DC, use it.
        if (localDc == UNSET && dc != UNSET) {
            logger.info("Using data-center name '{}' for DCAwareRoundRobinPolicy (if this is incorrect, please provide the correct datacenter name with DCAwareRoundRobinPolicy constructor)", dc);
            localDc = dc;
        }

        CopyOnWriteArrayList<Host> dcHosts = perDcLiveHosts.get(dc);
        if (dcHosts == null) {
            CopyOnWriteArrayList<Host> newMap = new CopyOnWriteArrayList<Host>(Collections.singletonList(host));
            dcHosts = perDcLiveHosts.putIfAbsent(dc, newMap);
            // If we've successfully put our new host, we're good, otherwise we've been beaten so continue
            if (dcHosts == null)
                return;
        }
        dcHosts.addIfAbsent(host);
    }

    @Override
    public void onSuspected(Host host) {
    }

    @Override
    public void onDown(Host host) {
        CopyOnWriteArrayList<Host> dcHosts = perDcLiveHosts.get(dc(host));
        if (dcHosts != null)
            dcHosts.remove(host);
    }

    @Override
    public void onAdd(Host host) {
        onUp(host);
    }

    @Override
    public void onRemove(Host host) {
        onDown(host);
    }

    @Override
    public void close() {
        // nothing to do
    }
}
a/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java new file mode 100644 index 00000000000..38a9b28f457 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; + +/** + * The default retry policy. + *

+ * This policy retries queries in only two cases: + *

    + *
  • On a read timeout, if enough replicas replied but data was not retrieved.
  • + *
  • On a write timeout, if we timeout while writing the distributed log used by batch statements.
  • + *
+ *

+ * This retry policy is conservative in that it will never retry with a + * different consistency level than the one of the initial operation. + *

+ * In some cases, it may be convenient to use a more aggressive retry policy + * like {@link DowngradingConsistencyRetryPolicy}. + */ +public class DefaultRetryPolicy implements RetryPolicy { + + public static final DefaultRetryPolicy INSTANCE = new DefaultRetryPolicy(); + + private DefaultRetryPolicy() {} + + /** + * Defines whether to retry and at which consistency level on a read timeout. + *

+ * This method triggers a maximum of one retry, and only if enough + * replicas had responded to the read request but data was not retrieved + * amongst those. Indeed, that case usually means that enough replica + * are alive to satisfy the consistency but the coordinator picked a + * dead one for data retrieval, not having detected that replica as dead + * yet. The reasoning for retrying then is that by the time we get the + * timeout the dead replica will likely have been detected as dead and + * the retry has a high chance of success. + * + * @param statement the original query that timed out. + * @param cl the original consistency level of the read that timed out. + * @param requiredResponses the number of responses that were required to + * achieve the requested consistency level. + * @param receivedResponses the number of responses that had been received + * by the time the timeout exception was raised. + * @param dataRetrieved whether actual data (by opposition to data checksum) + * was present in the received responses. + * @param nbRetry the number of retries already performed for this operation. + * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and + * {@code receivedResponses >= requiredResponses && !dataRetrieved}, {@code RetryDecision.rethrow()} otherwise. + */ + @Override + public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { + if (nbRetry != 0) + return RetryDecision.rethrow(); + + return receivedResponses >= requiredResponses && !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on a write timeout. + *

+ * This method triggers a maximum of one retry, and only in the case of + * a {@code WriteType.BATCH_LOG} write. The reasoning for the retry in + * that case is that write to the distributed batch log is tried by the + * coordinator of the write against a small subset of all the nodes alive + * in the local datacenter. Hence, a timeout usually means that none of + * the nodes in that subset were alive but the coordinator hasn't + * detected them as dead. By the time we get the timeout the dead + * nodes will likely have been detected as dead and the retry has thus a + * high chance of success. + * + * @param statement the original query that timed out. + * @param cl the original consistency level of the write that timed out. + * @param writeType the type of the write that timed out. + * @param requiredAcks the number of acknowledgments that were required to + * achieve the requested consistency level. + * @param receivedAcks the number of acknowledgments that had been received + * by the time the timeout exception was raised. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and + * {@code writeType == WriteType.BATCH_LOG}, {@code RetryDecision.rethrow()} otherwise. + */ + @Override + public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { + if (nbRetry != 0) + return RetryDecision.rethrow(); + + // If the batch log write failed, retry the operation as this might just be we were unlucky at picking candidates + return writeType == WriteType.BATCH_LOG ? RetryDecision.retry(cl) : RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on an + * unavailable exception. + *

+ * This method triggers a retry iff no retry has been executed before + * (nbRetry == 0), with {@link RetryDecision#tryNextHost}, otherwise it + * throws an exception. The retry will be processed on the next host + * in the query plan according to the current Load Balancing Policy. + * Where retrying on the same host in the event of an Unavailable exception + * has almost no chance of success, if the first replica tried happens to + * be "network" isolated from all the other nodes but can still answer to + * the client, it makes sense to retry the query on another node. + * + * @param statement the original query for which the consistency level cannot + * be achieved. + * @param cl the original consistency level for the operation. + * @param requiredReplica the number of replica that should have been + * (known) alive for the operation to be attempted. + * @param aliveReplica the number of replica that were know to be alive by + * the coordinator of the operation. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.rethrow()}. + */ + @Override + public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { + return (nbRetry == 0) + ? RetryDecision.tryNextHost(cl) + : RetryDecision.rethrow(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java new file mode 100644 index 00000000000..f4f1a53d564 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; + +/** + * A retry policy that sometimes retry with a lower consistency level than + * the one initially requested. + *

+ * BEWARE: This policy may retry queries using a lower consistency + * level than the one initially requested. By doing so, it may break + * consistency guarantees. In other words, if you use this retry policy, + * there is cases (documented below) where a read at {@code QUORUM} + * may not see a preceding write at {@code QUORUM}. Do not use this + * policy unless you have understood the cases where this can happen and + * are ok with that. It is also highly recommended to always wrap this + * policy into {@link LoggingRetryPolicy} to log the occurrences of + * such consistency break. + *

+ * This policy implements the same retries than the {@link DefaultRetryPolicy} + * policy. But on top of that, it also retries in the following cases: + *

    + *
  • On a read timeout: if the number of replica that responded is + * greater than one but lower than is required by the requested + * consistency level, the operation is retried at a lower consistency + * level.
  • + *
  • On a write timeout: if the operation is an {@code + * WriteType.UNLOGGED_BATCH} and at least one replica acknowledged the + * write, the operation is retried at a lower consistency level. + * Furthermore, for other operation, if at least one replica acknowledged + * the write, the timeout is ignored.
  • + *
  • On an unavailable exception: if at least one replica is alive, the + * operation is retried at a lower consistency level.
  • + *
+ *

+ * The reasoning being this retry policy is the following one. If, based + * on the information the Cassandra coordinator node returns, retrying the + * operation with the initially requested consistency has a change to + * succeed, do it. Otherwise, if based on these information we know the + * initially requested consistency level cannot be achieve currently, then: + *

    + *
  • For writes, ignore the exception (thus silently failing the + * consistency requirement) if we know the write has been persisted on at + * least one replica.
  • + *
  • For reads, try reading at a lower consistency level (thus silently + * failing the consistency requirement).
  • + *
+ * In other words, this policy implements the idea that if the requested + * consistency level cannot be achieved, the next best thing for writes is + * to make sure the data is persisted, and that reading something is better + * than reading nothing, even if there is a risk of reading stale data. + */ +public class DowngradingConsistencyRetryPolicy implements RetryPolicy { + + public static final DowngradingConsistencyRetryPolicy INSTANCE = new DowngradingConsistencyRetryPolicy(); + + private DowngradingConsistencyRetryPolicy() {} + + private RetryDecision maxLikelyToWorkCL(int knownOk) { + if (knownOk >= 3) + return RetryDecision.retry(ConsistencyLevel.THREE); + else if (knownOk == 2) + return RetryDecision.retry(ConsistencyLevel.TWO); + else if (knownOk == 1) + return RetryDecision.retry(ConsistencyLevel.ONE); + else + return RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on a read timeout. + *

+ * This method triggers a maximum of one retry. If less replica + * responded than required by the consistency level (but at least one + * replica did respond), the operation is retried at a lower + * consistency level. If enough replica responded but data was not + * retrieve, the operation is retried with the initial consistency + * level. Otherwise, an exception is thrown. + * + * @param statement the original query that timed out. + * @param cl the original consistency level of the read that timed out. + * @param requiredResponses the number of responses that were required to + * achieve the requested consistency level. + * @param receivedResponses the number of responses that had been received + * by the time the timeout exception was raised. + * @param dataRetrieved whether actual data (by opposition to data checksum) + * was present in the received responses. + * @param nbRetry the number of retry already performed for this operation. + * @return a RetryDecision as defined above. + */ + @Override + public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { + if (nbRetry != 0) + return RetryDecision.rethrow(); + + // CAS reads are not all that useful in terms of visibility of the writes since CAS write supports the + // normal consistency levels on the committing phase. So the main use case for CAS reads is probably for + // when you've timed out on a CAS write and want to make sure what happened. Downgrading in that case + // would be always wrong so we just special case to rethrow. + if (cl == ConsistencyLevel.SERIAL || cl == ConsistencyLevel.LOCAL_SERIAL) + return RetryDecision.rethrow(); + + if (receivedResponses < requiredResponses) { + // Tries the biggest CL that is expected to work + return maxLikelyToWorkCL(receivedResponses); + } + + return !dataRetrieved ? 
RetryDecision.retry(cl) : RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on a write timeout. + *

+ * This method triggers a maximum of one retry. If {@code writeType == + * WriteType.BATCH_LOG}, the write is retried with the initial + * consistency level. If {@code writeType == WriteType.UNLOGGED_BATCH} + * and at least one replica acknowledged, the write is retried with a + * lower consistency level (with unlogged batch, a write timeout can + * always mean that part of the batch haven't been persisted at + * all, even if {@code receivedAcks > 0}). For other write types ({@code WriteType.SIMPLE} + * and {@code WriteType.BATCH}), if we know the write has been persisted on at + * least one replica, we ignore the exception. Otherwise, an exception is thrown. + * + * @param statement the original query that timed out. + * @param cl the original consistency level of the write that timed out. + * @param writeType the type of the write that timed out. + * @param requiredAcks the number of acknowledgments that were required to + * achieve the requested consistency level. + * @param receivedAcks the number of acknowledgments that had been received + * by the time the timeout exception was raised. + * @param nbRetry the number of retry already performed for this operation. + * @return a RetryDecision as defined above. + */ + @Override + public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { + if (nbRetry != 0) + return RetryDecision.rethrow(); + + switch (writeType) { + case SIMPLE: + case BATCH: + // Since we provide atomicity there is no point in retrying + return receivedAcks > 0 ? 
RetryDecision.ignore() : RetryDecision.rethrow(); + case UNLOGGED_BATCH: + // Since only part of the batch could have been persisted, + // retry with whatever consistency should allow to persist all + return maxLikelyToWorkCL(receivedAcks); + case BATCH_LOG: + return RetryDecision.retry(cl); + } + // We want to rethrow on COUNTER and CAS, because in those case "we don't know" and don't want to guess + return RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on an + * unavailable exception. + *

+ * This method triggers a maximum of one retry. If at least one replica + * is know to be alive, the operation is retried at a lower consistency + * level. + * + * @param statement the original query for which the consistency level cannot + * be achieved. + * @param cl the original consistency level for the operation. + * @param requiredReplica the number of replica that should have been + * (known) alive for the operation to be attempted. + * @param aliveReplica the number of replica that were know to be alive by + * the coordinator of the operation. + * @param nbRetry the number of retry already performed for this operation. + * @return a RetryDecision as defined above. + */ + @Override + public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { + if (nbRetry != 0) + return RetryDecision.rethrow(); + + // Tries the biggest CL that is expected to work + return maxLikelyToWorkCL(aliveReplica); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslater.java b/driver-core/src/main/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslater.java new file mode 100644 index 00000000000..a45b284ca8e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslater.java @@ -0,0 +1,144 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.policies; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Enumeration; +import java.util.Hashtable; + +import com.google.common.annotations.VisibleForTesting; +import javax.naming.Context; +import javax.naming.NamingEnumeration; +import javax.naming.NamingException; +import javax.naming.directory.Attribute; +import javax.naming.directory.Attributes; +import javax.naming.directory.DirContext; +import javax.naming.directory.InitialDirContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.exceptions.DriverException; + +/** + * {@link AddressTranslater} implementation for a multi-region EC2 deployment where clients are also deployed in EC2. + *

+ * Its distinctive feature is that it translates addresses according to the location of the Cassandra host: + *

+ * <ul>
+ *     <li>addresses in different EC2 regions (than the client) are unchanged;</li>
+ *     <li>addresses in the same EC2 region are translated to private IPs.</li>
+ * </ul>
+ * This optimizes network costs, because Amazon charges more for communication over public IPs. + * + *

+ * Implementation note: this class performs a reverse DNS lookup of the origin address, to find the domain name of the target + * instance. Then it performs a forward DNS lookup of the domain name; the EC2 DNS does the private/public switch automatically + * based on location. + */ +public class EC2MultiRegionAddressTranslater implements CloseableAddressTranslater { + + private static final Logger logger = LoggerFactory.getLogger(EC2MultiRegionAddressTranslater.class); + + // TODO when we switch to Netty 4.1, we can replace this with the Netty built-in DNS client + private final DirContext ctx; + + public EC2MultiRegionAddressTranslater() { + Hashtable env = new Hashtable(); + env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory"); + try { + ctx = new InitialDirContext(env); + } catch (NamingException e) { + throw new DriverException("Could not create translater", e); + } + } + + @VisibleForTesting + EC2MultiRegionAddressTranslater(DirContext ctx) { + this.ctx = ctx; + } + + @Override + public InetSocketAddress translate(InetSocketAddress socketAddress) { + InetAddress address = socketAddress.getAddress(); + try { + // InetAddress#getHostName() is supposed to perform a reverse DNS lookup, but for some reason it doesn't work + // within the same EC2 region (it returns the IP address itself). 
+ // We use an alternate implementation: + String domainName = lookupPtrRecord(reverse(address)); + if (domainName == null) { + logger.warn("Found no domain name for {}, returning it as-is", address); + return socketAddress; + } + + InetAddress translatedAddress = InetAddress.getByName(domainName); + logger.debug("Resolved {} to {}", address, translatedAddress); + return new InetSocketAddress(translatedAddress, socketAddress.getPort()); + } catch (Exception e) { + logger.warn("Error resolving " + address + ", returning it as-is", e); + return socketAddress; + } + } + + private String lookupPtrRecord(String reversedDomain) throws Exception { + Attributes attrs = ctx.getAttributes(reversedDomain, new String[]{ "PTR" }); + for (NamingEnumeration ae = attrs.getAll(); ae.hasMoreElements(); ) { + Attribute attr = (Attribute)ae.next(); + for (Enumeration vals = attr.getAll(); vals.hasMoreElements(); ) + return vals.nextElement().toString(); + } + return null; + } + + public void close() { + try { + ctx.close(); + } catch (NamingException e) { + logger.warn("Error closing translater", e); + } + } + + // Builds the "reversed" domain name in the ARPA domain to perform the reverse lookup + @VisibleForTesting + static String reverse(InetAddress address) { + byte[] bytes = address.getAddress(); + if (bytes.length == 4) + return reverseIpv4(bytes); + else + return reverseIpv6(bytes); + } + + private static String reverseIpv4(byte[] bytes) { + StringBuilder builder = new StringBuilder(); + for (int i = bytes.length - 1; i >= 0; i--) { + builder.append(bytes[i] & 0xFF).append('.'); + } + builder.append("in-addr.arpa"); + return builder.toString(); + } + + private static String reverseIpv6(byte[] bytes) { + StringBuilder builder = new StringBuilder(); + for (int i = bytes.length - 1; i >= 0; i--) { + byte b = bytes[i]; + int lowNibble = b & 0x0F; + int highNibble = b >> 4 & 0x0F; + builder.append(Integer.toHexString(lowNibble)).append('.') + 
.append(Integer.toHexString(highNibble)).append('.'); + } + builder.append("ip6.arpa"); + return builder.toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java new file mode 100644 index 00000000000..d7e264e57d8 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +/** + * A reconnection policy that waits exponentially longer between each + * reconnection attempt (but keeps a constant delay once a maximum delay is + * reached). + */ +public class ExponentialReconnectionPolicy implements ReconnectionPolicy { + + private final long baseDelayMs; + private final long maxDelayMs; + private final long maxAttempts; + + /** + * Creates a reconnection policy waiting exponentially longer for each new attempt. + * + * @param baseDelayMs the base delay in milliseconds to use for + * the schedules created by this policy. + * @param maxDelayMs the maximum delay to wait between two attempts. 
+ */ + public ExponentialReconnectionPolicy(long baseDelayMs, long maxDelayMs) { + if (baseDelayMs < 0 || maxDelayMs < 0) + throw new IllegalArgumentException("Invalid negative delay"); + if (baseDelayMs == 0) + throw new IllegalArgumentException("baseDelayMs must be strictly positive"); + if (maxDelayMs < baseDelayMs) + throw new IllegalArgumentException(String.format("maxDelayMs (got %d) cannot be smaller than baseDelayMs (got %d)", maxDelayMs, baseDelayMs)); + + this.baseDelayMs = baseDelayMs; + this.maxDelayMs = maxDelayMs; + + // Maximum number of attempts after which we overflow (which is kind of theoretical anyway, you'll + // die of old age before reaching that but hey ...) + int ceil = (baseDelayMs & (baseDelayMs - 1)) == 0 ? 0 : 1; + this.maxAttempts = 64 - Long.numberOfLeadingZeros(Long.MAX_VALUE / baseDelayMs) - ceil; + } + + /** + * The base delay in milliseconds for this policy (e.g. the delay before + * the first reconnection attempt). + * + * @return the base delay in milliseconds for this policy. + */ + public long getBaseDelayMs() { + return baseDelayMs; + } + + /** + * The maximum delay in milliseconds between reconnection attempts for this policy. + * + * @return the maximum delay in milliseconds between reconnection attempts for this policy. + */ + public long getMaxDelayMs() { + return maxDelayMs; + } + + /** + * A new schedule that used an exponentially growing delay between reconnection attempts. + *

+ * For this schedule, reconnection attempt {@code i} will be tried + * {@code Math.min(2^(i-1) * getBaseDelayMs(), getMaxDelayMs())} milliseconds after the previous one. + * + * @return the newly created schedule. + */ + @Override + public ReconnectionSchedule newSchedule() { + return new ExponentialSchedule(); + } + + private class ExponentialSchedule implements ReconnectionSchedule { + + private int attempts; + + @Override + public long nextDelayMs() { + + if (attempts > maxAttempts) + return maxDelayMs; + + return Math.min(baseDelayMs * (1L << attempts++), maxDelayMs); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java new file mode 100644 index 00000000000..5e4f179699d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; + +/** + * A retry policy that never retry (nor ignore). + *

+ * All of the methods of this retry policy unconditionally return {@link RetryPolicy.RetryDecision#rethrow}. + * If this policy is used, retry will have to be implemented in business code. + */ +public class FallthroughRetryPolicy implements RetryPolicy { + + public static final FallthroughRetryPolicy INSTANCE = new FallthroughRetryPolicy(); + + private FallthroughRetryPolicy() {} + + /** + * Defines whether to retry and at which consistency level on a read timeout. + * + * @param statement the original query that timed out. + * @param cl the original consistency level of the read that timed out. + * @param requiredResponses the number of responses that were required to + * achieve the requested consistency level. + * @param receivedResponses the number of responses that had been received + * by the time the timeout exception was raised. + * @param dataRetrieved whether actual data (by opposition to data checksum) + * was present in the received responses. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.rethrow()}. + */ + @Override + public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { + return RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on a write timeout. + * + * @param statement the original query that timed out. + * @param cl the original consistency level of the write that timed out. + * @param writeType the type of the write that timed out. + * @param requiredAcks the number of acknowledgments that were required to + * achieve the requested consistency level. + * @param receivedAcks the number of acknowledgments that had been received + * by the time the timeout exception was raised. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.rethrow()}. 
+ */ + @Override + public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { + return RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on an + * unavailable exception. + * + * @param statement the original query for which the consistency level cannot + * be achieved. + * @param cl the original consistency level for the operation. + * @param requiredReplica the number of replica that should have been + * (known) alive for the operation to be attempted. + * @param aliveReplica the number of replica that were know to be alive by + * the coordinator of the operation. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.rethrow()}. + */ + @Override + public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { + return RetryDecision.rethrow(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/IdentityTranslater.java b/driver-core/src/main/java/com/datastax/driver/core/policies/IdentityTranslater.java new file mode 100644 index 00000000000..4c25b7c0e87 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/IdentityTranslater.java @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.policies; + +import java.net.InetSocketAddress; + +/** + * The default {@link AddressTranslater} used by the driver that do no + * translation. + */ +public class IdentityTranslater implements AddressTranslater { + + /** + * Translates a Cassandra {@code rpc_address} to another address if necessary. + *

+ * This method is the identity function, it always return the address passed + * in argument, doing no translation. + * + * @param address the address of a node as returned by Cassandra. + * @return {@code address} unmodified. + */ + public InetSocketAddress translate(InetSocketAddress address) { + return address; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LatencyAwarePolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LatencyAwarePolicy.java new file mode 100644 index 00000000000..5d1f73f4ad9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LatencyAwarePolicy.java @@ -0,0 +1,727 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicReference; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.AbstractIterator; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.*; +import com.datastax.driver.core.exceptions.*; + +/** + * A wrapper load balancing policy that adds latency awareness to a child policy. + *

+ * When used, this policy will collect the latencies of the queries to each + * Cassandra node and maintain a per-node latency score (an average). Based + * on these scores, the policy will penalize (technically, it will ignore them + * unless no other nodes are up) the nodes that are slower than the best + * performing node by more than some configurable amount (the exclusion + * threshold). + *

+ * The latency score for a given node is a based on a form of + * exponential moving average. + * In other words, the latency score of a node is the average of its previously + * measured latencies, but where older measurements gets an exponentially decreasing + * weight. The exact weight applied to a newly received latency is based on the + * time elapsed since the previous measure (to account for the fact that + * latencies are not necessarily reported with equal regularity, neither + * over time nor between different nodes). + *

+ * Once a node is excluded from query plans (because its averaged latency grew + * over the exclusion threshold), its latency score will not be updated anymore + * (since it is not queried). To give a chance to this node to recover, the + * policy has a configurable retry period. The policy will not penalize a host + * for which no measurement has been collected for more than this retry period. + *

+ * Please see the {@link Builder} class and methods for more details on the + * possible parameters of this policy. + * + * @since 1.0.4 + */ +public class LatencyAwarePolicy implements ChainableLoadBalancingPolicy, CloseableLoadBalancingPolicy { + + private static final Logger logger = LoggerFactory.getLogger(LatencyAwarePolicy.class); + + private final LoadBalancingPolicy childPolicy; + private final Tracker latencyTracker; + private final ScheduledExecutorService updaterService = Executors.newSingleThreadScheduledExecutor(threadFactory("LatencyAwarePolicy updater")); + + private final double exclusionThreshold; + + private final long scale; + private final long retryPeriod; + private final long minMeasure; + + private LatencyAwarePolicy(LoadBalancingPolicy childPolicy, + double exclusionThreshold, + long scale, + long retryPeriod, + long updateRate, + int minMeasure) { + this.childPolicy = childPolicy; + this.retryPeriod = retryPeriod; + this.scale = scale; + this.latencyTracker = new Tracker(); + this.exclusionThreshold = exclusionThreshold; + this.minMeasure = minMeasure; + + updaterService.scheduleAtFixedRate(new Updater(), updateRate, updateRate, TimeUnit.NANOSECONDS); + } + + @Override + public LoadBalancingPolicy getChildPolicy() { + return childPolicy; + } + + /** + * Creates a new latency aware policy builder given the child policy + * that the resulting policy should wrap. + * + * @param childPolicy the load balancing policy to wrap with latency + * awareness. + * @return the created builder. 
+ */ + public static Builder builder(LoadBalancingPolicy childPolicy) { + return new Builder(childPolicy); + } + + @VisibleForTesting + class Updater implements Runnable { + + private Set excludedAtLastTick = Collections.emptySet(); + + @Override + public void run() { + try { + logger.trace("Updating LatencyAwarePolicy minimum"); + latencyTracker.updateMin(); + + if (logger.isDebugEnabled()) { + /* + * For users to be able to know if the policy potentially needs tuning, we need to provide + * some feedback on on how things evolve. For that, we use the min computation to also check + * which host will be excluded if a query is submitted now and if any host is, we log it (but + * we try to avoid flooding too). This is probably interesting information anyway since it + * gets an idea of which host perform badly. + */ + Set excludedThisTick = new HashSet(); + double currentMin = latencyTracker.getMinAverage(); + for (Map.Entry entry : getScoresSnapshot().getAllStats().entrySet()) { + Host host = entry.getKey(); + Snapshot.Stats stats = entry.getValue(); + if (stats.getMeasurementsCount() < minMeasure) + continue; + + if (stats.lastUpdatedSince() > retryPeriod) { + if (excludedAtLastTick.contains(host)) + logger.debug(String.format("Previously avoided host %s has not be queried since %.3fms: will be reconsidered.", host, inMS(stats.lastUpdatedSince()))); + continue; + } + + if (stats.getLatencyScore() > ((long)(exclusionThreshold * currentMin))) { + excludedThisTick.add(host); + if (!excludedAtLastTick.contains(host)) + logger.debug(String.format("Host %s has an average latency score of %.3fms, more than %f times more than the minimum %.3fms: will be avoided temporarily.", + host, inMS(stats.getLatencyScore()), exclusionThreshold, inMS(currentMin))); + continue; + } + + if (excludedAtLastTick.contains(host)) { + logger.debug("Previously avoided host {} average latency has come back within accepted bounds: will be reconsidered.", host); + } + } + excludedAtLastTick = 
excludedThisTick; + } + } catch (RuntimeException e) { + // An unexpected exception would suppress further execution, so catch, log, but swallow after that. + logger.error("Error while updating LatencyAwarePolicy minimum", e); + } + } + } + + private static double inMS(long nanos) { + return ((double)nanos) / (1000 * 1000); + } + + private static double inMS(double nanos) { + return nanos / (1000 * 1000); + } + + private static ThreadFactory threadFactory(String nameFormat) { + return new ThreadFactoryBuilder().setNameFormat(nameFormat).build(); + } + + @Override + public void init(Cluster cluster, Collection hosts) { + childPolicy.init(cluster, hosts); + cluster.register(latencyTracker); + } + + /** + * Returns the HostDistance for the provided host. + * + * @param host the host of which to return the distance of. + * @return the HostDistance to {@code host} as returned by the wrapped policy. + */ + @Override + public HostDistance distance(Host host) { + return childPolicy.distance(host); + } + + /** + * Returns the hosts to use for a new query. + *

+ * The returned plan will be the same as the plan generated by the + * child policy, but with the (initial) exclusion of hosts whose recent + * (averaged) latency is more than {@code exclusionThreshold * minLatency} + * (where {@code minLatency} is the (averaged) latency of the fastest + * host). + *

+ * The hosts that are initially excluded due to their latency will be returned + * by this iterator, but only only after all non-excluded hosts of the + * child policy have been returned. + * + * @param loggedKeyspace the currently logged keyspace. + * @param statement the statement for which to build the plan. + * @return the new query plan. + */ + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + final Iterator childIter = childPolicy.newQueryPlan(loggedKeyspace, statement); + return new AbstractIterator() { + + private Queue skipped; + + @Override + protected Host computeNext() { + long min = latencyTracker.getMinAverage(); + long now = System.nanoTime(); + while (childIter.hasNext()) { + Host host = childIter.next(); + TimestampedAverage latency = latencyTracker.latencyOf(host); + + // If we haven't had enough data point yet to have a score, or the last update of the score + // is just too old, include the host. + if (min < 0 || latency == null || latency.nbMeasure < minMeasure || (now - latency.timestamp) > retryPeriod) + return host; + + // If the host latency is within acceptable bound of the faster known host, return + // that host. Otherwise, skip it. + if (latency.average <= ((long)(exclusionThreshold * (double)min))) + return host; + + if (skipped == null) + skipped = new ArrayDeque(); + skipped.offer(host); + } + + if (skipped != null && !skipped.isEmpty()) + return skipped.poll(); + + return endOfData(); + }; + }; + } + + /** + * Returns a snapshot of the scores (latency averages) maintained by this + * policy. + * + * @return a new (immutable) {@link Snapshot} object containing the current + * latency scores maintained by this policy. 
+ */ + public Snapshot getScoresSnapshot() { + Map currentLatencies = latencyTracker.currentLatencies(); + ImmutableMap.Builder builder = ImmutableMap.builder(); + long now = System.nanoTime(); + for (Map.Entry entry : currentLatencies.entrySet()) { + Host host = entry.getKey(); + TimestampedAverage latency = entry.getValue(); + Snapshot.Stats stats = new Snapshot.Stats(now - latency.timestamp, latency.average, latency.nbMeasure); + builder.put(host, stats); + } + return new Snapshot(builder.build()); + } + + @Override + public void onUp(Host host) { + childPolicy.onUp(host); + } + + @Override + public void onSuspected(Host host) { + childPolicy.onSuspected(host); + } + + @Override + public void onDown(Host host) { + childPolicy.onDown(host); + latencyTracker.resetHost(host); + } + + @Override + public void onAdd(Host host) { + childPolicy.onAdd(host); + } + + @Override + public void onRemove(Host host) { + childPolicy.onRemove(host); + latencyTracker.resetHost(host); + } + + /** + * An immutable snapshot of the per-host scores (and stats in general) + * maintained by {@code LatencyAwarePolicy} to base its decision upon. + */ + public static class Snapshot { + private final Map stats; + + private Snapshot(Map stats) { + this.stats = stats; + } + + /** + * A map with the stats for all hosts tracked by the {@code + * LatencyAwarePolicy} at the time of the snapshot. + * + * @return a immutable map with all the stats contained in this + * snapshot. + */ + public Map getAllStats() { + return stats; + } + + /** + * The {@code Stats} object for a given host. + * + * @param host the host to return the stats of. + * @return the {@code Stats} for {@code host} in this snapshot or + * {@code null} if the snapshot has not information on {@code host}. + */ + public Stats getStats(Host host) { + return stats.get(host); + } + + /** + * A snapshot of the statistics on a given host kept by {@code LatencyAwarePolicy}. 
+ */ + public static class Stats { + private final long lastUpdatedSince; + private final long average; + private final long nbMeasurements; + + private Stats(long lastUpdatedSince, long average, long nbMeasurements) { + this.lastUpdatedSince = lastUpdatedSince; + this.average = average; + this.nbMeasurements = nbMeasurements; + } + + /** + * The number of nanoseconds since the last latency update was recorded (at the time + * of the snapshot). + * + * @return The number of nanoseconds since the last latency update was recorded (at the time + * of the snapshot). + */ + public long lastUpdatedSince() { + return lastUpdatedSince; + } + + /** + * The latency score for the host this is the stats of at the time of the snapshot. + * + * @return the latency score for the host this is the stats of at the time of the snapshot, + * or {@code -1L} if not enough measurements have been taken to assign a score. + */ + public long getLatencyScore() { + return average; + } + + /** + * The number of recorded latency measurements for the host this is the stats of. + * + * @return the number of recorded latency measurements for the host this is the stats of. + */ + public long getMeasurementsCount() { + return nbMeasurements; + } + } + } + + /** + * A set of DriverException subclasses that we should prevent from updating the host's score. + * The intent behind it is to filter out "fast" errors: when a host replies with such errors, + * it usually does so very quickly, because it did not involve any actual + * coordination work. Such errors are not good indicators of the host's responsiveness, + * and tend to make the host's score look better than it actually is. 
+ */ + private static final Set> EXCLUDED_EXCEPTIONS = ImmutableSet.of( + UnavailableException.class, // this is done via the snitch and is usually very fast + OverloadedException.class, + BootstrappingException.class, + UnpreparedException.class, + QueryValidationException.class // query validation also happens at early stages in the coordinator + ); + + private class Tracker implements LatencyTracker { + + private final ConcurrentMap latencies = new ConcurrentHashMap(); + private volatile long cachedMin = -1L; + + public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { + if(shouldConsiderNewLatency(statement, exception)) { + HostLatencyTracker hostTracker = latencies.get(host); + if (hostTracker == null) { + hostTracker = new HostLatencyTracker(scale, (30L * minMeasure) / 100L); + HostLatencyTracker old = latencies.putIfAbsent(host, hostTracker); + if (old != null) + hostTracker = old; + } + hostTracker.add(newLatencyNanos); + } + } + + private boolean shouldConsiderNewLatency(Statement statement, Exception exception) { + // query was successful: always consider + if(exception == null) return true; + // filter out "fast" errors + if(EXCLUDED_EXCEPTIONS.contains(exception.getClass())) return false; + return true; + } + + public void updateMin() { + long newMin = Long.MAX_VALUE; + long now = System.nanoTime(); + for (HostLatencyTracker tracker : latencies.values()) { + TimestampedAverage latency = tracker.getCurrentAverage(); + if (latency != null && latency.average >= 0 && latency.nbMeasure >= minMeasure && (now - latency.timestamp) <= retryPeriod) + newMin = Math.min(newMin, latency.average); + } + if (newMin != Long.MAX_VALUE) + cachedMin = newMin; + } + + public long getMinAverage() { + return cachedMin; + } + + public TimestampedAverage latencyOf(Host host) { + HostLatencyTracker tracker = latencies.get(host); + return tracker == null ? 
null : tracker.getCurrentAverage(); + } + + public Map currentLatencies() { + Map map = new HashMap(latencies.size()); + for (Map.Entry entry : latencies.entrySet()) + map.put(entry.getKey(), entry.getValue().getCurrentAverage()); + return map; + } + + public void resetHost(Host host) { + latencies.remove(host); + } + } + + private static class TimestampedAverage { + + private final long timestamp; + private final long average; + private final long nbMeasure; + + TimestampedAverage(long timestamp, long average, long nbMeasure) { + this.timestamp = timestamp; + this.average = average; + this.nbMeasure = nbMeasure; + } + } + + private static class HostLatencyTracker { + + private final long thresholdToAccount; + private final double scale; + private final AtomicReference current = new AtomicReference(); + + HostLatencyTracker(long scale, long thresholdToAccount) { + this.scale = (double)scale; // We keep in double since that's how we'll use it. + this.thresholdToAccount = thresholdToAccount; + } + + public void add(long newLatencyNanos) { + TimestampedAverage previous, next; + do { + previous = current.get(); + next = computeNextAverage(previous, newLatencyNanos); + } while (next != null && !current.compareAndSet(previous, next)); + } + + private TimestampedAverage computeNextAverage(TimestampedAverage previous, long newLatencyNanos) { + + long currentTimestamp = System.nanoTime(); + + long nbMeasure = previous == null ? 1 : previous.nbMeasure + 1; + if (nbMeasure < thresholdToAccount) + return new TimestampedAverage(currentTimestamp, -1L, nbMeasure); + + if (previous == null || previous.average < 0) + return new TimestampedAverage(currentTimestamp, newLatencyNanos, nbMeasure); + + // Note: it's possible for the delay to be 0, in which case newLatencyNanos will basically be + // discarded. It's fine: nanoTime is precise enough in practice that even if it happens, it + // will be very rare, and discarding a latency every once in a while is not the end of the world. 
+ // We do test for negative value, even though in theory that should not happen, because it seems + // that historically there has been bugs here (https://blogs.oracle.com/dholmes/entry/inside_the_hotspot_vm_clocks) + // so while this is almost surely not a problem anymore, there's no reason to break the computation + // if this even happen. + long delay = currentTimestamp - previous.timestamp; + if (delay <= 0) + return null; + + double scaledDelay = ((double)delay)/scale; + // Note: We don't use log1p because we it's quite a bit slower and we don't care about the precision (and since we + // refuse ridiculously big scales, scaledDelay can't be so low that scaledDelay+1 == 1.0 (due to rounding)). + double prevWeight = Math.log(scaledDelay+1) / scaledDelay; + long newAverage = (long)((1.0 - prevWeight) * newLatencyNanos + prevWeight * previous.average); + + return new TimestampedAverage(currentTimestamp, newAverage, nbMeasure); + } + + public TimestampedAverage getCurrentAverage() { + return current.get(); + } + } + + /** + * Helper builder object to create a latency aware policy. + *

+ * This helper allows to configure the different parameters used by + * {@code LatencyAwarePolicy}. The only mandatory parameter is the child + * policy that will be wrapped with latency awareness. The other parameters + * can be set through the methods of this builder, but all have defaults (that + * are documented in the javadoc of each method) if you don't. + *

+ * If you observe that the resulting policy excludes hosts too aggressively or + * not enough so, the main parameters to check are the exclusion threshold + * ({@link #withExclusionThreshold}) and scale ({@link #withScale}). + * + * @since 1.0.4 + */ + public static class Builder { + + private static final double DEFAULT_EXCLUSION_THRESHOLD = 2.0; + private static final long DEFAULT_SCALE = TimeUnit.MILLISECONDS.toNanos(100); + private static final long DEFAULT_RETRY_PERIOD = TimeUnit.SECONDS.toNanos(10); + private static final long DEFAULT_UPDATE_RATE = TimeUnit.MILLISECONDS.toNanos(100); + private static final int DEFAULT_MIN_MEASURE = 50; + + private final LoadBalancingPolicy childPolicy; + + private double exclusionThreshold = DEFAULT_EXCLUSION_THRESHOLD; + private long scale = DEFAULT_SCALE; + private long retryPeriod = DEFAULT_RETRY_PERIOD; + private long updateRate = DEFAULT_UPDATE_RATE; + private int minMeasure = DEFAULT_MIN_MEASURE; + + /** + * Creates a new latency aware policy builder given the child policy + * that the resulting policy wraps. + * + * @param childPolicy the load balancing policy to wrap with latency + * awareness. + */ + public Builder(LoadBalancingPolicy childPolicy) { + this.childPolicy = childPolicy; + } + + /** + * Sets the exclusion threshold to use for the resulting latency aware policy. + *

+ * The exclusion threshold controls how much worse the average latency + * of a node must be compared to the fastest performing node for it to be + * penalized by the policy. + *

+ * The default exclusion threshold (if this method is not called) is 2. + * In other words, the resulting policy excludes nodes that are more than + * twice slower than the fastest node. + * + * @param exclusionThreshold the exclusion threshold to use. Must be + * greater or equal to 1. + * @return this builder. + * + * @throws IllegalArgumentException if {@code exclusionThreshold < 1}. + */ + public Builder withExclusionThreshold(double exclusionThreshold) { + if (exclusionThreshold < 1d) + throw new IllegalArgumentException("Invalid exclusion threshold, must be greater than 1."); + this.exclusionThreshold = exclusionThreshold; + return this; + } + + /** + * Sets the scale to use for the resulting latency aware policy. + *

+ * The {@code scale} provides control on how the weight given to older latencies + * decreases over time. For a given host, if a new latency \(l\) is received at + * time \(t\), and the previously calculated average is \(prev\) calculated at + * time \(t'\), then the newly calculated average \(avg\) for that host is calculated + * thusly: + * \[ + * d = \frac{t - t'}{scale} \\ + * \alpha = 1 - \left(\frac{\ln(d+1)}{d}\right) \\ + * avg = \alpha * l + (1-\alpha) * prev + * \] + * Typically, with a {@code scale} of 100 milliseconds (the default), if a new + * latency is measured and the previous measure is 10 millisecond old (so \(d=0.1\)), + * then \(\alpha\) will be around \(0.05\). In other words, the new latency will + * weight 5% of the updated average. A bigger scale will get less weight to new + * measurements (compared to previous ones), a smaller one will give them more weight. + *

+ * The default scale (if this method is not used) is of 100 milliseconds. If unsure, try + * this default scale first and experiment only if it doesn't provide acceptable results + * (hosts are excluded too quickly or not fast enough and tuning the exclusion threshold + * doesn't help). + * + * @param scale the scale to use. + * @param unit the unit of {@code scale}. + * @return this builder. + * + * @throws IllegalArgumentException if {@code scale <e; 0}. + */ + public Builder withScale(long scale, TimeUnit unit) { + if (scale <= 0) + throw new IllegalArgumentException("Invalid scale, must be strictly positive"); + this.scale = unit.toNanos(scale); + return this; + } + + /** + * Sets the retry period for the resulting latency aware policy. + *

+ * The retry period defines how long a node may be penalized by the + * policy before it is given a 2nd change. More precisely, a node is excluded + * from query plans if both his calculated average latency is {@code exclusionThreshold} + * times slower than the fastest node average latency (at the time the query plan is + * computed) and his calculated average latency has been updated since + * less than {@code retryPeriod}. Since penalized nodes will likely not see their + * latency updated, this is basically how long the policy will exclude a node. + * + * @param retryPeriod the retry period to use. + * @param unit the unit for {@code retryPeriod}. + * @return this builder. + * + * @throws IllegalArgumentException if {@code retryPeriod < 0}. + */ + public Builder withRetryPeriod(long retryPeriod, TimeUnit unit) { + if (retryPeriod < 0) + throw new IllegalArgumentException("Invalid retry period, must be positive"); + this.retryPeriod = unit.toNanos(retryPeriod); + return this; + } + + /** + * Sets the update rate for the resulting latency aware policy. + * + * The update rate defines how often the minimum average latency is + * recomputed. While the average latency score of each node is computed + * iteratively (updated each time a new latency is collected), the + * minimum score needs to be recomputed from scratch every time, which + * is slightly more costly. For this reason, the minimum is only + * re-calculated at the given fixed rate and cached between re-calculation. + *

+ * The default update rate if 100 milliseconds, which should be + * appropriate for most applications. In particular, note that while we + * want to avoid to recompute the minimum for every query, that + * computation is not particularly intensive either and there is no + * reason to use a very slow rate (more than second is probably + * unnecessarily slow for instance). + * + * @param updateRate the update rate to use. + * @param unit the unit for {@code updateRate}. + * @return this builder. + * + * @throws IllegalArgumentException if {@code updateRate <e; 0}. + */ + public Builder withUpdateRate(long updateRate, TimeUnit unit) { + if (updateRate <= 0) + throw new IllegalArgumentException("Invalid update rate value, must be strictly positive"); + this.updateRate = unit.toNanos(updateRate); + return this; + } + + /** + * Sets the minimum number of measurements per-host to consider for + * the resulting latency aware policy. + *

+ * Penalizing nodes is based on an average of their recently measured + * average latency. This average is only meaningful if a minimum of + * measurements have been collected (moreover, a newly started + * Cassandra node will tend to perform relatively poorly on the first + * queries due to the JVM warmup). This is what this option controls. + * If less that {@code minMeasure} data points have been collected for + * a given host, the policy will never penalize that host. Also, the + * 30% first measurement will be entirely ignored (in other words, the + * {@code 30% * minMeasure} first measurement to a node are entirely + * ignored, while the {@code 70%} next ones are accounted in the latency + * computed but the node won't get convicted until we've had at least + * {@code minMeasure} measurements). + *

+ * Note that the number of collected measurements for a given host is + * reset if the node is restarted. + *

+ * The default for this option (if this method is not called) is 50. + * Note that it is probably not a good idea to put this option too low + * if only to avoid the influence of JVM warm-up on newly restarted + * nodes. + * + * @param minMeasure the minimum measurements to consider. + * @return this builder. + * + * @throws IllegalArgumentException if {@code minMeasure < 0}. + */ + public Builder withMininumMeasurements(int minMeasure) { + if (minMeasure < 0) + throw new IllegalArgumentException("Invalid minimum measurements value, must be positive"); + this.minMeasure = minMeasure; + return this; + } + + /** + * Builds a new latency aware policy using the options set on this + * builder. + * + * @return the newly created {@code LatencyAwarePolicy}. + */ + public LatencyAwarePolicy build() { + return new LatencyAwarePolicy(childPolicy, exclusionThreshold, scale, retryPeriod, updateRate, minMeasure); + } + } + + @Override + public void close() { + if (childPolicy instanceof CloseableLoadBalancingPolicy) + ((CloseableLoadBalancingPolicy)childPolicy).close(); + updaterService.shutdown(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java new file mode 100644 index 00000000000..f21f44591da --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import java.util.Collection; +import java.util.Iterator; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Statement; + +/** + * The policy that decides which Cassandra hosts to contact for each new query. + *

+ * Two methods need to be implemented: + *

    + *
  • {@link LoadBalancingPolicy#distance}: returns the "distance" of an + * host for that balancing policy.
  • + *
  • {@link LoadBalancingPolicy#newQueryPlan}: it is used for each query to + * find which host to query first, and which hosts to use as failover.
  • + *
+ *

+ * The {@code LoadBalancingPolicy} is a {@link com.datastax.driver.core.Host.StateListener} + * and is thus informed of hosts up/down events. For efficiency purposes, the + * policy is expected to exclude down hosts from query plans. + */ +public interface LoadBalancingPolicy extends Host.StateListener { + + /** + * Initialize this load balancing policy. + *

+ * Note that the driver guarantees that it will call this method exactly + * once per policy object and will do so before any call to another of the + * methods of the policy. + * + * @param cluster the {@code Cluster} instance for which the policy is created. + * @param hosts the initial hosts to use. + */ + public void init(Cluster cluster, Collection hosts); + + /** + * Returns the distance assigned by this policy to the provided host. + *

+ * The distance of an host influence how much connections are kept to the + * node (see {@link HostDistance}). A policy should assign a {@code + * LOCAL} distance to nodes that are susceptible to be returned first by + * {@code newQueryPlan} and it is useless for {@code newQueryPlan} to + * return hosts to which it assigns an {@code IGNORED} distance. + *

+ * The host distance is primarily used to prevent keeping too many + * connections to host in remote datacenters when the policy itself always + * picks host in the local datacenter first. + * + * @param host the host of which to return the distance of. + * @return the HostDistance to {@code host}. + */ + public HostDistance distance(Host host); + + /** + * Returns the hosts to use for a new query. + *

+ * Each new query will call this method. The first host in the result will + * then be used to perform the query. In the event of a connection problem + * (the queried host is down or appear to be so), the next host will be + * used. If all hosts of the returned {@code Iterator} are down, the query + * will fail. + * + * @param loggedKeyspace the currently logged keyspace (the one set through either + * {@link Cluster#connect(String)} or by manually doing a {@code USE} query) for + * the session on which this plan need to be built. This can be {@code null} if + * the corresponding session has no keyspace logged in. + * @param statement the query for which to build a plan. + * @return an iterator of Host. The query is tried against the hosts + * returned by this iterator in order, until the query has been sent + * successfully to one of the host. + */ + public Iterator newQueryPlan(String loggedKeyspace, Statement statement); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java new file mode 100644 index 00000000000..309f114ccad --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.policies; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; + +/** + * A retry policy that wraps another policy, logging the decision made by its sub-policy. + *

+ * Note that this policy only log the IGNORE and RETRY decisions (since + * RETHROW decisions are just meant to propagate the cassandra exception). The + * logging is done at the INFO level. + */ +public class LoggingRetryPolicy implements RetryPolicy { + + private static final Logger logger = LoggerFactory.getLogger(LoggingRetryPolicy.class); + private final RetryPolicy policy; + + /** + * Creates a new {@code RetryPolicy} that logs the decision of {@code policy}. + * + * @param policy the policy to wrap. The policy created by this constructor + * will return the same decision than {@code policy} but will log them. + */ + public LoggingRetryPolicy(RetryPolicy policy) { + this.policy = policy; + } + + private static ConsistencyLevel cl(ConsistencyLevel cl, RetryDecision decision) { + return decision.getRetryConsistencyLevel() == null ? cl : decision.getRetryConsistencyLevel(); + } + + @Override + public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { + RetryDecision decision = policy.onReadTimeout(statement, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry); + switch (decision.getType()) { + case IGNORE: + String f1 = "Ignoring read timeout (initial consistency: %s, required responses: %d, received responses: %d, data retrieved: %b, retries: %d)"; + logger.info(String.format(f1, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry)); + break; + case RETRY: + String f2 = "Retrying on read timeout at consistency %s (initial consistency: %s, required responses: %d, received responses: %d, data retrieved: %b, retries: %d)"; + logger.info(String.format(f2, cl(cl, decision), cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry)); + break; + } + return decision; + } + + @Override + public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) 
{ + RetryDecision decision = policy.onWriteTimeout(statement, cl, writeType, requiredAcks, receivedAcks, nbRetry); + switch (decision.getType()) { + case IGNORE: + String f1 = "Ignoring write timeout (initial consistency: %s, write type: %s, required acknowledgments: %d, received acknowledgments: %d, retries: %d)"; + logger.info(String.format(f1, cl, writeType, requiredAcks, receivedAcks, nbRetry)); + break; + case RETRY: + String f2 = "Retrying on write timeout at consistency %s(initial consistency: %s, write type: %s, required acknowledgments: %d, received acknowledgments: %d, retries: %d)"; + logger.info(String.format(f2, cl(cl, decision), cl, writeType, requiredAcks, receivedAcks, nbRetry)); + break; + } + return decision; + } + + @Override + public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { + RetryDecision decision = policy.onUnavailable(statement, cl, requiredReplica, aliveReplica, nbRetry); + switch (decision.getType()) { + case IGNORE: + String f1 = "Ignoring unavailable exception (initial consistency: %s, required replica: %d, alive replica: %d, retries: %d)"; + logger.info(String.format(f1, cl, requiredReplica, aliveReplica, nbRetry)); + break; + case RETRY: + String f2 = "Retrying on unavailable exception at consistency %s (initial consistency: %s, required replica: %d, alive replica: %d, retries: %d)"; + logger.info(String.format(f2, cl(cl, decision), cl, requiredReplica, aliveReplica, nbRetry)); + break; + } + return decision; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/NoSpeculativeExecutionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/NoSpeculativeExecutionPolicy.java new file mode 100644 index 00000000000..1524aeb8463 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/NoSpeculativeExecutionPolicy.java @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.Statement; + +/** + * A {@link SpeculativeExecutionPolicy} that never schedules speculative executions. + */ +public class NoSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { + + /** The single instance (this class is stateless). */ + public static final NoSpeculativeExecutionPolicy INSTANCE = new NoSpeculativeExecutionPolicy(); + + private static final SpeculativeExecutionPlan PLAN = new SpeculativeExecutionPlan() { + @Override + public long nextExecution(Host lastQueried) { + return -1; + } + }; + + @Override + public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { + return PLAN; + } + + private NoSpeculativeExecutionPolicy() { + // do nothing + } + + @Override + public void init(Cluster cluster) { + // do nothing + } + + @Override + public void close() { + // do nothing + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.java new file mode 100644 index 00000000000..1f63a7d4950 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.java @@ -0,0 +1,100 @@ +/* + * Copyright 
(C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import java.util.concurrent.atomic.AtomicInteger; + +import com.google.common.annotations.Beta; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.PerHostPercentileTracker; + +/** + * A policy that triggers speculative executions when the request to the current host is above a given percentile. + * + * This class uses a {@link PerHostPercentileTracker} that must be registered with the cluster instance: + *

+ * PerHostPercentileTracker tracker = PerHostPercentileTracker
+ *     .builderWithHighestTrackableLatencyMillis(15000)
+ *     .build();
+ * PercentileSpeculativeExecutionPolicy policy = new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 2);
+ * cluster = Cluster.builder()
+ *     .addContactPoint("127.0.0.1")
+ *     .withSpeculativeExecutionPolicy(policy)
+ *     .build();
+ * cluster.register(tracker);
+ * 
+ * You must register the tracker with the cluster yourself (as shown on the last line above), this class will not + * do it itself. + *

+ * This class is currently provided as a beta preview: it hasn't been extensively tested yet, and the API is still subject + * to change. + */ +@Beta +public class PercentileSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { + private final PerHostPercentileTracker percentileTracker; + private final double percentile; + private final int maxSpeculativeExecutions; + + /** + * Builds a new instance. + * + * @param percentileTracker the component that will record latencies. Note that this policy doesn't register it with the {@code Cluster}, + * you must do it yourself (see the code example in this class's Javadoc). + * @param percentile the percentile that a request's latency must fall into to be considered slow (ex: {@code 99.0}). + * @param maxSpeculativeExecutions the maximum number of speculative executions that will be triggered for a given request (this does not + * include the initial, normal request). Must be strictly positive. + */ + public PercentileSpeculativeExecutionPolicy(PerHostPercentileTracker percentileTracker, double percentile, int maxSpeculativeExecutions) { + checkArgument(maxSpeculativeExecutions > 0, + "number of speculative executions must be strictly positive (was %d)", maxSpeculativeExecutions); + checkArgument(percentile >= 0.0 && percentile < 100, + "percentile must be between 0.0 and 100 (was %f)"); + + this.percentileTracker = percentileTracker; + this.percentile = percentile; + this.maxSpeculativeExecutions = maxSpeculativeExecutions; + } + + @Override + public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { + return new SpeculativeExecutionPlan() { + private final AtomicInteger remaining = new AtomicInteger(maxSpeculativeExecutions); + + @Override + public long nextExecution(Host lastQueried) { + if (remaining.getAndDecrement() > 0) + return percentileTracker.getLatencyAtPercentile(lastQueried, percentile); + else + return -1; + } + }; + } + + @Override + public void init(Cluster cluster) 
{ + // nothing + } + + @Override + public void close() { + // nothing + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java new file mode 100644 index 00000000000..0c47d883440 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java @@ -0,0 +1,360 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import com.google.common.base.Objects; + +import com.datastax.driver.core.ServerSideTimestampGenerator; +import com.datastax.driver.core.TimestampGenerator; + +/** + * Policies configured for a {@link com.datastax.driver.core.Cluster} instance. + */ +public class Policies { + + /** + * Returns a builder to create a new {@code Policies} object. + * + * @return the builder. 
+ */ + public static Builder builder() { + return new Builder(); + } + + private static final ReconnectionPolicy DEFAULT_RECONNECTION_POLICY = new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000); + private static final RetryPolicy DEFAULT_RETRY_POLICY = DefaultRetryPolicy.INSTANCE; + private static final AddressTranslater DEFAULT_ADDRESS_TRANSLATER = new IdentityTranslater(); + private static final SpeculativeExecutionPolicy DEFAULT_SPECULATIVE_EXECUTION_POLICY = NoSpeculativeExecutionPolicy.INSTANCE; + + private final LoadBalancingPolicy loadBalancingPolicy; + private final ReconnectionPolicy reconnectionPolicy; + private final RetryPolicy retryPolicy; + private final AddressTranslater addressTranslater; + private final TimestampGenerator timestampGenerator; + private final SpeculativeExecutionPolicy speculativeExecutionPolicy; + + /** + * @deprecated this constructor is exposed for backward compatibility. + * Use {@link #builder()} instead. + */ + @Deprecated + /* + * This constructor should be private and called only by the builder. + * If a new field gets added, add it to this constructor and make it private, then expose the current signature as a deprecated method + * (see end of class). + */ + public Policies(LoadBalancingPolicy loadBalancingPolicy, + ReconnectionPolicy reconnectionPolicy, + RetryPolicy retryPolicy, + AddressTranslater addressTranslater, + TimestampGenerator timestampGenerator, + SpeculativeExecutionPolicy speculativeExecutionPolicy) { + this.loadBalancingPolicy = loadBalancingPolicy; + this.reconnectionPolicy = reconnectionPolicy; + this.retryPolicy = retryPolicy; + this.addressTranslater = addressTranslater; + this.timestampGenerator = timestampGenerator; + this.speculativeExecutionPolicy = speculativeExecutionPolicy; + } + + /** + * The default load balancing policy. + *

+ * The default load balancing policy is {@link DCAwareRoundRobinPolicy} with token + * awareness (so {@code new TokenAwarePolicy(new DCAwareRoundRobinPolicy())}). + *

+ * Note that this policy shuffles the replicas when token awareness is used, see + * {@link TokenAwarePolicy#TokenAwarePolicy(LoadBalancingPolicy, boolean)} for an + * explanation of the tradeoffs. + * + * @return the default load balancing policy. + */ + public static LoadBalancingPolicy defaultLoadBalancingPolicy() { + // Note: balancing policies are stateful, so we can't store that in a static or that would screw thing + // up if multiple Cluster instance are started in the same JVM. + return new TokenAwarePolicy(new DCAwareRoundRobinPolicy()); + } + + /** + * The default reconnection policy. + *

+ * The default reconnection policy is an {@link ExponentialReconnectionPolicy} + * where the base delay is 1 second and the max delay is 10 minutes; + * + * @return the default reconnection policy. + */ + public static ReconnectionPolicy defaultReconnectionPolicy() { + return DEFAULT_RECONNECTION_POLICY; + } + + /** + * The default retry policy. + *

+ * The default retry policy is {@link DefaultRetryPolicy}. + * + * @return the default retry policy. + */ + public static RetryPolicy defaultRetryPolicy() { + return DEFAULT_RETRY_POLICY; + } + + /** + * The default address translater. + *

+ * The default address translater is {@link IdentityTranslater}. + * + * @return the default address translater. + */ + public static AddressTranslater defaultAddressTranslater() { + return DEFAULT_ADDRESS_TRANSLATER; + } + + /** + * The default timestamp generator. + *

+ * This is an instance of {@link ServerSideTimestampGenerator}. + * + * @return the default timestamp generator. + */ + public static TimestampGenerator defaultTimestampGenerator() { + return ServerSideTimestampGenerator.INSTANCE; + } + + /** + * The default speculative retry policy. + *

+ * The default speculative retry policy is a {@link NoSpeculativeExecutionPolicy}. + * + * @return the default speculative retry policy. + */ + public static SpeculativeExecutionPolicy defaultSpeculativeExecutionPolicy() { + return DEFAULT_SPECULATIVE_EXECUTION_POLICY; + } + + /** + * The load balancing policy in use. + *

+ * The load balancing policy defines how Cassandra hosts are picked for queries. + * + * @return the load balancing policy in use. + */ + public LoadBalancingPolicy getLoadBalancingPolicy() { + return loadBalancingPolicy; + } + + /** + * The reconnection policy in use. + *

+ * The reconnection policy defines how often the driver tries to reconnect to a dead node. + * + * @return the reconnection policy in use. + */ + public ReconnectionPolicy getReconnectionPolicy() { + return reconnectionPolicy; + } + + /** + * The retry policy in use. + *

+ * The retry policy defines in which conditions a query should be + * automatically retries by the driver. + * + * @return the retry policy in use. + */ + public RetryPolicy getRetryPolicy() { + return retryPolicy; + } + + /** + * The address translater in use. + * + * @return the address translater in use. + */ + public AddressTranslater getAddressTranslater() { + return addressTranslater; + } + + /** + * The timestamp generator to use. + * + * @return the timestamp generator to use. + */ + public TimestampGenerator getTimestampGenerator() { + return timestampGenerator; + } + + /** + * The speculative execution policy in use. + * + * @return the speculative execution policy in use. + */ + public SpeculativeExecutionPolicy getSpeculativeExecutionPolicy() { + return speculativeExecutionPolicy; + } + + /** + * A builder to create a new {@code Policies} object. + */ + public static class Builder { + private LoadBalancingPolicy loadBalancingPolicy; + private ReconnectionPolicy reconnectionPolicy; + private RetryPolicy retryPolicy; + private AddressTranslater addressTranslater; + private TimestampGenerator timestampGenerator; + private SpeculativeExecutionPolicy speculativeExecutionPolicy; + + /** + * Sets the load balancing policy. + * + * @param loadBalancingPolicy see {@link #getLoadBalancingPolicy()}. + * @return this builder. + */ + public Builder withLoadBalancingPolicy(LoadBalancingPolicy loadBalancingPolicy) { + this.loadBalancingPolicy = loadBalancingPolicy; + return this; + } + + /** + * Sets the reconnection policy. + * + * @param reconnectionPolicy see {@link #getReconnectionPolicy()}. + * @return this builder. + */ + public Builder withReconnectionPolicy(ReconnectionPolicy reconnectionPolicy) { + this.reconnectionPolicy = reconnectionPolicy; + return this; + } + + /** + * Sets the retry policy. + * + * @param retryPolicy see {@link #getRetryPolicy()}. + * @return this builder. 
+ */ + public Builder withRetryPolicy(RetryPolicy retryPolicy) { + this.retryPolicy = retryPolicy; + return this; + } + + /** + * Sets the address translator. + * + * @param addressTranslater see {@link #getAddressTranslater()}. + * @return this builder. + */ + public Builder withAddressTranslater(AddressTranslater addressTranslater) { + this.addressTranslater = addressTranslater; + return this; + } + + /** + * Sets the timestamp generator. + * + * @param timestampGenerator see {@link #getTimestampGenerator()}. + * @return this builder. + */ + public Builder withTimestampGenerator(TimestampGenerator timestampGenerator) { + this.timestampGenerator = timestampGenerator; + return this; + } + + /** + * Sets the speculative execution policy. + * + * @param speculativeExecutionPolicy see {@link #getSpeculativeExecutionPolicy()}. + * @return this builder. + */ + public Builder withSpeculativeExecutionPolicy(SpeculativeExecutionPolicy speculativeExecutionPolicy) { + this.speculativeExecutionPolicy = speculativeExecutionPolicy; + return this; + } + + /** + * Builds the final object from this builder. + *

+ * Any field that hasn't been set explicitly will get its default value. + * + * @return the object. + */ + public Policies build() { + return new Policies( + loadBalancingPolicy == null ? Policies.defaultLoadBalancingPolicy() : loadBalancingPolicy, + Objects.firstNonNull(reconnectionPolicy, Policies.defaultReconnectionPolicy()), + Objects.firstNonNull(retryPolicy, Policies.defaultRetryPolicy()), + Objects.firstNonNull(addressTranslater, Policies.defaultAddressTranslater()), + Objects.firstNonNull(timestampGenerator, Policies.defaultTimestampGenerator()), + Objects.firstNonNull(speculativeExecutionPolicy, Policies.defaultSpeculativeExecutionPolicy())); + } + } + + /* + * The public constructors of this class were mistakenly changed over time (this is a breaking change). + * + * The code below provides backward compatibility with every signature that was ever exposed in a released version. + * This is ugly but the safest way to avoid issues for clients upgrading from a previous version. + * + * In the future, new fields will only be added to the builder, which avoids this problem. + */ + + /** + * @deprecated this constructor is provided for backward compatibility. + * Use {@link #builder()} instead. + */ + @Deprecated + public Policies() { + this(defaultLoadBalancingPolicy(), defaultReconnectionPolicy(), defaultRetryPolicy(), defaultAddressTranslater(), defaultTimestampGenerator(), defaultSpeculativeExecutionPolicy()); + } + + /** + * @deprecated this constructor is provided for backward compatibility. + * Use {@link #builder()} instead. + */ + public Policies(LoadBalancingPolicy loadBalancingPolicy, ReconnectionPolicy reconnectionPolicy, RetryPolicy retryPolicy) { + this(loadBalancingPolicy, reconnectionPolicy, retryPolicy, defaultAddressTranslater(), defaultTimestampGenerator(), defaultSpeculativeExecutionPolicy()); + } + + /** + * @deprecated this constructor is provided for backward compatibility. + * Use {@link #builder()} instead. 
+ */ + public Policies(LoadBalancingPolicy loadBalancingPolicy, ReconnectionPolicy reconnectionPolicy, RetryPolicy retryPolicy, AddressTranslater addressTranslater) { + this(loadBalancingPolicy, reconnectionPolicy, retryPolicy, addressTranslater, defaultTimestampGenerator(), defaultSpeculativeExecutionPolicy()); + } + + /** + * @deprecated this constructor is provided for backward compatibility. + * Use {@link #builder()} instead. + */ + public Policies(LoadBalancingPolicy loadBalancingPolicy, ReconnectionPolicy reconnectionPolicy, RetryPolicy retryPolicy, SpeculativeExecutionPolicy speculativeExecutionPolicy) { + this(loadBalancingPolicy, reconnectionPolicy, retryPolicy, defaultAddressTranslater(), defaultTimestampGenerator(), speculativeExecutionPolicy); + } + + /** + * @deprecated this constructor is provided for backward compatibility. + * Use {@link #builder()} instead. + */ + public Policies(LoadBalancingPolicy loadBalancingPolicy, ReconnectionPolicy reconnectionPolicy, RetryPolicy retryPolicy, AddressTranslater addressTranslater, TimestampGenerator timestampGenerator) { + this(loadBalancingPolicy, reconnectionPolicy, retryPolicy, addressTranslater, timestampGenerator, defaultSpeculativeExecutionPolicy()); + } + + /** + * @deprecated this constructor is provided for backward compatibility. + * Use {@link #builder()} instead. 
+ */ + public Policies(LoadBalancingPolicy loadBalancingPolicy, ReconnectionPolicy reconnectionPolicy, RetryPolicy retryPolicy, AddressTranslater addressTranslater, SpeculativeExecutionPolicy speculativeExecutionPolicy) { + this(loadBalancingPolicy, reconnectionPolicy, retryPolicy, addressTranslater, defaultTimestampGenerator(), speculativeExecutionPolicy); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java new file mode 100644 index 00000000000..971ad2404ab --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +/** + * Policy that decides how often the reconnection to a dead node is attempted. + * + * Each time a node is detected dead (because a connection error occurs), a new + * {@code ReconnectionSchedule} instance is created (through the {@link #newSchedule()}). + * Then each call to the {@link ReconnectionSchedule#nextDelayMs} method of + * this instance will decide when the next reconnection attempt to this node + * will be tried. 
+ * + * Note that if the driver receives a push notification from the Cassandra cluster + * that a node is UP, any existing {@code ReconnectionSchedule} on that node + * will be cancelled and a new one will be created (in effect, the driver reset + * the scheduler). + * + * The default {@link ExponentialReconnectionPolicy} policy is usually + * adequate. + */ +public interface ReconnectionPolicy { + + /** + * Creates a new schedule for reconnection attempts. + * + * @return the created schedule. + */ + public ReconnectionSchedule newSchedule(); + + /** + * Schedules reconnection attempts to a node. + */ + public interface ReconnectionSchedule { + + /** + * When to attempt the next reconnection. + * + * This method will be called once when the host is detected down to + * schedule the first reconnection attempt, and then once after each failed + * reconnection attempt to schedule the next one. Hence each call to this + * method are free to return a different value. + * + * @return a time in milliseconds to wait before attempting the next + * reconnection. + */ + public long nextDelayMs(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java new file mode 100644 index 00000000000..6272c17c8b0 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java @@ -0,0 +1,195 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; + +/** + * A policy that defines a default behavior to adopt when a request returns + * a TimeoutException or an UnavailableException. + * + * Such policy allows to centralize the handling of query retries, allowing to + * minimize the need for exception catching/handling in business code. + */ +public interface RetryPolicy { + + /** + * A retry decision to adopt on a Cassandra exception (read/write timeout + * or unavailable exception). + *

+ * There are three possible decisions: + *

    + *
  • RETHROW: no retry should be attempted and an exception should be thrown
  • + *
  • RETRY: the operation will be retried. The consistency level of the + * retry should be specified.
  • + *
  • IGNORE: no retry should be attempted and the exception should be + * ignored. In that case, the operation that triggered the Cassandra + * exception will return an empty result set.
  • + *
+ */ + class RetryDecision { + /** + * The type of retry decisions. + */ + public enum Type { RETRY, RETHROW, IGNORE }; + + private final Type type; + private final ConsistencyLevel retryCL; + private final boolean retryCurrent; + + private RetryDecision(Type type, ConsistencyLevel retryCL, boolean retryCurrent){ + this.type = type; + this.retryCL = retryCL; + this.retryCurrent = retryCurrent; + } + + /** + * The type of this retry decision. + * + * @return the type of this retry decision. + */ + public Type getType() { + return type; + } + + /** + * The consistency level for a retry decision. + * + * @return the consistency level for a retry decision or {@code null} + * if this retry decision is an {@code IGNORE} or a {@code RETHROW}. + */ + public ConsistencyLevel getRetryConsistencyLevel() { + return retryCL; + } + + /** + * Whether the retry policy uses the same host for retry decision. + * + * @return the retry on next host boolean. Default is false. + */ + public boolean isRetryCurrent() { + return retryCurrent; + } + + /** + * Creates a RETHROW retry decision. + * + * @return a RETHROW retry decision. + */ + public static RetryDecision rethrow() { + return new RetryDecision(Type.RETHROW, null, true); + } + + /** + * Creates a RETRY retry decision using the provided consistency level. + * + * @param consistency the consistency level to use for the retry. + * @return a RETRY with consistency level {@code consistency} retry decision. + */ + public static RetryDecision retry(ConsistencyLevel consistency) { + return new RetryDecision(Type.RETRY, consistency, true); + } + + /** + * Creates an IGNORE retry decision. + * + * @return an IGNORE retry decision. + */ + public static RetryDecision ignore() { + return new RetryDecision(Type.IGNORE, null, true); + } + + /** + * Creates a RETRY retry decision and indicates to retry on another host + * using the provided consistency level. + * + * @return a RETRY retry decision. 
+ */ + public static RetryDecision tryNextHost(ConsistencyLevel retryCL) { + return new RetryDecision(Type.RETRY, retryCL, false); + } + + @Override + public String toString() { + switch (type) { + case RETRY: return "Retry at " + retryCL + " on " + (retryCurrent ? "same " : "next ") + "host."; + case RETHROW: return "Rethrow"; + case IGNORE: return "Ignore"; + } + throw new AssertionError(); + } + } + + /** + * Defines whether to retry and at which consistency level on a read timeout. + *

+ * Note that this method may be called even if + * {@code requiredResponses >= receivedResponses} if {@code dataPresent} is + * {@code false} (see + * {@link com.datastax.driver.core.exceptions.ReadTimeoutException#wasDataRetrieved}). + * + * @param statement the original query that timed out. + * @param cl the original consistency level of the read that timed out. + * @param requiredResponses the number of responses that were required to + * achieve the requested consistency level. + * @param receivedResponses the number of responses that had been received + * by the time the timeout exception was raised. + * @param dataRetrieved whether actual data (by opposition to data checksum) + * was present in the received responses. + * @param nbRetry the number of retry already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, + * a {@link com.datastax.driver.core.exceptions.ReadTimeoutException} will + * be thrown for the operation. + */ + public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry); + + /** + * Defines whether to retry and at which consistency level on a write timeout. + * + * @param statement the original query that timed out. + * @param cl the original consistency level of the write that timed out. + * @param writeType the type of the write that timed out. + * @param requiredAcks the number of acknowledgments that were required to + * achieve the requested consistency level. + * @param receivedAcks the number of acknowledgments that had been received + * by the time the timeout exception was raised. + * @param nbRetry the number of retry already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, + * a {@link com.datastax.driver.core.exceptions.WriteTimeoutException} will + * be thrown for the operation. 
+ */ + public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry); + + /** + * Defines whether to retry and at which consistency level on an + * unavailable exception. + * + * @param statement the original query for which the consistency level cannot + * be achieved. + * @param cl the original consistency level for the operation. + * @param requiredReplica the number of replica that should have been + * (known) alive for the operation to be attempted. + * @param aliveReplica the number of replica that were know to be alive by + * the coordinator of the operation. + * @param nbRetry the number of retry already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, + * an {@link com.datastax.driver.core.exceptions.UnavailableException} will + * be thrown for the operation. + */ + public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java new file mode 100644 index 00000000000..6cf4190611f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; + +import com.google.common.collect.AbstractIterator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Configuration; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Statement; + +/** + * A Round-robin load balancing policy. + *

+ * This policy queries nodes in a round-robin fashion. For a given query, + * if a host fails, the next one (following the round-robin order) is + * tried, until all hosts have been tried. + *

+ * This policy is not datacenter aware and will include every known + * Cassandra host in its round robin algorithm. If you use multiple + * datacenter this will be inefficient and you will want to use the + * {@link DCAwareRoundRobinPolicy} load balancing policy instead. + */ +public class RoundRobinPolicy implements LoadBalancingPolicy, CloseableLoadBalancingPolicy { + + private static final Logger logger = LoggerFactory.getLogger(RoundRobinPolicy.class); + + private final CopyOnWriteArrayList liveHosts = new CopyOnWriteArrayList(); + private final AtomicInteger index = new AtomicInteger(); + + private volatile Configuration configuration; + private volatile boolean hasLoggedLocalCLUse; + + /** + * Creates a load balancing policy that picks host to query in a round robin + * fashion (on all the hosts of the Cassandra cluster). + */ + public RoundRobinPolicy() {} + + @Override + public void init(Cluster cluster, Collection hosts) { + this.liveHosts.addAll(hosts); + this.configuration = cluster.getConfiguration(); + this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); + } + + /** + * Return the HostDistance for the provided host. + *

+ * This policy considers all nodes as local. This is generally the right + * thing to do in a single datacenter deployment. If you use multiple + * datacenters, see {@link DCAwareRoundRobinPolicy} instead. + * + * @param host the host of which to return the distance of. + * @return the HostDistance to {@code host}. + */ + @Override + public HostDistance distance(Host host) { + return HostDistance.LOCAL; + } + + /** + * Returns the hosts to use for a new query. + *

+ * The returned plan will try each known host of the cluster. Upon each + * call to this method, the {@code i}th host of the plans returned will cycle + * over all the hosts of the cluster in a round-robin fashion. + * + * @param loggedKeyspace the keyspace currently logged in on for this + * query. + * @param statement the query for which to build the plan. + * @return a new query plan, i.e. an iterator indicating which host to + * try first for querying, which one to use as failover, etc... + */ + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + + if (!hasLoggedLocalCLUse) { + ConsistencyLevel cl = statement.getConsistencyLevel() == null + ? configuration.getQueryOptions().getConsistencyLevel() + : statement.getConsistencyLevel(); + if (cl.isDCLocal()) { + hasLoggedLocalCLUse = true; + logger.warn("Detected request at Consistency Level {} but the non-DC aware RoundRobinPolicy is in use. " + + "It is strongly advised to use DCAwareRoundRobinPolicy if you have multiple DCs/use DC-aware consistency levels " + + "(note: this message will only be logged once)", statement.getConsistencyLevel()); + } + } + + // We clone liveHosts because we want a version of the list that + // cannot change concurrently of the query plan iterator (this + // would be racy). We use clone() as it don't involve a copy of the + // underlying array (and thus we rely on liveHosts being a CopyOnWriteArrayList). 
+ @SuppressWarnings("unchecked") + final List hosts = (List)liveHosts.clone(); + final int startIdx = index.getAndIncrement(); + + // Overflow protection; not theoretically thread safe but should be good enough + if (startIdx > Integer.MAX_VALUE - 10000) + index.set(0); + + return new AbstractIterator() { + + private int idx = startIdx; + private int remaining = hosts.size(); + + @Override + protected Host computeNext() { + if (remaining <= 0) + return endOfData(); + + remaining--; + int c = idx++ % hosts.size(); + if (c < 0) + c += hosts.size(); + return hosts.get(c); + } + }; + } + + @Override + public void onUp(Host host) { + liveHosts.addIfAbsent(host); + } + + @Override + public void onSuspected(Host host) { + } + + @Override + public void onDown(Host host) { + liveHosts.remove(host); + } + + @Override + public void onAdd(Host host) { + onUp(host); + } + + @Override + public void onRemove(Host host) { + onDown(host); + } + + @Override + public void close() { + // nothing to do + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.java new file mode 100644 index 00000000000..25e193fb561 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.java @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.policies; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.Statement; + +/** + * The policy that decides if the driver will send speculative queries to the next hosts when the current host takes too + * long to respond. + *

+ * Note that only idempotent statements will be speculatively retried, see + * {@link com.datastax.driver.core.Statement#isIdempotent()} for more information. + */ +public interface SpeculativeExecutionPolicy { + /** + * Gets invoked at cluster startup. + * + * @param cluster the cluster that this policy is associated with. + */ + void init(Cluster cluster); + + /** + * Returns the plan to use for a new query. + * + * @param loggedKeyspace the currently logged keyspace (the one set through either + * {@link Cluster#connect(String)} or by manually doing a {@code USE} query) for + * the session on which this plan need to be built. This can be {@code null} if + * the corresponding session has no keyspace logged in. + * @param statement the query for which to build a plan. + * @return the plan. + */ + SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement); + + /** + * Gets invoked at cluster shutdown. + * + * This gives the policy the opportunity to perform some cleanup, for instance stop threads that it might have started. + */ + void close(); + + /** + * A plan that governs speculative executions for a given query. + *

+ * Each time a host is queried, {@link #nextExecution(Host)} is invoked to determine if and when a speculative query to + * the next host will be sent. + */ + interface SpeculativeExecutionPlan { + /** + * Returns the time before the next speculative query. + * + * @param lastQueried the host that was just queried. + * @return the time (in milliseconds) before a speculative query is sent to the next host. If zero or negative, + * no speculative query will be sent. + */ + long nextExecution(Host lastQueried); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java new file mode 100644 index 00000000000..6c7b98d6935 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import java.nio.ByteBuffer; +import java.util.*; + +import com.google.common.collect.Lists; + +import com.google.common.collect.AbstractIterator; + +import com.datastax.driver.core.*; + +/** + * A wrapper load balancing policy that add token awareness to a child policy. + *

+ * This policy encapsulates another policy. The resulting policy works in + * the following way: + *

    + *
  • the {@code distance} method is inherited from the child policy.
  • + *
  • the iterator return by the {@code newQueryPlan} method will first + * return the {@code LOCAL} replicas for the query (based on {@link Statement#getRoutingKey}) + * if possible (i.e. if the query {@code getRoutingKey} method + * doesn't return {@code null} and if {@link Metadata#getReplicas} + * returns a non empty set of replicas for that partition key). If no + * local replica can be either found or successfully contacted, the rest + * of the query plan will fallback to one of the child policy.
  • + *
+ *

+ * Do note that only replica for which the child policy {@code distance} + * method returns {@code HostDistance.LOCAL} will be considered having + * priority. For example, if you wrap {@link DCAwareRoundRobinPolicy} with this + * token aware policy, replicas from remote data centers may only be + * returned after all the host of the local data center. + */ +public class TokenAwarePolicy implements ChainableLoadBalancingPolicy, CloseableLoadBalancingPolicy { + + private final LoadBalancingPolicy childPolicy; + private final boolean shuffleReplicas; + private Metadata clusterMetadata; + + /** + * Creates a new {@code TokenAware} policy. + * + * @param childPolicy the load balancing policy to wrap with token awareness. + * @param shuffleReplicas whether to shuffle the replicas returned by {@code getRoutingKey}. + * Note that setting this parameter to {@code true} might decrease the + * effectiveness of caching (especially at consistency level ONE), since + * the same row will be retrieved from any replica (instead of only the + * "primary" replica without shuffling). + * On the other hand, shuffling will better distribute writes, and can + * alleviate hotspots caused by "fat" partitions. + * + */ + public TokenAwarePolicy(LoadBalancingPolicy childPolicy, boolean shuffleReplicas) { + this.childPolicy = childPolicy; + this.shuffleReplicas = shuffleReplicas; + } + + /** + * Creates a new {@code TokenAware} policy with shuffling of replicas. + * + * @param childPolicy the load balancing policy to wrap with token + * awareness. 
+ * + * @see #TokenAwarePolicy(LoadBalancingPolicy, boolean) + */ + public TokenAwarePolicy(LoadBalancingPolicy childPolicy) { + this(childPolicy, true); + } + + @Override + public LoadBalancingPolicy getChildPolicy() { + return childPolicy; + } + + @Override + public void init(Cluster cluster, Collection hosts) { + clusterMetadata = cluster.getMetadata(); + childPolicy.init(cluster, hosts); + } + + /** + * Return the HostDistance for the provided host. + * + * @param host the host of which to return the distance of. + * @return the HostDistance to {@code host} as returned by the wrapped policy. + */ + @Override + public HostDistance distance(Host host) { + return childPolicy.distance(host); + } + + /** + * Returns the hosts to use for a new query. + *

+ * The returned plan will first return replicas (whose {@code HostDistance} + * for the child policy is {@code LOCAL}) for the query if it can determine + * them (i.e. mainly if {@code statement.getRoutingKey()} is not {@code null}). + * Following what it will return the plan of the child policy. + * + * @param statement the query for which to build the plan. + * @return the new query plan. + */ + @Override + public Iterator newQueryPlan(final String loggedKeyspace, final Statement statement) { + + ByteBuffer partitionKey = statement.getRoutingKey(); + String keyspace = statement.getKeyspace(); + if (keyspace == null) + keyspace = loggedKeyspace; + + if (partitionKey == null || keyspace == null) + return childPolicy.newQueryPlan(keyspace, statement); + + final Set replicas = clusterMetadata.getReplicas(Metadata.quote(keyspace), partitionKey); + if (replicas.isEmpty()) + return childPolicy.newQueryPlan(loggedKeyspace, statement); + + final Iterator iter; + if (shuffleReplicas) { + List l = Lists.newArrayList(replicas); + Collections.shuffle(l); + iter = l.iterator(); + } else { + iter = replicas.iterator(); + } + + return new AbstractIterator() { + + private Iterator childIterator; + + @Override + protected Host computeNext() { + while (iter.hasNext()) { + Host host = iter.next(); + if (host.isUp() && childPolicy.distance(host) == HostDistance.LOCAL) + return host; + } + + if (childIterator == null) + childIterator = childPolicy.newQueryPlan(loggedKeyspace, statement); + + while (childIterator.hasNext()) { + Host host = childIterator.next(); + // Skip it if it was already a local replica + if (!replicas.contains(host) || childPolicy.distance(host) != HostDistance.LOCAL) + return host; + } + return endOfData(); + } + }; + } + + @Override + public void onUp(Host host) { + childPolicy.onUp(host); + } + + @Override + public void onSuspected(Host host) { + childPolicy.onSuspected(host); + } + + @Override + public void onDown(Host host) { + childPolicy.onDown(host); + } 
+ + @Override + public void onAdd(Host host) { + childPolicy.onAdd(host); + } + + @Override + public void onRemove(Host host) { + childPolicy.onRemove(host); + } + + @Override + public void close() { + if (childPolicy instanceof CloseableLoadBalancingPolicy) + ((CloseableLoadBalancingPolicy)childPolicy).close(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/WhiteListPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/WhiteListPolicy.java new file mode 100644 index 00000000000..4ef7e315bef --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/WhiteListPolicy.java @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.policies; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import com.google.common.collect.ImmutableSet; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Statement; + +/** + * A load balancing policy wrapper that ensure that only hosts from a provided + * white list will ever be returned. + *

+ * This policy wraps another load balancing policy and will delegate the choice + * of hosts to the wrapped policy with the exception that only hosts contained + * in the white list provided when constructing this policy will ever be + * returned. Any host not in the white list will be considered {@code IGNORED} + * and thus will not be connected to. + *

+ * This policy can be useful to ensure that the driver only connects to a + * predefined set of hosts. Keep in mind however that this policy defeats + * somewhat the host auto-detection of the driver. As such, this policy is only + * useful in a few special cases or for testing, but is not optimal in general. + * If all you want to do is limiting connections to hosts of the local + * data-center then you should use DCAwareRoundRobinPolicy and *not* this policy + * in particular. + */ +public class WhiteListPolicy implements ChainableLoadBalancingPolicy, CloseableLoadBalancingPolicy { + private final LoadBalancingPolicy childPolicy; + private final Set whiteList; + + /** + * Create a new policy that wraps the provided child policy but only "allow" hosts + * from the provided while list. + * + * @param childPolicy the wrapped policy. + * @param whiteList the white listed hosts. Only hosts from this list may get connected + * to (whether they will get connected to or not depends on the child policy). + */ + public WhiteListPolicy(LoadBalancingPolicy childPolicy, Collection whiteList) { + this.childPolicy = childPolicy; + this.whiteList = ImmutableSet.copyOf(whiteList); + } + + @Override + public LoadBalancingPolicy getChildPolicy() { + return childPolicy; + } + + /** + * Initialize this load balancing policy. + * + * @param cluster the {@code Cluster} instance for which the policy is created. + * @param hosts the initial hosts to use. + * + * @throws IllegalArgumentException if none of the host in {@code hosts} + * (which will correspond to the contact points) are part of the white list. 
+ */ + @Override + public void init(Cluster cluster, Collection hosts) { + List whiteHosts = new ArrayList(hosts.size()); + for (Host host : hosts) + if (whiteList.contains(host.getSocketAddress())) + whiteHosts.add(host); + + if (whiteHosts.isEmpty()) + throw new IllegalArgumentException(String.format("Cannot use WhiteListPolicy where the white list (%s) contains none of the contacts points (%s)", whiteList, hosts)); + + childPolicy.init(cluster, whiteHosts); + } + + /** + * Return the HostDistance for the provided host. + * + * @param host the host of which to return the distance of. + * @return {@link HostDistance#IGNORED} if {@code host} is not part of the white list, the HostDistance + * as returned by the wrapped policy otherwise. + */ + @Override + public HostDistance distance(Host host) { + return whiteList.contains(host.getSocketAddress()) + ? childPolicy.distance(host) + : HostDistance.IGNORED; + } + + /** + * Returns the hosts to use for a new query. + *

+ * It is guaranteed that only hosts from the white list will be returned. + * + * @param loggedKeyspace the currently logged keyspace (the one set through either + * {@link Cluster#connect(String)} or by manually doing a {@code USE} query) for + * the session on which this plan need to be built. This can be {@code null} if + * the corresponding session has no keyspace logged in. + * @param statement the query for which to build a plan. + */ + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + // Just delegate to the child policy, since we filter the hosts not white + // listed upfront, the child policy will never see a host that is not white + // listed and thus can't return one. + return childPolicy.newQueryPlan(loggedKeyspace, statement); + } + + @Override + public void onUp(Host host) { + if (whiteList.contains(host.getSocketAddress())) + childPolicy.onUp(host); + } + + @Override + public void onSuspected(Host host) { + if (whiteList.contains(host.getSocketAddress())) + childPolicy.onSuspected(host); + } + + @Override + public void onDown(Host host) { + if (whiteList.contains(host.getSocketAddress())) + childPolicy.onDown(host); + } + + @Override + public void onAdd(Host host) { + if (whiteList.contains(host.getSocketAddress())) + childPolicy.onAdd(host); + } + + @Override + public void onRemove(Host host) { + if (whiteList.contains(host.getSocketAddress())) + childPolicy.onRemove(host); + } + + @Override + public void close() { + if (childPolicy instanceof CloseableLoadBalancingPolicy) + ((CloseableLoadBalancingPolicy)childPolicy).close(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java new file mode 100644 index 00000000000..0b506f2fbeb --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Policies that allow to control some of the behavior of the DataStax Java driver for Cassandra. + */ +package com.datastax.driver.core.policies; diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Assignment.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Assignment.java new file mode 100644 index 00000000000..ff294ab5840 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Assignment.java @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.querybuilder; + +import java.nio.ByteBuffer; +import java.util.List; + +import static com.datastax.driver.core.querybuilder.Utils.appendName; +import static com.datastax.driver.core.querybuilder.Utils.appendValue; + +public abstract class Assignment extends Utils.Appendeable { + + final String name; + + private Assignment(String name) { + this.name = name; + } + + /** + * The name of the column this assignment applies to. + * + * @return the name of the column this assignment applies to. + */ + public String getColumnName() { + return name; + } + + abstract boolean isIdempotent(); + + static class SetAssignment extends Assignment { + + private final Object value; + + SetAssignment(String name, Object value) { + super(name); + this.value = value; + } + + @Override + void appendTo(StringBuilder sb, List variables) { + appendName(name, sb); + sb.append('='); + appendValue(value, sb, variables); + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + + @Override + boolean isIdempotent() { + return Utils.isIdempotent(value); + } + } + + static class CounterAssignment extends Assignment { + + private final Object value; + private final boolean isIncr; + + CounterAssignment(String name, Object value, boolean isIncr) { + super(name); + if (!isIncr && value instanceof Long && ((Long)value) < 0) { + this.value = -((Long)value); + this.isIncr = true; + } else { + this.value = value; + this.isIncr = isIncr; + } + } + + @Override + void appendTo(StringBuilder sb, List variables) { + appendName(name, sb).append('='); + appendName(name, sb).append(isIncr ? 
"+" : "-"); + appendValue(value, sb, variables); + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + + @Override + boolean isIdempotent() { + return false; + } + } + + static class ListPrependAssignment extends Assignment { + + private final Object value; + + ListPrependAssignment(String name, Object value) { + super(name); + this.value = value; + } + + @Override + void appendTo(StringBuilder sb, List variables) { + appendName(name, sb).append('='); + appendValue(value, sb, variables); + sb.append('+'); + appendName(name, sb); + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + + @Override + boolean isIdempotent() { + return false; + } + } + + static class ListSetIdxAssignment extends Assignment { + + private final int idx; + private final Object value; + + ListSetIdxAssignment(String name, int idx, Object value) { + super(name); + this.idx = idx; + this.value = value; + } + + @Override + void appendTo(StringBuilder sb, List variables) { + appendName(name, sb).append('[').append(idx).append("]="); + appendValue(value, sb, variables); + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + + @Override + boolean isIdempotent() { + return true; + } + } + + static class CollectionAssignment extends Assignment { + + private final Object collection; + private final boolean isAdd; + private final boolean isIdempotent; + + CollectionAssignment(String name, Object collection, boolean isAdd, boolean isIdempotent) { + super(name); + this.collection = collection; + this.isAdd = isAdd; + this.isIdempotent = isIdempotent; + } + + CollectionAssignment(String name, Object collection, boolean isAdd) { + this(name, collection, isAdd, true); + } + + @Override + void appendTo(StringBuilder sb, List variables) { + appendName(name, sb).append('='); + appendName(name, sb).append(isAdd ? 
"+" : "-"); + appendValue(collection, sb, variables); + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(collection); + } + + @Override + public boolean isIdempotent() { + return isIdempotent; + } + } + + static class MapPutAssignment extends Assignment { + + private final Object key; + private final Object value; + + MapPutAssignment(String name, Object key, Object value) { + super(name); + this.key = key; + this.value = value; + } + + @Override + void appendTo(StringBuilder sb, List variables) { + appendName(name, sb).append('['); + appendValue(key, sb, variables); + sb.append("]="); + appendValue(value, sb, variables); + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(key) || Utils.containsBindMarker(value); + } + + @Override + boolean isIdempotent() { + return true; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Batch.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Batch.java new file mode 100644 index 00000000000..ad9f5e8d6ae --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Batch.java @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.querybuilder; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.RegularStatement; +import com.datastax.driver.core.SimpleStatement; + +/** + * A built BATCH statement. + */ +public class Batch extends BuiltStatement { + + private final List statements; + private final boolean logged; + private final Options usings; + private ByteBuffer routingKey; + + // Only used when we add at last one statement that is not a BuiltStatement subclass + private int nonBuiltStatementValues; + + Batch(RegularStatement[] statements, boolean logged) { + super((String)null); + this.statements = statements.length == 0 + ? new ArrayList() + : new ArrayList(statements.length); + this.logged = logged; + this.usings = new Options(this); + + for (int i = 0; i < statements.length; i++) + add(statements[i]); + } + + @Override + StringBuilder buildQueryString(List variables) { + StringBuilder builder = new StringBuilder(); + + builder.append(isCounterOp() + ? "BEGIN COUNTER BATCH" + : (logged ? 
"BEGIN BATCH" : "BEGIN UNLOGGED BATCH")); + + if (!usings.usings.isEmpty()) { + builder.append(" USING "); + Utils.joinAndAppend(builder, " AND ", usings.usings, variables); + } + builder.append(' '); + + for (int i = 0; i < statements.size(); i++) { + RegularStatement stmt = statements.get(i); + if (stmt instanceof BuiltStatement) { + BuiltStatement bst = (BuiltStatement)stmt; + builder.append(maybeAddSemicolon(bst.buildQueryString(variables))); + + } else { + String str = stmt.getQueryString(); + builder.append(str); + if (!str.trim().endsWith(";")) + builder.append(';'); + + // Note that we force hasBindMarkers if there is any non-BuiltStatement, so we know + // that we can only get there with variables == null + assert variables == null; + } + } + builder.append("APPLY BATCH;"); + return builder; + } + + /** + * Adds a new statement to this batch. + * + * @param statement the new statement to add. + * @return this batch. + * + * @throws IllegalArgumentException if counter and non-counter operations + * are mixed. + */ + public Batch add(RegularStatement statement) { + boolean isCounterOp = statement instanceof BuiltStatement && ((BuiltStatement) statement).isCounterOp(); + + if (this.isCounterOp == null) + setCounterOp(isCounterOp); + else if (isCounterOp() != isCounterOp) + throw new IllegalArgumentException("Cannot mix counter operations and non-counter operations in a batch statement"); + + this.statements.add(statement); + + if (statement instanceof BuiltStatement) + { + this.hasBindMarkers |= ((BuiltStatement)statement).hasBindMarkers; + } + else + { + // For non-BuiltStatement, we cannot know if it includes a bind makers and we assume it does. 
In practice, + // this means we will always serialize values as strings when there is non-BuiltStatement + this.hasBindMarkers = true; + this.nonBuiltStatementValues += ((SimpleStatement)statement).valuesCount(); + } + + checkForBindMarkers(null); + + if (routingKey == null && statement.getRoutingKey() != null) + routingKey = statement.getRoutingKey(); + + return this; + } + + @Override + public ByteBuffer[] getValues(ProtocolVersion protocolVersion) { + // If there is some non-BuiltStatement inside the batch with values, we shouldn't + // use super.getValues() since it will ignore the values of said non-BuiltStatement. + // If that's the case, we just collects all those values (and we know + // super.getValues() == null in that case since we've explicitely set this.hasBindMarker + // to true). Otherwise, we simply call super.getValues(). + if (nonBuiltStatementValues == 0) + return super.getValues(protocolVersion); + + ByteBuffer[] values = new ByteBuffer[nonBuiltStatementValues]; + int i = 0; + for (RegularStatement statement : statements) + { + if (statement instanceof BuiltStatement) + continue; + + ByteBuffer[] statementValues = statement.getValues(protocolVersion); + System.arraycopy(statementValues, 0, values, i, statementValues.length); + i += statementValues.length; + } + return values; + } + + /** + * Adds a new options for this BATCH statement. + * + * @param using the option to add. + * @return the options of this BATCH statement. + */ + public Options using(Using using) { + return usings.and(using); + } + + /** + * Returns the first non-null routing key of the statements in this batch + * or null otherwise. + * + * @return the routing key for this batch statement. + */ + @Override + public ByteBuffer getRoutingKey() { + return routingKey; + } + + /** + * Returns the keyspace of the first statement in this batch. + * + * @return the keyspace of the first statement in this batch. 
+ */ + @Override + public String getKeyspace() { + return statements.isEmpty() ? null : statements.get(0).getKeyspace(); + } + + /** + * The options of a BATCH statement. + */ + public static class Options extends BuiltStatement.ForwardingStatement { + + private final List usings = new ArrayList(); + + Options(Batch statement) { + super(statement); + } + + /** + * Adds the provided option. + * + * @param using a BATCH option. + * @return this {@code Options} object. + */ + public Options and(Using using) { + usings.add(using); + checkForBindMarkers(using); + return this; + } + + /** + * Adds a new statement to the BATCH statement these options are part of. + * + * @param statement the statement to add. + * @return the BATCH statement these options are part of. + */ + public Batch add(RegularStatement statement) { + return this.statement.add(statement); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BindMarker.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BindMarker.java new file mode 100644 index 00000000000..a814cb22da3 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BindMarker.java @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.querybuilder; + +/** + * A CQL3 bind marker. + *

+ * This can be either an anonymous bind marker or a named one (but note that + * named ones are only supported starting in Cassandra 2.0.1). + *

+ * Please note that to create a new bind maker object you should use + * {@link QueryBuilder#bindMarker()} (anonymous marker) or + * {@link QueryBuilder#bindMarker(String)} (named marker). + */ +public class BindMarker { + static final BindMarker ANONYMOUS = new BindMarker(null); + + private final String name; + + BindMarker(String name) { + this.name = name; + } + + @Override + public String toString() { + if (name == null) + return "?"; + + return Utils.appendName(name, new StringBuilder(name.length() + 1).append(':')).toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BuiltStatement.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BuiltStatement.java new file mode 100644 index 00000000000..b2ba0935aed --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BuiltStatement.java @@ -0,0 +1,388 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.querybuilder; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Pattern; + +import com.datastax.driver.core.*; +import com.datastax.driver.core.policies.RetryPolicy; + +/** + * Common ancestor to the query builder built statements. 
+ */ +public abstract class BuiltStatement extends RegularStatement { + + private static final Pattern lowercaseId = Pattern.compile("[a-z][a-z0-9_]*"); + + private final List partitionKey; + private final ByteBuffer[] routingKey; + final String keyspace; + + private boolean dirty; + private String cache; + private List values; + + Boolean isCounterOp; + boolean hasNonIdempotentOps; + + // Whether the user has inputted bind markers. If that's the case, we never generate values as + // it means the user meant for the statement to be prepared and we shouldn't add our own markers. + boolean hasBindMarkers; + private boolean forceNoValues; + + BuiltStatement(String keyspace) { + this.partitionKey = null; + this.routingKey = null; + this.keyspace = keyspace; + } + + BuiltStatement(TableMetadata tableMetadata) { + this.partitionKey = tableMetadata.getPartitionKey(); + this.routingKey = new ByteBuffer[tableMetadata.getPartitionKey().size()]; + this.keyspace = escapeId(tableMetadata.getKeyspace().getName()); + } + + // Same as Metadata.escapeId, but we don't have access to it here. + protected String escapeId(String ident) { + // we don't need to escape if it's lowercase and match non-quoted CQL3 ids. + return lowercaseId.matcher(ident).matches() ? 
ident : Metadata.quote(ident); + } + + @Override + public String getQueryString() { + maybeRebuildCache(); + return cache; + } + + private void maybeRebuildCache() { + if (!dirty && cache != null) + return; + + StringBuilder sb; + values = null; + + if (hasBindMarkers || forceNoValues) { + sb = buildQueryString(null); + } else { + values = new ArrayList(); + sb = buildQueryString(values); + + if (values.size() > 65535) + throw new IllegalArgumentException("Too many values for built statement, the maximum allowed is 65535"); + + if (values.isEmpty()) + values = null; + } + + maybeAddSemicolon(sb); + + cache = sb.toString(); + dirty = false; + } + + static StringBuilder maybeAddSemicolon(StringBuilder sb) { + // Use the same test that String#trim() uses to determine + // if a character is a whitespace character. + int l = sb.length(); + while (l > 0 && sb.charAt(l - 1) <= ' ') + l -= 1; + if (l != sb.length()) + sb.setLength(l); + + if (l == 0 || sb.charAt(l - 1) != ';') + sb.append(';'); + return sb; + } + + abstract StringBuilder buildQueryString(List variables); + + boolean isCounterOp() { + return isCounterOp == null ? 
false : isCounterOp; + } + + void setCounterOp(boolean isCounterOp) { + this.isCounterOp = isCounterOp; + } + + boolean hasNonIdempotentOps() { + return hasNonIdempotentOps; + } + + void setNonIdempotentOps() { + hasNonIdempotentOps = true; + } + + void checkForBindMarkers(Object value) { + dirty = true; + if (Utils.containsBindMarker(value)) + hasBindMarkers = true; + } + + void checkForBindMarkers(Utils.Appendeable value) { + dirty = true; + if (value != null && value.containsBindMarker()) + hasBindMarkers = true; + } + + // TODO: Correctly document the InvalidTypeException + void maybeAddRoutingKey(String name, Object value) { + if (routingKey == null || name == null || value == null || value instanceof BindMarker) + return; + + for (int i = 0; i < partitionKey.size(); i++) { + if (name.equals(partitionKey.get(i).getName()) && Utils.isRawValue(value)) { + DataType dt = partitionKey.get(i).getType(); + // We don't really care which protocol version we use, since the only place it matters if for + // collections (not inside UDT), and those are not allowed in a partition key anyway, hence the hardcoding. + routingKey[i] = dt.serialize(dt.parse(Utils.toRawString(value)), ProtocolVersion.NEWEST_SUPPORTED); + return; + } + } + } + + @Override + public ByteBuffer getRoutingKey() { + if (routingKey == null) + return null; + + for (ByteBuffer bb : routingKey) + if (bb == null) + return null; + + return routingKey.length == 1 + ? routingKey[0] + : compose(routingKey); + } + + @Override + public String getKeyspace() { + return keyspace; + } + + @Override + public ByteBuffer[] getValues(ProtocolVersion protocolVersion) { + maybeRebuildCache(); + return values == null ? 
null : Utils.convert(values, protocolVersion); + } + + @Override + public boolean hasValues() { + maybeRebuildCache(); + return values != null; + } + + @Override + public Boolean isIdempotent() { + // If a value was forced with setIdempotent, it takes priority + if (idempotent != null) + return idempotent; + + // Otherwise return the computed value + return !hasNonIdempotentOps(); + } + + @Override + public String toString() { + if (forceNoValues) + return getQueryString(); + + return maybeAddSemicolon(buildQueryString(null)).toString(); + } + + // Not meant to be public + List getRawValues() { + maybeRebuildCache(); + return values; + } + + /** + * Allows to force this builder to not generate values (through its {@code getValues()} method). + *

+ * By default (and unless the protocol version 1 is in use, see below) and + * for performance reasons, the query builder will not serialize all values + * provided to strings. This means that the {@link #getQueryString} may + * return a query string with bind markers (where and when is at the + * discretion of the builder) and {@link #getValues} will return the binary + * values for those markers. This method allows to force the builder to not + * generate binary values but rather to serialize them all in the query + * string. In practice, this means that if you call {@code + * setForceNoValues(true)}, you are guaranteed that {@code getValues()} will + * return {@code null} and that the string returned by {@code + * getQueryString()} will contain no other bind markers than the one + * inputted by the user. + *

+ * If the native protocol version 1 is in use, the driver will default + * to not generating values since those are not supported by that version of + * the protocol. In practice, the driver will automatically call this method + * with {@code true} as argument prior to execution. Hence, calling this + * method when the protocol version 1 is in use is basically a no-op. + *

+ * Note that this method is mainly useful for debugging purpose. In general, + * the default behavior should be the correct and most efficient one. + * + * @param forceNoValues whether or not this builder may generate values. + * @return this statement. + */ + public RegularStatement setForceNoValues(boolean forceNoValues) { + this.forceNoValues = forceNoValues; + this.dirty = true; + return this; + } + + // This is a duplicate of the one in SimpleStatement, but I don't want to expose this publicly so... + static ByteBuffer compose(ByteBuffer... buffers) { + int totalLength = 0; + for (ByteBuffer bb : buffers) + totalLength += 2 + bb.remaining() + 1; + + ByteBuffer out = ByteBuffer.allocate(totalLength); + for (ByteBuffer buffer : buffers) + { + ByteBuffer bb = buffer.duplicate(); + putShortLength(out, bb.remaining()); + out.put(bb); + out.put((byte) 0); + } + out.flip(); + return out; + } + + private static void putShortLength(ByteBuffer bb, int length) { + bb.put((byte) ((length >> 8) & 0xFF)); + bb.put((byte) (length & 0xFF)); + } + + /** + * An utility class to create a BuiltStatement that encapsulate another one. 
+ */ + abstract static class ForwardingStatement extends BuiltStatement { + + T statement; + + ForwardingStatement(T statement) { + super((String)null); + this.statement = statement; + } + + @Override + public String getQueryString() { + return statement.getQueryString(); + } + + @Override + StringBuilder buildQueryString(List values) { + return statement.buildQueryString(values); + } + + @Override + public ByteBuffer getRoutingKey() { + return statement.getRoutingKey(); + } + + @Override + public String getKeyspace() { + return statement.getKeyspace(); + } + + @Override + boolean isCounterOp() { + return statement.isCounterOp(); + } + + @Override + boolean hasNonIdempotentOps() { + return statement.hasNonIdempotentOps(); + } + + @Override + public RegularStatement setForceNoValues(boolean forceNoValues) { + statement.setForceNoValues(forceNoValues); + return this; + } + + @Override + List getRawValues() { + return statement.getRawValues(); + } + + @Override + public Statement setConsistencyLevel(ConsistencyLevel consistency) { + statement.setConsistencyLevel(consistency); + return this; + } + + @Override + public ConsistencyLevel getConsistencyLevel() { + return statement.getConsistencyLevel(); + } + + @Override + public Statement enableTracing() { + statement.enableTracing(); + return this; + } + + @Override + public Statement disableTracing() { + statement.disableTracing(); + return this; + } + + @Override + public boolean isTracing() { + return statement.isTracing(); + } + + @Override + public Statement setRetryPolicy(RetryPolicy policy) { + statement.setRetryPolicy(policy); + return this; + } + + @Override + public RetryPolicy getRetryPolicy() { + return statement.getRetryPolicy(); + } + + @Override + public ByteBuffer[] getValues(ProtocolVersion protocolVersion) { + return statement.getValues(protocolVersion); + } + + @Override + public boolean hasValues() { + return statement.hasValues(); + } + + @Override + void checkForBindMarkers(Object value) { + 
statement.checkForBindMarkers(value); + } + + @Override + void checkForBindMarkers(Utils.Appendeable value) { + statement.checkForBindMarkers(value); + } + + @Override + public String toString() { + return statement.toString(); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Clause.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Clause.java new file mode 100644 index 00000000000..fed6d1d132f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Clause.java @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.querybuilder; + +import java.util.List; + +public abstract class Clause extends Utils.Appendeable { + + abstract String name(); + abstract Object firstValue(); + + private static abstract class AbstractClause extends Clause { + final String name; + + private AbstractClause(String name) { + this.name = name; + } + + @Override + String name() { + return name; + } + } + + static class SimpleClause extends AbstractClause { + + private final String op; + private final Object value; + + SimpleClause(String name, String op, Object value) { + super(name); + this.op = op; + this.value = value; + } + + @Override + void appendTo(StringBuilder sb, List variables) { + Utils.appendName(name, sb).append(op); + Utils.appendValue(value, sb, variables); + } + + @Override + Object firstValue() { + return value; + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + } + + static class InClause extends AbstractClause { + + private final List values; + + InClause(String name, List values) { + super(name); + this.values = values; + + if (values == null) + throw new IllegalArgumentException("Missing values for IN clause"); + if (values.size() > 65535) + throw new IllegalArgumentException("Too many values for IN clause, the maximum allowed is 65535"); + } + + @Override + void appendTo(StringBuilder sb, List variables) { + + // We special case the case of just one bind marker because there is little + // reasons to do: + // ... IN (?) ... + // since in that case it's more elegant to use an equal. On the other side, + // it is a lot more useful to do: + // ... IN ? ... + // which binds the variable to the full list the IN is on. 
+ if (values.size() == 1 && values.get(0) instanceof BindMarker) { + Utils.appendName(name, sb).append(" IN ").append(values.get(0)); + return; + } + + Utils.appendName(name, sb).append(" IN ("); + Utils.joinAndAppendValues(sb, ",", values, variables).append(')'); + } + + @Override + Object firstValue() { + return values.isEmpty() ? null : values.get(0); + } + + @Override + boolean containsBindMarker() { + for (Object value : values) + if (Utils.containsBindMarker(value)) + return true; + return false; + } + } + + static class ContainsClause extends AbstractClause { + + private final Object value; + + ContainsClause(String name, Object value) { + super(name); + this.value = value; + + if (value == null) + throw new IllegalArgumentException("Missing value for CONTAINS clause"); + } + + @Override + void appendTo(StringBuilder sb, List variables) { + Utils.appendName(name, sb).append(" CONTAINS "); + Utils.appendValue(value, sb, variables); + } + + @Override + Object firstValue() { + return value; + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + } + + + static class ContainsKeyClause extends AbstractClause { + + private final Object value; + + ContainsKeyClause(String name, Object value) { + super(name); + this.value = value; + + if (value == null) + throw new IllegalArgumentException("Missing value for CONTAINS KEY clause"); + } + + @Override + void appendTo(StringBuilder sb, List variables) { + Utils.appendName(name, sb).append(" CONTAINS KEY "); + Utils.appendValue(value, sb, variables); + } + + @Override + Object firstValue() { + return value; + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + } + + + static class CompoundClause extends Clause { + private String op; + private final List names; + private final List values; + + CompoundClause(List names, String op, List values) { + assert names.size() == values.size(); + this.op = op; + this.names = names; + this.values 
= values; + } + + @Override + String name() { + // This is only used for routing key purpose, and so far CompoundClause + // are not allowed for the partitionKey anyway + return null; + } + + @Override + Object firstValue() { + // This is only used for routing key purpose, and so far CompoundClause + // are not allowed for the partitionKey anyway + return null; + } + + @Override + boolean containsBindMarker() { + for (int i = 0; i < values.size(); i++) + if (Utils.containsBindMarker(values.get(i))) + return true; + return false; + } + + @Override + void appendTo(StringBuilder sb, List variables) { + sb.append("("); + for (int i = 0; i < names.size(); i++) { + if (i > 0) + sb.append(","); + Utils.appendName(names.get(i), sb); + } + sb.append(")").append(op).append("("); + for (int i = 0; i < values.size(); i++) { + if (i > 0) + sb.append(","); + Utils.appendValue(values.get(i), sb, variables); + } + sb.append(")"); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Delete.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Delete.java new file mode 100644 index 00000000000..ec075bed33c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Delete.java @@ -0,0 +1,525 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.querybuilder; + +import java.util.ArrayList; +import java.util.List; + +import com.datastax.driver.core.TableMetadata; + +/** + * A built DELETE statement. + */ +public class Delete extends BuiltStatement { + + private final String table; + private final List columns; + private final Where where; + private final Options usings; + private final Conditions conditions; + private boolean ifExists; + + Delete(String keyspace, String table, List columns) { + super(keyspace); + this.table = table; + this.columns = columns; + this.where = new Where(this); + this.usings = new Options(this); + this.conditions = new Conditions(this); + } + + Delete(TableMetadata table, List columns) { + super(table); + this.table = escapeId(table.getName()); + this.columns = columns; + this.where = new Where(this); + this.usings = new Options(this); + this.conditions = new Conditions(this); + } + + @Override + StringBuilder buildQueryString(List variables) { + StringBuilder builder = new StringBuilder(); + + builder.append("DELETE"); + if (!columns.isEmpty()) + Utils.joinAndAppend(builder.append(" "), ",", columns, variables); + + builder.append(" FROM "); + if (keyspace != null) + Utils.appendName(keyspace, builder).append('.'); + Utils.appendName(table, builder); + if (!usings.usings.isEmpty()) { + builder.append(" USING "); + Utils.joinAndAppend(builder, " AND ", usings.usings, variables); + } + + if (!where.clauses.isEmpty()) { + builder.append(" WHERE "); + Utils.joinAndAppend(builder, " AND ", where.clauses, variables); + } + + if (ifExists) { + builder.append(" IF EXISTS "); + } + + if (!conditions.conditions.isEmpty()) { + builder.append(" IF "); + Utils.joinAndAppend(builder, " AND ", conditions.conditions, variables); + } + + return builder; + } + + /** + * Adds a WHERE clause to this statement. + * + * This is a shorter/more readable version for {@code where().and(clause)}. + * + * @param clause the clause to add. 
+ * @return the where clause of this query to which more clause can be added. + */ + public Where where(Clause clause) { + return where.and(clause); + } + + /** + * Returns a Where statement for this query without adding clause. + * + * @return the where clause of this query to which more clause can be added. + */ + public Where where() { + return where; + } + + /** + * Adds a conditions clause (IF) to this statement. + *

+ * This is a shorter/more readable version for {@code onlyIf().and(condition)}. + * + * @param condition the condition to add. + * @return the conditions of this query to which more conditions can be added. + */ + public Conditions onlyIf(Clause condition) { + return conditions.and(condition); + } + + /** + * Adds a conditions clause (IF) to this statement. + * + * @return the conditions of this query to which more conditions can be added. + */ + public Conditions onlyIf() { + return conditions; + } + + /** + * Adds a new options for this DELETE statement. + * + * @param using the option to add. + * @return the options of this DELETE statement. + */ + public Options using(Using using) { + return usings.and(using); + } + + /** + * Returns the options for this DELETE statement. + *

+ * Chain this with {@link Options#and(Using)} to add options. + * + * @return the options of this DELETE statement. + */ + public Options using() { + return usings; + } + + /** + * Sets the 'IF EXISTS' option for this DELETE statement. + * + *

+ * A delete with that option will report whether the statement actually + * resulted in data being deleted. The existence check and deletion are + * done transactionally in the sense that if multiple clients attempt to + * delete a given row with this option, then at most one may succeed. + *

+ *

+ * Please keep in mind that using this option has a non negligible + * performance impact and should be avoided when possible. + *

+ * + * @return this DELETE statement. + */ + public Delete ifExists() { + this.ifExists = true; + return this; + } + + /** + * The WHERE clause of a DELETE statement. + */ + public static class Where extends BuiltStatement.ForwardingStatement { + + private final List clauses = new ArrayList(); + + Where(Delete statement) { + super(statement); + } + + /** + * Adds the provided clause to this WHERE clause. + * + * @param clause the clause to add. + * @return this WHERE clause. + */ + public Where and(Clause clause) + { + clauses.add(clause); + statement.maybeAddRoutingKey(clause.name(), clause.firstValue()); + checkForBindMarkers(clause); + return this; + } + + /** + * Adds an option to the DELETE statement this WHERE clause is part of. + * + * @param using the using clause to add. + * @return the options of the DELETE statement this WHERE clause is part of. + */ + public Options using(Using using) { + return statement.using(using); + } + + /** + * Sets the 'IF EXISTS' option for the DELETE statement this WHERE clause + * is part of. + * + *

+ * A delete with that option will report whether the statement actually + * resulted in data being deleted. The existence check and deletion are + * done transactionally in the sense that if multiple clients attempt to + * delete a given row with this option, then at most one may succeed. + *

+ *

+ * Please keep in mind that using this option has a non negligible + * performance impact and should be avoided when possible. + *

+ * + * @return the DELETE statement this WHERE clause is part of. + */ + public Delete ifExists() { + return statement.ifExists(); + } + + /** + * Adds a condition to the DELETE statement this WHERE clause is part of. + * + * @param condition the condition to add. + * @return the conditions for the DELETE statement this WHERE clause is part of. + */ + public Conditions onlyIf(Clause condition) { + return statement.onlyIf(condition); + } + } + + /** + * The options of a DELETE statement. + */ + public static class Options extends BuiltStatement.ForwardingStatement { + + private final List usings = new ArrayList(); + + Options(Delete statement) { + super(statement); + } + + /** + * Adds the provided option. + * + * @param using a DELETE option. + * @return this {@code Options} object. + */ + public Options and(Using using) { + usings.add(using); + checkForBindMarkers(using); + return this; + } + + /** + * Adds a where clause to the DELETE statement these options are part of. + * + * @param clause clause to add. + * @return the WHERE clause of the DELETE statement these options are part of. + */ + public Where where(Clause clause) { + return statement.where(clause); + } + } + + /** + * An in-construction DELETE statement. + */ + public static class Builder { + + List columns = new ArrayList(); + + Builder() { + } + + Builder(String... columnNames) { + for (String columnName : columnNames) { + this.columns.add(new Selector(columnName)); + } + } + + /** + * Adds the table to delete from. + * + * @param table the name of the table to delete from. + * @return a newly built DELETE statement that deletes from {@code table}. + */ + public Delete from(String table) { + return from(null, table); + } + + /** + * Adds the table to delete from. + * + * @param keyspace the name of the keyspace to delete from. + * @param table the name of the table to delete from. + * @return a newly built DELETE statement that deletes from {@code keyspace.table}. 
+ */ + public Delete from(String keyspace, String table) { + return new Delete(keyspace, table, columns); + } + + /** + * Adds the table to delete from. + * + * @param table the table to delete from. + * @return a newly built DELETE statement that deletes from {@code table}. + */ + public Delete from(TableMetadata table) { + return new Delete(table, columns); + } + } + + /** + * An column selection clause for an in-construction DELETE statement. + */ + public static class Selection extends Builder { + + /** + * Deletes all columns (i.e. "DELETE FROM ...") + * + * @return an in-build DELETE statement. + * + * @throws IllegalStateException if some columns had already been selected for this builder. + */ + public Builder all() { + if (!columns.isEmpty()) + throw new IllegalStateException(String.format("Some columns (%s) have already been selected.", columns)); + + return this; + } + + /** + * Deletes the provided column. + * + * @param columnName the column to select for deletion. + * @return this in-build DELETE Selection + */ + public Selection column(String columnName) { + columns.add(new Selector(columnName)); + return this; + } + + /** + * Deletes the provided list element. + * + * @param columnName the name of the list column. + * @param idx the index of the element to delete. + * @return this in-build DELETE Selection + */ + public Selection listElt(String columnName, int idx) { + columns.add(new CollectionElementSelector(columnName, idx)); + return this; + } + + /** + * Deletes the provided list element, + * specified as a bind marker. + * + * @param columnName the name of the list column. + * @param idx the index of the element to delete. + * @return this in-build DELETE Selection + */ + public Selection listElt(String columnName, BindMarker idx) { + columns.add(new CollectionElementSelector(columnName, idx)); + return this; + } + + /** + * Deletes the provided set element. + * + * @param columnName the name of the set column. 
+ * @param element the element to delete. + * @return this in-build DELETE Selection + */ + public Selection setElt(String columnName, Object element) { + columns.add(new CollectionElementSelector(columnName, element)); + return this; + } + + /** + * Deletes the provided set element, + * specified as a bind marker. + * + * @param columnName the name of the set column. + * @param element the element to delete. + * @return this in-build DELETE Selection + */ + public Selection setElt(String columnName, BindMarker element) { + columns.add(new CollectionElementSelector(columnName, element)); + return this; + } + + /** + * Deletes a map element given a key. + * + * @param columnName the name of the map column. + * @param key the key for the element to delete. + * @return this in-build DELETE Selection + */ + public Selection mapElt(String columnName, Object key) { + columns.add(new CollectionElementSelector(columnName, key)); + return this; + } + } + + /** + * A selector in a DELETE selection clause. + * A selector can be either a column name, + * a list element, a set element or a map entry. + */ + static class Selector extends Utils.Appendeable { + + private final String columnName; + + Selector(String columnName) { + this.columnName = columnName; + } + + @Override + void appendTo(StringBuilder sb, List values) { + Utils.appendName(columnName, sb); + } + + @Override + boolean containsBindMarker() { + return false; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + appendTo(sb, new ArrayList()); + return sb.toString(); + } + } + + /** + * A selector representing a list index, a set element or a map key in a DELETE selection clause. 
+ */ + static class CollectionElementSelector extends Selector { + + private final Object key; + + CollectionElementSelector(String columnName, Object key) { + super(columnName); + this.key = key; + } + + @Override + void appendTo(StringBuilder sb, List values) { + super.appendTo(sb, values); + sb.append('['); + Utils.appendValue(key, sb); + sb.append(']'); + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(key); + } + + } + + /** + * Conditions for a DELETE statement. + *

+ * When provided some conditions, a deletion will not apply unless the + * provided conditions applies. + *

+ *

+ * Please keep in mind that provided conditions have a non negligible + * performance impact and should be avoided when possible. + *

+ */ + public static class Conditions extends BuiltStatement.ForwardingStatement { + + private final List conditions = new ArrayList(); + + Conditions(Delete statement) { + super(statement); + } + + /** + * Adds the provided condition for the deletion. + *

+ * Note that while the query builder accept any type of {@code Clause} + * as conditions, Cassandra currently only allows equality ones. + * + * @param condition the condition to add. + * @return this {@code Conditions} clause. + */ + public Conditions and(Clause condition) { + conditions.add(condition); + checkForBindMarkers(condition); + return this; + } + + /** + * Adds a where clause to the DELETE statement these conditions are part of. + * + * @param clause clause to add. + * @return the WHERE clause of the DELETE statement these conditions are part of. + */ + public Where where(Clause clause) { + return statement.where(clause); + } + + /** + * Adds an option to the DELETE statement these conditions are part of. + * + * @param using the using clause to add. + * @return the options of the DELETE statement these conditions are part of. + */ + public Options using(Using using) { + return statement.using(using); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Insert.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Insert.java new file mode 100644 index 00000000000..ebb24c52187 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Insert.java @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.querybuilder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import com.datastax.driver.core.TableMetadata; + +/** + * A built INSERT statement. + */ +public class Insert extends BuiltStatement { + + private final String table; + private final List names = new ArrayList(); + private final List values = new ArrayList(); + private final Options usings; + private boolean ifNotExists; + + Insert(String keyspace, String table) { + super(keyspace); + this.table = table; + this.usings = new Options(this); + } + + Insert(TableMetadata table) { + super(table); + this.table = escapeId(table.getName()); + this.usings = new Options(this); + } + + @Override + StringBuilder buildQueryString(List variables) { + StringBuilder builder = new StringBuilder(); + + builder.append("INSERT INTO "); + if (keyspace != null) + Utils.appendName(keyspace, builder).append('.'); + Utils.appendName(table, builder); + builder.append('('); + Utils.joinAndAppendNames(builder, ",", names); + builder.append(") VALUES ("); + Utils.joinAndAppendValues(builder, ",", values, variables); + builder.append(')'); + + if (ifNotExists) + builder.append(" IF NOT EXISTS"); + + if (!usings.usings.isEmpty()) { + builder.append(" USING "); + Utils.joinAndAppend(builder, " AND ", usings.usings, variables); + } + return builder; + } + + /** + * Adds a column/value pair to the values inserted by this INSERT statement. + * + * @param name the name of the column to insert/update. + * @param value the value to insert/update for {@code name}. + * @return this INSERT statement. + */ + public Insert value(String name, Object value) { + names.add(name); + values.add(value); + checkForBindMarkers(value); + if (!hasNonIdempotentOps() && !Utils.isIdempotent(value)) + this.setNonIdempotentOps(); + maybeAddRoutingKey(name, value); + return this; + } + + /** + * Adds multiple column/value pairs to the values inserted by this INSERT statement. 
+ * + * @param names a list of column names to insert/update. + * @param values a list of values to insert/update. The {@code i}th + * value in {@code values} will be inserted for the {@code i}th column + * in {@code names}. + * @return this INSERT statement. + * @throws IllegalArgumentException if {@code names.length != values.length}. + */ + public Insert values(String[] names, Object[] values) { + return values(Arrays.asList(names), Arrays.asList(values)); + } + + /** + * Adds multiple column/value pairs to the values inserted by this INSERT statement. + * + * @param names a list of column names to insert/update. + * @param values a list of values to insert/update. The {@code i}th + * value in {@code values} will be inserted for the {@code i}th column + * in {@code names}. + * @return this INSERT statement. + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public Insert values(List names, List values) { + if (names.size() != values.size()) + throw new IllegalArgumentException(String.format("Got %d names but %d values", names.size(), values.size())); + this.names.addAll(names); + this.values.addAll(values); + for (int i = 0; i < names.size(); i++) { + Object value = values.get(i); + checkForBindMarkers(value); + maybeAddRoutingKey(names.get(i), value); + if (!hasNonIdempotentOps() && !Utils.isIdempotent(value)) + this.setNonIdempotentOps(); + } + return this; + } + + /** + * Adds a new options for this INSERT statement. + * + * @param using the option to add. + * @return the options of this INSERT statement. + */ + public Options using(Using using) { + return usings.and(using); + } + + /** + * Returns the options for this INSERT statement. + *

+ * Chain this with {@link Options#and(Using)} to add options. + * + * @return the options of this INSERT statement. + */ + public Options using() { + return usings; + } + /** + * Sets the 'IF NOT EXISTS' option for this INSERT statement. + *

+ * An insert with that option will not succeed unless the row does not + * exist at the time the insertion is execution. The existence check and + * insertions are done transactionally in the sense that if multiple + * clients attempt to create a given row with this option, then at most one + * may succeed. + *

+ * Please keep in mind that using this option has a non negligible + * performance impact and should be avoided when possible. + * + * @return this INSERT statement. + */ + public Insert ifNotExists() { + this.ifNotExists = true; + return this; + } + + /** + * The options of an INSERT statement. + */ + public static class Options extends BuiltStatement.ForwardingStatement { + + private final List usings = new ArrayList(); + + Options(Insert st) { + super(st); + } + + /** + * Adds the provided option. + * + * @param using an INSERT option. + * @return this {@code Options} object. + */ + public Options and(Using using) { + usings.add(using); + checkForBindMarkers(using); + return this; + } + + /** + * Adds a column/value pair to the values inserted by this INSERT statement. + * + * @param name the name of the column to insert/update. + * @param value the value to insert/update for {@code name}. + * @return the INSERT statement those options are part of. + */ + public Insert value(String name, Object value) { + return statement.value(name, value); + } + + /** + * Adds multiple column/value pairs to the values inserted by this INSERT statement. + * + * @param names a list of column names to insert/update. + * @param values a list of values to insert/update. The {@code i}th + * value in {@code values} will be inserted for the {@code i}th column + * in {@code names}. + * @return the INSERT statement those options are part of. + * + * @throws IllegalArgumentException if {@code names.length != values.length}. 
+ */ + public Insert values(String[] names, Object[] values) { + return statement.values(names, values); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Ordering.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Ordering.java new file mode 100644 index 00000000000..a1967a8ce41 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Ordering.java @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.querybuilder; + +import java.util.List; + +public class Ordering extends Utils.Appendeable { + + private final String name; + private final boolean isDesc; + + Ordering(String name, boolean isDesc) { + this.name = name; + this.isDesc = isDesc; + } + + @Override + void appendTo(StringBuilder sb, List variables) { + Utils.appendName(name, sb); + sb.append(isDesc ? " DESC" : " ASC"); + } + + @Override + boolean containsBindMarker() { + return false; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/QueryBuilder.java new file mode 100644 index 00000000000..833fb655a7f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/QueryBuilder.java @@ -0,0 +1,992 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.querybuilder; + +import java.util.*; + +import com.datastax.driver.core.RegularStatement; +import com.datastax.driver.core.TableMetadata; +import com.datastax.driver.core.exceptions.InvalidQueryException; + +/** + * Static methods to build a CQL3 query. + *

+ * The queries built by this builder will provide a value for the + * {@link com.datastax.driver.core.Statement#getRoutingKey} method only when a + * {@link com.datastax.driver.core.TableMetadata} is provided to the builder. + * It is thus advised to do so if a {@link com.datastax.driver.core.policies.TokenAwarePolicy} + * is in use. + *

+ * The provided builders perform very little validation of the built query. + * There is thus no guarantee that a built query is valid, and it is + * definitively possible to create invalid queries. + *

+ * Note that it could be convenient to use an 'import static' to use the methods of this class. + */ +public final class QueryBuilder { + + private QueryBuilder() {} + + /** + * Start building a new SELECT query that selects the provided names. + * + * Note that {@code select(c1, c2)} is just a shortcut for {@code select().column(c1).column(c2) }. + * + * @param columns the columns names that should be selected by the query. + * @return an in-construction SELECT query (you will need to provide at + * least a FROM clause to complete the query). + */ + public static Select.Builder select(String... columns) { + return new Select.Builder(Arrays.asList((Object[])columns)); + } + + /** + * Start building a new SELECT query. + * + * @return an in-construction SELECT query (you will need to provide a + * column selection and at least a FROM clause to complete the query). + */ + public static Select.Selection select() { + // Note: the fact we return Select.Selection as return type is on purpose. + return new Select.SelectionOrAlias(); + } + + /** + * Start building a new INSERT query. + * + * @param table the name of the table in which to insert. + * @return an in-construction INSERT query. + */ + public static Insert insertInto(String table) { + return new Insert(null, table); + } + + /** + * Start building a new INSERT query. + * + * @param keyspace the name of the keyspace to use. + * @param table the name of the table to insert into. + * @return an in-construction INSERT query. + */ + public static Insert insertInto(String keyspace, String table) { + return new Insert(keyspace, table); + } + + /** + * Start building a new INSERT query. + * + * @param table the name of the table to insert into. + * @return an in-construction INSERT query. + */ + public static Insert insertInto(TableMetadata table) { + return new Insert(table); + } + + /** + * Start building a new UPDATE query. + * + * @param table the name of the table to update. 
+ * @return an in-construction UPDATE query (at least a SET and a WHERE + * clause needs to be provided to complete the query). + */ + public static Update update(String table) { + return new Update(null, table); + } + + /** + * Start building a new UPDATE query. + * + * @param keyspace the name of the keyspace to use. + * @param table the name of the table to update. + * @return an in-construction UPDATE query (at least a SET and a WHERE + * clause needs to be provided to complete the query). + */ + public static Update update(String keyspace, String table) { + return new Update(keyspace, table); + } + + /** + * Start building a new UPDATE query. + * + * @param table the name of the table to update. + * @return an in-construction UPDATE query (at least a SET and a WHERE + * clause needs to be provided to complete the query). + */ + public static Update update(TableMetadata table) { + return new Update(table); + } + + /** + * Start building a new DELETE query that deletes the provided names. + * + * @param columns the columns names that should be deleted by the query. + * @return an in-construction DELETE query (At least a FROM and a WHERE + * clause needs to be provided to complete the query). + */ + public static Delete.Builder delete(String... columns) { + return new Delete.Builder(columns); + } + + /** + * Start building a new DELETE query. + * + * @return an in-construction SELECT query (you will need to provide a + * column selection and at least a FROM and a WHERE clause to complete the + * query). + */ + public static Delete.Selection delete() { + return new Delete.Selection(); + } + + /** + * Built a new BATCH query on the provided statements. + *

+ * This method will build a logged batch (this is the default in CQL3). To + * create unlogged batches, use {@link #unloggedBatch}. Also note that + * for convenience, if the provided statements are counter statements, this + * method will create a COUNTER batch even though COUNTER batches are never + * logged (so for counters, using this method is effectively equivalent to + * using {@link #unloggedBatch}). + * + * @param statements the statements to batch. + * @return a new {@code RegularStatement} that batch {@code statements}. + */ + public static Batch batch(RegularStatement... statements) { + return new Batch(statements, true); + } + + /** + * Built a new UNLOGGED BATCH query on the provided statements. + *

+ * Compared to logged batches (the default), unlogged batch don't + * use the distributed batch log server side and as such are not + * guaranteed to be atomic. In other words, if an unlogged batch + * timeout, some of the batched statements may have been persisted + * while some have not. Unlogged batch will however be slightly + * faster than logged batch. + *

+ * If the statements added to the batch are counter statements, the + * resulting batch will be a COUNTER one. + * + * @param statements the statements to batch. + * @return a new {@code RegularStatement} that batch {@code statements} without + * using the batch log. + */ + public static Batch unloggedBatch(RegularStatement... statements) { + return new Batch(statements, false); + } + + /** + * Creates a new TRUNCATE query. + * + * @param table the name of the table to truncate. + * @return the truncation query. + */ + public static Truncate truncate(String table) { + return new Truncate(null, table); + } + + /** + * Creates a new TRUNCATE query. + * + * @param keyspace the name of the keyspace to use. + * @param table the name of the table to truncate. + * @return the truncation query. + */ + public static Truncate truncate(String keyspace, String table) { + return new Truncate(keyspace, table); + } + + /** + * Creates a new TRUNCATE query. + * + * @param table the table to truncate. + * @return the truncation query. + */ + public static Truncate truncate(TableMetadata table) { + return new Truncate(table); + } + + /** + * Quotes a columnName to make it case sensitive. + * + * @param columnName the column name to quote. + * @return the quoted column name. + */ + public static String quote(String columnName) { + return '"' + columnName + '"'; + } + + /** + * The token of a column name. + * + * @param columnName the column name to take the token of. + * @return {@code "token(" + columnName + ")"}. + */ + public static String token(String columnName) { + StringBuilder sb = new StringBuilder(); + sb.append("token("); + Utils.appendName(columnName, sb); + sb.append(')'); + return sb.toString(); + } + + /** + * The token of column names. + *

+ * This variant is most useful when the partition key is composite. + * + * @param columnNames the column names to take the token of. + * @return a string representing the token of the provided column names. + */ + public static String token(String... columnNames) { + StringBuilder sb = new StringBuilder(); + sb.append("token("); + Utils.joinAndAppendNames(sb, ",", Arrays.asList((Object[])columnNames)); + sb.append(')'); + return sb.toString(); + } + + /** + * Creates an "equal" where clause stating the provided column must be + * equal to the provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause eq(String name, Object value) { + return new Clause.SimpleClause(name, "=", value); + } + + /** + * Create an "in" where clause stating the provided column must be equal + * to one of the provided values. + * + * @param name the column name + * @param values the values + * @return the corresponding where clause. + */ + public static Clause in(String name, Object... values) { + return new Clause.InClause(name, Arrays.asList(values)); + } + + /** + * Create an "in" where clause stating the provided column must be equal + * to one of the provided values. + * + * @param name the column name + * @param values the values + * @return the corresponding where clause. + */ + public static Clause in(String name, List values) { + return new Clause.InClause(name, values); + } + + /** + * Creates a "contains" where clause stating the provided column must contain + * the value provided. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause contains(String name, Object value) { + return new Clause.ContainsClause(name, value); + } + + /** + * Creates a "contains key" where clause stating the provided column must contain + * the key provided. 
+ * + * @param name the column name + * @param key the key + * @return the corresponding where clause. + */ + public static Clause containsKey(String name, Object key) { + return new Clause.ContainsKeyClause(name, key); + } + + /** + * Creates a "lesser than" where clause stating the provided column must be less than + * the provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause lt(String name, Object value) { + return new Clause.SimpleClause(name, "<", value); + } + + /** + * Creates a "lesser than" where clause for a group of clustering columns. + *

+ * For instance, {@code lt(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} + * will generate the CQL WHERE clause {@code (a, b) < (2, 'test') }. + *

+ * Please note that this variant is only supported starting with Cassandra 2.0.6. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause lt(List names, List values) { + if (names.size() != values.size()) + throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), values.size())); + + return new Clause.CompoundClause(names, "<", values); + } + + /** + * Creates a "lesser than or equal" where clause stating the provided column must + * be lesser than or equal to the provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause lte(String name, Object value) { + return new Clause.SimpleClause(name, "<=", value); + } + + /** + * Creates a "lesser than or equal" where clause for a group of clustering columns. + *

+ * For instance, {@code lte(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} + * will generate the CQL WHERE clause {@code (a, b) <e; (2, 'test') }. + *

+ * Please note that this variant is only supported starting with Cassandra 2.0.6. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause lte(List names, List values) { + if (names.size() != values.size()) + throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), values.size())); + + return new Clause.CompoundClause(names, "<=", values); + } + + /** + * Creates a "greater than" where clause stating the provided column must + * be greater to the provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause gt(String name, Object value) { + return new Clause.SimpleClause(name, ">", value); + } + + /** + * Creates a "greater than" where clause for a group of clustering columns. + *

+ * For instance, {@code gt(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} + * will generate the CQL WHERE clause {@code (a, b) > (2, 'test') }. + *

+ * Please note that this variant is only supported starting with Cassandra 2.0.6. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause gt(List names, List values) { + if (names.size() != values.size()) + throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), values.size())); + + return new Clause.CompoundClause(names, ">", values); + } + + /** + * Creates a "greater than or equal" where clause stating the provided + * column must be greater than or equal to the provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause gte(String name, Object value) { + return new Clause.SimpleClause(name, ">=", value); + } + + /** + * Creates a "greater than or equal" where clause for a group of clustering columns. + *

+ * For instance, {@code gte(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} + * will generate the CQL WHERE clause {@code (a, b) >e; (2, 'test') }. + *

+ * Please note that this variant is only supported starting with Cassandra 2.0.6. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause gte(List names, List values) { + if (names.size() != values.size()) + throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), values.size())); + + return new Clause.CompoundClause(names, ">=", values); + } + + /** + * Ascending ordering for the provided column. + * + * @param columnName the column name + * @return the corresponding ordering + */ + public static Ordering asc(String columnName) { + return new Ordering(columnName, false); + } + + /** + * Descending ordering for the provided column. + * + * @param columnName the column name + * @return the corresponding ordering + */ + public static Ordering desc(String columnName) { + return new Ordering(columnName, true); + } + + /** + * Option to set the timestamp for a modification query (insert, update or delete). + * + * @param timestamp the timestamp (in microseconds) to use. + * @return the corresponding option + * + * @throws IllegalArgumentException if {@code timestamp < 0}. + */ + public static Using timestamp(long timestamp) { + if (timestamp < 0) + throw new IllegalArgumentException("Invalid timestamp, must be positive"); + + return new Using.WithValue("TIMESTAMP", timestamp); + } + + /** + * Option to prepare the timestamp (in microseconds) for a modification query (insert, update or delete). + * + * @param marker bind marker to use for the timestamp. + * @return the corresponding option. + */ + public static Using timestamp(BindMarker marker) { + return new Using.WithMarker("TIMESTAMP", marker); + } + + /** + * Option to set the ttl for a modification query (insert, update or delete). + * + * @param ttl the ttl (in seconds) to use. 
+ * @return the corresponding option + * + * @throws IllegalArgumentException if {@code ttl < 0}. + */ + public static Using ttl(int ttl) { + if (ttl < 0) + throw new IllegalArgumentException("Invalid ttl, must be positive"); + + return new Using.WithValue("TTL", ttl); + } + + /** + * Option to prepare the ttl (in seconds) for a modification query (insert, update or delete). + * + * @param marker bind marker to use for the ttl. + * @return the corresponding option + */ + public static Using ttl(BindMarker marker) { + return new Using.WithMarker("TTL", marker); + } + + /** + * Simple "set" assignment of a value to a column. + *

+ * This will generate: {@code name = value}. + * + * @param name the column name + * @param value the value to assign + * @return the correspond assignment (to use in an update query) + */ + public static Assignment set(String name, Object value) { + return new Assignment.SetAssignment(name, value); + } + + /** + * Incrementation of a counter column. + *

+ * This will generate: {@code name = name + 1}. + * + * @param name the column name to increment + * @return the correspond assignment (to use in an update query) + */ + public static Assignment incr(String name) { + return incr(name, 1L); + } + + /** + * Incrementation of a counter column by a provided value. + *

+ * This will generate: {@code name = name + value}. + * + * @param name the column name to increment + * @param value the value by which to increment + * @return the correspond assignment (to use in an update query) + */ + public static Assignment incr(String name, long value) { + return new Assignment.CounterAssignment(name, value, true); + } + + /** + * Incrementation of a counter column by a provided value. + *

+ * This will generate: {@code name = name + value}. + * + * @param name the column name to increment + * @param value a bind marker representing the value by which to increment + * @return the correspond assignment (to use in an update query) + */ + public static Assignment incr(String name, BindMarker value) { + return new Assignment.CounterAssignment(name, value, true); + } + + /** + * Decrementation of a counter column. + *

+ * This will generate: {@code name = name - 1}. + * + * @param name the column name to decrement + * @return the correspond assignment (to use in an update query) + */ + public static Assignment decr(String name) { + return decr(name, 1L); + } + + /** + * Decrementation of a counter column by a provided value. + *

+ * This will generate: {@code name = name - value}. + * + * @param name the column name to decrement + * @param value the value by which to decrement + * @return the correspond assignment (to use in an update query) + */ + public static Assignment decr(String name, long value) { + return new Assignment.CounterAssignment(name, value, false); + } + + /** + * Decrementation of a counter column by a provided value. + *

+ * This will generate: {@code name = name - value}. + * + * @param name the column name to decrement + * @param value a bind marker representing the value by which to decrement + * @return the correspond assignment (to use in an update query) + */ + public static Assignment decr(String name, BindMarker value) { + return new Assignment.CounterAssignment(name, value, false); + } + + /** + * Prepend a value to a list column. + *

+ * This will generate: {@code name = [ value ] + name}. + * + * @param name the column name (must be of type list). + * @param value the value to prepend. Using a BindMarker here is not supported. + * To use a BindMarker use {@code QueryBuilder#prependAll} with a + * singleton list. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment prepend(String name, Object value) { + if (value instanceof BindMarker) { + throw new InvalidQueryException("binding a value in prepend() is not supported, use prependAll() and bind a singleton list"); + } + return prependAll(name, Collections.singletonList(value)); + } + + /** + * Prepend a list of values to a list column. + *

+ * This will generate: {@code name = list + name}. + * + * @param name the column name (must be of type list). + * @param list the list of values to prepend. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment prependAll(String name, List list) { + return new Assignment.ListPrependAssignment(name, list); + } + + /** + * Prepend a list of values to a list column. + *

+ * This will generate: {@code name = list + name}. + * + * @param name the column name (must be of type list). + * @param list a bind marker representing the list of values to prepend. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment prependAll(String name, BindMarker list) { + return new Assignment.ListPrependAssignment(name, list); + } + + /** + * Append a value to a list column. + *

+ * This will generate: {@code name = name + [value]}. + * + * @param name the column name (must be of type list). + * @param value the value to append. Using a BindMarker here is not supported. + * To use a BindMarker use {@code QueryBuilder#appendAll} with a + * singleton list. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment append(String name, Object value) { + if (value instanceof BindMarker) { + throw new InvalidQueryException("Binding a value in append() is not supported, use appendAll() and bind a singleton list"); + } + return appendAll(name, Collections.singletonList(value)); + } + + /** + * Append a list of values to a list column. + *

+ * This will generate: {@code name = name + list}. + * + * @param name the column name (must be of type list). + * @param list the list of values to append + * @return the correspond assignment (to use in an update query) + */ + public static Assignment appendAll(String name, List list) { + return new Assignment.CollectionAssignment(name, list, true, false); + } + + /** + * Append a list of values to a list column. + *

+ * This will generate: {@code name = name + list}. + * + * @param name the column name (must be of type list). + * @param list a bind marker representing the list of values to append + * @return the correspond assignment (to use in an update query) + */ + public static Assignment appendAll(String name, BindMarker list) { + return new Assignment.CollectionAssignment(name, list, true, false); + } + + /** + * Discard a value from a list column. + *

+ * This will generate: {@code name = name - [value]}. + * + * @param name the column name (must be of type list). + * @param value the value to discard. Using a BindMarker here is not supported. + * To use a BindMarker use {@code QueryBuilder#discardAll} with a singleton list. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment discard(String name, Object value) { + if (value instanceof BindMarker) { + throw new InvalidQueryException("Binding a value in discard() is not supported, use discardAll() and bind a singleton list"); + } + return discardAll(name, Collections.singletonList(value)); + } + + /** + * Discard a list of values to a list column. + *

+ * This will generate: {@code name = name - list}. + * + * @param name the column name (must be of type list). + * @param list the list of values to discard + * @return the correspond assignment (to use in an update query) + */ + public static Assignment discardAll(String name, List list) { + return new Assignment.CollectionAssignment(name, list, false); + } + + /** + * Discard a list of values to a list column. + *

+ * This will generate: {@code name = name - list}. + * + * @param name the column name (must be of type list). + * @param list a bind marker representing the list of values to discard + * @return the correspond assignment (to use in an update query) + */ + public static Assignment discardAll(String name, BindMarker list) { + return new Assignment.CollectionAssignment(name, list, false); + } + + /** + * Sets a list column value by index. + *

+ * This will generate: {@code name[idx] = value}. + * + * @param name the column name (must be of type list). + * @param idx the index to set + * @param value the value to set + * @return the correspond assignment (to use in an update query) + */ + public static Assignment setIdx(String name, int idx, Object value) { + return new Assignment.ListSetIdxAssignment(name, idx, value); + } + + /** + * Adds a value to a set column. + *

+ * This will generate: {@code name = name + {value}}. + * + * @param name the column name (must be of type set). + * @param value the value to add. Using a BindMarker here is not supported. + * To use a BindMarker use {@code QueryBuilder#addAll} with a + * singleton set. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment add(String name, Object value) { + if (value instanceof BindMarker){ + throw new InvalidQueryException("Binding a value in add() is not supported, use addAll() and bind a singleton list"); + } + return addAll(name, Collections.singleton(value)); + } + + /** + * Adds a set of values to a set column. + *

+ * This will generate: {@code name = name + set}. + * + * @param name the column name (must be of type set). + * @param set the set of values to append + * @return the correspond assignment (to use in an update query) + */ + public static Assignment addAll(String name, Set set) { + return new Assignment.CollectionAssignment(name, set, true); + } + + /** + * Adds a set of values to a set column. + *

+ * This will generate: {@code name = name + set}. + * + * @param name the column name (must be of type set). + * @param set a bind marker representing the set of values to append + * @return the correspond assignment (to use in an update query) + */ + public static Assignment addAll(String name, BindMarker set) { + return new Assignment.CollectionAssignment(name, set, true); + } + + /** + * Remove a value from a set column. + *

+ * This will generate: {@code name = name - {value}}. + * + * @param name the column name (must be of type set). + * @param value the value to remove. Using a BindMarker here is not supported. + * To use a BindMarker use {@code QueryBuilder#removeAll} with a singleton set. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment remove(String name, Object value) { + if (value instanceof BindMarker) { + throw new InvalidQueryException("Binding a value in remove() is not supported, use removeAll() and bind a singleton set"); + } + return removeAll(name, Collections.singleton(value)); + } + + /** + * Remove a set of values from a set column. + *

+ * This will generate: {@code name = name - set}. + * + * @param name the column name (must be of type set). + * @param set the set of values to remove + * @return the correspond assignment (to use in an update query) + */ + public static Assignment removeAll(String name, Set set) { + return new Assignment.CollectionAssignment(name, set, false); + } + + /** + * Remove a set of values from a set column. + *

+ * This will generate: {@code name = name - set}. + * + * @param name the column name (must be of type set). + * @param set a bind marker representing the set of values to remove + * @return the correspond assignment (to use in an update query) + */ + public static Assignment removeAll(String name, BindMarker set) { + return new Assignment.CollectionAssignment(name, set, false); + } + + /** + * Puts a new key/value pair to a map column. + *

+ * This will generate: {@code name[key] = value}. + * + * @param name the column name (must be of type map). + * @param key the key to put + * @param value the value to put + * @return the correspond assignment (to use in an update query) + */ + public static Assignment put(String name, Object key, Object value) { + return new Assignment.MapPutAssignment(name, key, value); + } + + /** + * Puts a map of new key/value pairs to a map column. + *

+ * This will generate: {@code name = name + map}. + * + * @param name the column name (must be of type map). + * @param map the map of key/value pairs to put + * @return the correspond assignment (to use in an update query) + */ + public static Assignment putAll(String name, Map map) { + return new Assignment.CollectionAssignment(name, map, true); + } + + /** + * Puts a map of new key/value pairs to a map column. + *

+ * This will generate: {@code name = name + map}. + * + * @param name the column name (must be of type map). + * @param map a bind marker representing the map of key/value pairs to put + * @return the correspond assignment (to use in an update query) + */ + public static Assignment putAll(String name, BindMarker map) { + return new Assignment.CollectionAssignment(name, map, true); + } + + /** + * An object representing an anonymous bind marker (a question mark). + *

+ * This can be used wherever a value is expected. For instance, one can do: + *

+     * {@code
+     *     Insert i = QueryBuilder.insertInto("test").value("k", 0)
+     *                                               .value("c", QueryBuilder.bindMarker());
+     *     PreparedState p = session.prepare(i.toString());
+     * }
+     * 
+ * + * @return a new bind marker. + */ + public static BindMarker bindMarker() { + return BindMarker.ANONYMOUS; + } + + /** + * An object representing a named bind marker. + *

+ * This can be used wherever a value is expected. For instance, one can do: + *

+     * {@code
+     *     Insert i = QueryBuilder.insertInto("test").value("k", 0)
+     *                                               .value("c", QueryBuilder.bindMarker("c_val"));
+     *     PreparedState p = session.prepare(i.toString());
+     * }
+     * 
+ *

+ * Please note that named bind makers are only supported starting with Cassandra 2.0.1. + * + * @param name the name for the bind marker. + * @return an object representing a bind marker named {@code name}. + */ + public static BindMarker bindMarker(String name) { + return new BindMarker(name); + } + + /** + * Protects a value from any interpretation by the query builder. + *

+ * The following table exemplify the behavior of this function: + * + * + * + * + * + * + * + * + *
Examples of use
CodeResulting query string
{@code select().from("t").where(eq("c", "C'est la vie!")); }{@code "SELECT * FROM t WHERE c='C''est la vie!';"}
{@code select().from("t").where(eq("c", raw("C'est la vie!"))); }{@code "SELECT * FROM t WHERE c=C'est la vie!;"}
{@code select().from("t").where(eq("c", raw("'C'est la vie!'"))); }{@code "SELECT * FROM t WHERE c='C'est la vie!';"}
{@code select().from("t").where(eq("c", "now()")); }{@code "SELECT * FROM t WHERE c='now()';"}
{@code select().from("t").where(eq("c", raw("now()"))); }{@code "SELECT * FROM t WHERE c=now();"}
+ * Note: the 2nd and 3rd examples in this table are not a valid CQL3 queries. + *

+ * The use of that method is generally discouraged since it lead to security risks. However, + * if you know what you are doing, it allows to escape the interpretations done by the + * QueryBuilder. + * + * @param str the raw value to use as a string + * @return the value but protected from being interpreted/escaped by the query builder. + */ + public static Object raw(String str) { + return new Utils.RawString(str); + } + + /** + * Creates a function call. + * + * @param name the name of the function to call. + * @param parameters the parameters for the function. + * @return the function call. + */ + public static Object fcall(String name, Object... parameters) { + return new Utils.FCall(name, parameters); + } + + /** + * Creates a {@code now()} function call. + * + * @return the function call. + */ + public static Object now() { + return new Utils.FCall("now"); + } + + /** + * Creates a {@code uuid()} function call. + * + * @return the function call. + */ + public static Object uuid() { + return new Utils.FCall("uuid"); + } + + /** + * Declares that the name in argument should be treated as a column name. + *

+ * This mainly meant for use with {@link Select.Selection#fcall} when a + * function should apply to a column name, not a string value. + * + * @param name the name of the column. + * @return the name as a column name. + */ + public static Object column(String name) { + return new Utils.CName(name); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Select.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Select.java new file mode 100644 index 00000000000..2ecebbe5544 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Select.java @@ -0,0 +1,554 @@ +/* + * Copyright (C) 2012-2015 DataStax Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.querybuilder; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import com.datastax.driver.core.TableMetadata; + +/** + * A built SELECT statement. 
+ */ +public class Select extends BuiltStatement { + + private static final List COUNT_ALL = Collections.singletonList(new Utils.FCall("count", new Utils.RawString("*"))); + + private final String table; + private final boolean isDistinct; + private final List columnNames; + private final Where where; + private List orderings; + private Object limit; + private boolean allowFiltering; + + Select(String keyspace, String table, List columnNames, boolean isDistinct) { + super(keyspace); + this.table = table; + this.isDistinct = isDistinct; + this.columnNames = columnNames; + this.where = new Where(this); + } + + Select(TableMetadata table, List columnNames, boolean isDistinct) { + super(table); + this.table = escapeId(table.getName()); + this.isDistinct = isDistinct; + this.columnNames = columnNames; + this.where = new Where(this); + } + + @Override + StringBuilder buildQueryString(List variables) { + StringBuilder builder = new StringBuilder(); + + builder.append("SELECT "); + if (isDistinct) + builder.append("DISTINCT "); + if (columnNames == null) { + builder.append('*'); + } else { + Utils.joinAndAppendNames(builder, ",", columnNames); + } + builder.append(" FROM "); + if (keyspace != null) + Utils.appendName(keyspace, builder).append('.'); + Utils.appendName(table, builder); + + if (!where.clauses.isEmpty()) { + builder.append(" WHERE "); + Utils.joinAndAppend(builder, " AND ", where.clauses, variables); + } + + if (orderings != null) { + builder.append(" ORDER BY "); + Utils.joinAndAppend(builder, ",", orderings, variables); + } + + if (limit != null) { + builder.append(" LIMIT ").append(limit); + } + + if (allowFiltering) { + builder.append(" ALLOW FILTERING"); + } + + return builder; + } + + /** + * Adds a WHERE clause to this statement. + * + * This is a shorter/more readable version for {@code where().and(clause)}. + * + * @param clause the clause to add. + * @return the where clause of this query to which more clause can be added. 
+ */ + public Where where(Clause clause) { + return where.and(clause); + } + + /** + * Returns a Where statement for this query without adding clause. + * + * @return the where clause of this query to which more clause can be added. + */ + public Where where() { + return where; + } + + /** + * Adds an ORDER BY clause to this statement. + * + * @param orderings the orderings to define for this query. + * @return this statement. + * + * @throws IllegalStateException if an ORDER BY clause has already been + * provided. + */ + public Select orderBy(Ordering... orderings) { + if (this.orderings != null) + throw new IllegalStateException("An ORDER BY clause has already been provided"); + + this.orderings = Arrays.asList(orderings); + for (int i = 0; i < orderings.length; i++) + checkForBindMarkers(orderings[i]); + return this; + } + + /** + * Adds a LIMIT clause to this statement. + * + * @param limit the limit to set. + * @return this statement. + * + * @throws IllegalArgumentException if {@code limit >e; 0}. + * @throws IllegalStateException if a LIMIT clause has already been + * provided. + */ + public Select limit(int limit) { + if (limit <= 0) + throw new IllegalArgumentException("Invalid LIMIT value, must be strictly positive"); + + if (this.limit != null) + throw new IllegalStateException("A LIMIT value has already been provided"); + + this.limit = limit; + checkForBindMarkers(null); + return this; + } + + /** + * Adds a prepared LIMIT clause to this statement. + * + * @param marker the marker to use for the limit. + * @return this statement. + * + * @throws IllegalStateException if a LIMIT clause has already been + * provided. + */ + public Select limit(BindMarker marker) { + if (this.limit != null) + throw new IllegalStateException("A LIMIT value has already been provided"); + + this.limit = marker; + checkForBindMarkers(marker); + return this; + } + + /** + * Adds an ALLOW FILTERING directive to this statement. + * + * @return this statement. 
+ */ + public Select allowFiltering() { + allowFiltering = true; + return this; + } + + /** + * The WHERE clause of a SELECT statement. + */ + public static class Where extends BuiltStatement.ForwardingStatement, BuildableQuery { - - /** - * Adds the provided GROUP BY clauses to the query. - * - *

As of version 4.0, Apache Cassandra only allows grouping by columns, therefore you can use - * the shortcuts {@link #groupByColumns(Iterable)} or {@link #groupByColumnIds(Iterable)}. - */ - @NonNull - Select groupBy(@NonNull Iterable selectors); - - /** Var-arg equivalent of {@link #groupBy(Iterable)}. */ - @NonNull - default Select groupBy(@NonNull Selector... selectors) { - return groupBy(Arrays.asList(selectors)); - } - - /** - * Shortcut for {@link #groupBy(Iterable)} where all the clauses are simple columns. The arguments - * are wrapped with {@link Selector#column(CqlIdentifier)}. - */ - @NonNull - default Select groupByColumnIds(@NonNull Iterable columnIds) { - return groupBy(Iterables.transform(columnIds, Selector::column)); - } - - /** Var-arg equivalent of {@link #groupByColumnIds(Iterable)}. */ - @NonNull - default Select groupByColumnIds(@NonNull CqlIdentifier... columnIds) { - return groupByColumnIds(Arrays.asList(columnIds)); - } - - /** - * Shortcut for {@link #groupBy(Iterable)} where all the clauses are simple columns. The arguments - * are wrapped with {@link Selector#column(String)}. - */ - @NonNull - default Select groupByColumns(@NonNull Iterable columnNames) { - return groupBy(Iterables.transform(columnNames, Selector::column)); - } - - /** Var-arg equivalent of {@link #groupByColumns(Iterable)}. */ - @NonNull - default Select groupByColumns(@NonNull String... columnNames) { - return groupByColumns(Arrays.asList(columnNames)); - } - - /** - * Adds the provided GROUP BY clause to the query. - * - *

As of version 4.0, Apache Cassandra only allows grouping by columns, therefore you can use - * the shortcuts {@link #groupBy(String)} or {@link #groupBy(CqlIdentifier)}. - */ - @NonNull - Select groupBy(@NonNull Selector selector); - - /** Shortcut for {@link #groupBy(Selector) groupBy(Selector.column(columnId))}. */ - @NonNull - default Select groupBy(@NonNull CqlIdentifier columnId) { - return groupBy(Selector.column(columnId)); - } - - /** Shortcut for {@link #groupBy(Selector) groupBy(Selector.column(columnName))}. */ - @NonNull - default Select groupBy(@NonNull String columnName) { - return groupBy(Selector.column(columnName)); - } - - /** - * Adds the provided ORDER BY clauses to the query. - * - *

They will be appended in the iteration order of the provided map. If an ordering was already - * defined for a given identifier, it will be removed and the new ordering will appear in its - * position in the provided map. - */ - @NonNull - Select orderByIds(@NonNull Map orderings); - - /** - * Shortcut for {@link #orderByIds(Map)} with the columns specified as case-insensitive names. - * They will be wrapped with {@link CqlIdentifier#fromCql(String)}. - * - *

Note that it's possible for two different case-insensitive names to resolve to the same - * identifier, for example "foo" and "Foo"; if this happens, a runtime exception will be thrown. - * - * @throws IllegalArgumentException if two names resolve to the same identifier. - */ - @NonNull - default Select orderBy(@NonNull Map orderings) { - return orderByIds(CqlIdentifiers.wrapKeys(orderings)); - } - - /** - * Adds the provided ORDER BY clause to the query. - * - *

If an ordering was already defined for this identifier, it will be removed and the new - * clause will be appended at the end of the current list for this query. - */ - @NonNull - Select orderBy(@NonNull CqlIdentifier columnId, @NonNull ClusteringOrder order); - - /** - * Shortcut for {@link #orderBy(CqlIdentifier, ClusteringOrder) - * orderBy(CqlIdentifier.fromCql(columnName), order)}. - */ - @NonNull - default Select orderBy(@NonNull String columnName, @NonNull ClusteringOrder order) { - return orderBy(CqlIdentifier.fromCql(columnName), order); - } - - /** - * Shortcut for {@link #orderByAnnOf(CqlIdentifier, CqlVector)}, adding an ORDER BY ... ANN OF ... - * clause - */ - @NonNull - Select orderByAnnOf(@NonNull String columnName, @NonNull CqlVector ann); - - /** Adds the ORDER BY ... ANN OF ... clause, usually used for vector search */ - @NonNull - Select orderByAnnOf(@NonNull CqlIdentifier columnId, @NonNull CqlVector ann); - /** - * Adds a LIMIT clause to this query with a literal value. - * - *

If this method or {@link #limit(BindMarker)} is called multiple times, the last value is - * used. - */ - @NonNull - Select limit(int limit); - - /** - * Adds a LIMIT clause to this query with a bind marker. - * - *

To create the argument, use one of the factory methods in {@link QueryBuilder}, for example - * {@link QueryBuilder#bindMarker() bindMarker()}. - * - *

If this method or {@link #limit(int)} is called multiple times, the last value is used. - * {@code null} can be passed to cancel a previous limit. - */ - @NonNull - Select limit(@Nullable BindMarker bindMarker); - - /** - * Adds a PER PARTITION LIMIT clause to this query with a literal value. - * - *

If this method or {@link #perPartitionLimit(BindMarker)} is called multiple times, the last - * value is used. - */ - @NonNull - Select perPartitionLimit(int limit); - - /** - * Adds a PER PARTITION LIMIT clause to this query with a bind marker. - * - *

To create the argument, use one of the factory methods in {@link QueryBuilder}, for example - * {@link QueryBuilder#bindMarker() bindMarker()}. - * - *

If this method or {@link #perPartitionLimit(int)} is called multiple times, the last value - * is used. {@code null} can be passed to cancel a previous limit. - */ - @NonNull - Select perPartitionLimit(@Nullable BindMarker bindMarker); - - /** - * Adds an ALLOW FILTERING clause to this query. - * - *

This method is idempotent, calling it multiple times will only add a single clause. - */ - @NonNull - Select allowFiltering(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/SelectFrom.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/SelectFrom.java deleted file mode 100644 index b39ea8815c6..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/SelectFrom.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.select; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The beginning of a SELECT query. - * - *

It only knows about the table, and optionally whether the statement uses JSON or DISTINCT. It - * is not buildable yet: at least one selector needs to be specified. - */ -public interface SelectFrom extends OngoingSelection { - - // Implementation note - this interface exists to make the following a compile-time error: - // selectFrom("foo").distinct().build() - - @NonNull - SelectFrom json(); - - @NonNull - SelectFrom distinct(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Selector.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Selector.java deleted file mode 100644 index d82d711b052..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Selector.java +++ /dev/null @@ -1,564 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.ArithmeticOperator; -import com.datastax.oss.driver.internal.querybuilder.select.AllSelector; -import com.datastax.oss.driver.internal.querybuilder.select.BinaryArithmeticSelector; -import com.datastax.oss.driver.internal.querybuilder.select.CastSelector; -import com.datastax.oss.driver.internal.querybuilder.select.ColumnSelector; -import com.datastax.oss.driver.internal.querybuilder.select.CountAllSelector; -import com.datastax.oss.driver.internal.querybuilder.select.ElementSelector; -import com.datastax.oss.driver.internal.querybuilder.select.FieldSelector; -import com.datastax.oss.driver.internal.querybuilder.select.FunctionSelector; -import com.datastax.oss.driver.internal.querybuilder.select.ListSelector; -import com.datastax.oss.driver.internal.querybuilder.select.MapSelector; -import com.datastax.oss.driver.internal.querybuilder.select.OppositeSelector; -import com.datastax.oss.driver.internal.querybuilder.select.RangeSelector; -import com.datastax.oss.driver.internal.querybuilder.select.SetSelector; -import com.datastax.oss.driver.internal.querybuilder.select.TupleSelector; -import com.datastax.oss.driver.internal.querybuilder.select.TypeHintSelector; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; -import java.util.Map; - -/** - * A selected element in a SELECT query. - * - *

To build instances of this type, use the factory methods, such as {@link - * #column(CqlIdentifier) column}, {@link #function(CqlIdentifier, Iterable) function}, etc. - * - *

They are used as arguments to the {@link OngoingSelection#selectors(Iterable) selectors} - * method, for example: - * - *

{@code
- * selectFrom("foo").selectors(Selector.column("bar"), Selector.column("baz"))
- * // SELECT bar,baz FROM foo
- * }
- * - *

There are also shortcuts in the fluent API when you build a statement, for example: - * - *

{@code
- * selectFrom("foo").column("bar").column("baz")
- * // SELECT bar,baz FROM foo
- * }
- */ -public interface Selector extends CqlSnippet { - - /** Selects all columns, as in {@code SELECT *}. */ - @NonNull - static Selector all() { - return AllSelector.INSTANCE; - } - - /** Selects the count of all returned rows, as in {@code SELECT count(*)}. */ - @NonNull - static Selector countAll() { - return new CountAllSelector(); - } - - /** Selects a particular column by its CQL identifier. */ - @NonNull - static Selector column(@NonNull CqlIdentifier columnId) { - return new ColumnSelector(columnId); - } - - /** Shortcut for {@link #column(CqlIdentifier) column(CqlIdentifier.fromCql(columnName))} */ - @NonNull - static Selector column(@NonNull String columnName) { - return column(CqlIdentifier.fromCql(columnName)); - } - - /** - * Selects the sum of two arguments, as in {@code SELECT col1 + col2}. - * - *

This is available in Cassandra 4 and above. - */ - @NonNull - static Selector add(@NonNull Selector left, @NonNull Selector right) { - return new BinaryArithmeticSelector(ArithmeticOperator.SUM, left, right); - } - - /** - * Selects the difference of two arguments, as in {@code SELECT col1 - col2}. - * - *

This is available in Cassandra 4 and above. - */ - @NonNull - static Selector subtract(@NonNull Selector left, @NonNull Selector right) { - return new BinaryArithmeticSelector(ArithmeticOperator.DIFFERENCE, left, right); - } - - /** - * Selects the product of two arguments, as in {@code SELECT col1 * col2}. - * - *

This is available in Cassandra 4 and above. - * - *

The arguments will be parenthesized if they are instances of {@link #add} or {@link - * #subtract}. If they are raw selectors, you might have to parenthesize them yourself. - */ - @NonNull - static Selector multiply(@NonNull Selector left, @NonNull Selector right) { - return new BinaryArithmeticSelector(ArithmeticOperator.PRODUCT, left, right); - } - - /** - * Selects the quotient of two arguments, as in {@code SELECT col1 / col2}. - * - *

This is available in Cassandra 4 and above. - * - *

The arguments will be parenthesized if they are instances of {@link #add} or {@link - * #subtract}. If they are raw selectors, you might have to parenthesize them yourself. - */ - @NonNull - static Selector divide(@NonNull Selector left, @NonNull Selector right) { - return new BinaryArithmeticSelector(ArithmeticOperator.QUOTIENT, left, right); - } - - /** - * Selects the remainder of two arguments, as in {@code SELECT col1 % col2}. - * - *

This is available in Cassandra 4 and above. - * - *

The arguments will be parenthesized if they are instances of {@link #add} or {@link - * #subtract}. If they are raw selectors, you might have to parenthesize them yourself. - */ - @NonNull - static Selector remainder(@NonNull Selector left, @NonNull Selector right) { - return new BinaryArithmeticSelector(ArithmeticOperator.REMAINDER, left, right); - } - - /** - * Selects the opposite of an argument, as in {@code SELECT -col1}. - * - *

This is available in Cassandra 4 and above. - * - *

The argument will be parenthesized if it is an instance of {@link #add} or {@link - * #subtract}. If it is a raw selector, you might have to parenthesize it yourself. - */ - @NonNull - static Selector negate(@NonNull Selector argument) { - return new OppositeSelector(argument); - } - - /** Selects a field inside of a UDT column, as in {@code SELECT user.name}. */ - @NonNull - static Selector field(@NonNull Selector udt, @NonNull CqlIdentifier fieldId) { - return new FieldSelector(udt, fieldId); - } - - /** - * Shortcut for {@link #field(Selector, CqlIdentifier) getUdtField(udt, - * CqlIdentifier.fromCql(fieldName))}. - */ - @NonNull - static Selector field(@NonNull Selector udt, @NonNull String fieldName) { - return field(udt, CqlIdentifier.fromCql(fieldName)); - } - - /** - * Shortcut to select a UDT field when the UDT is a simple column (as opposed to a more complex - * selection, like a nested UDT). - */ - @NonNull - static Selector field(@NonNull CqlIdentifier udtColumnId, @NonNull CqlIdentifier fieldId) { - return field(column(udtColumnId), fieldId); - } - - /** - * Shortcut for {@link #field(CqlIdentifier, CqlIdentifier) - * field(CqlIdentifier.fromCql(udtColumnName), CqlIdentifier.fromCql(fieldName))}. - */ - @NonNull - static Selector field(@NonNull String udtColumnName, @NonNull String fieldName) { - return field(CqlIdentifier.fromCql(udtColumnName), CqlIdentifier.fromCql(fieldName)); - } - - /** - * Selects an element in a collection column, as in {@code SELECT m['key']}. - * - *

As of Cassandra 4, this is only allowed in SELECT for map and set columns. DELETE accepts - * list elements as well. - */ - @NonNull - static Selector element(@NonNull Selector collection, @NonNull Term index) { - return new ElementSelector(collection, index); - } - - /** - * Shortcut for element selection when the target collection is a simple column. - * - *

In other words, this is the equivalent of {@link #element(Selector, Term) - * element(column(collectionId), index)}. - */ - @NonNull - static Selector element(@NonNull CqlIdentifier collectionId, @NonNull Term index) { - return element(column(collectionId), index); - } - - /** - * Shortcut for {@link #element(CqlIdentifier, Term) - * element(CqlIdentifier.fromCql(collectionName), index)}. - */ - @NonNull - static Selector element(@NonNull String collectionName, @NonNull Term index) { - return element(CqlIdentifier.fromCql(collectionName), index); - } - - /** - * Selects a slice in a collection column, as in {@code SELECT s[4..8]}. - * - *

As of Cassandra 4, this is only allowed for set and map columns. Those collections are - * ordered, the elements (or keys in the case of a map), will be compared to the bounds for - * inclusions. Either bound can be unspecified, but not both. - * - * @param left the left bound (inclusive). Can be {@code null} to indicate that the slice is only - * right-bound. - * @param right the right bound (inclusive). Can be {@code null} to indicate that the slice is - * only left-bound. - */ - @NonNull - static Selector range(@NonNull Selector collection, @Nullable Term left, @Nullable Term right) { - return new RangeSelector(collection, left, right); - } - - /** - * Shortcut for slice selection when the target collection is a simple column. - * - *

In other words, this is the equivalent of {@link #range(Selector, Term, Term)} - * range(column(collectionId), left, right)}. - */ - @NonNull - static Selector range( - @NonNull CqlIdentifier collectionId, @Nullable Term left, @Nullable Term right) { - return range(column(collectionId), left, right); - } - - /** - * Shortcut for {@link #range(CqlIdentifier, Term, Term) - * range(CqlIdentifier.fromCql(collectionName), left, right)}. - */ - @NonNull - static Selector range(@NonNull String collectionName, @Nullable Term left, @Nullable Term right) { - return range(CqlIdentifier.fromCql(collectionName), left, right); - } - - /** - * Selects a group of elements as a list, as in {@code SELECT [a,b,c]}. - * - *

None of the selectors should be aliased (the query builder checks this at runtime), and they - * should all produce the same data type (the query builder can't check this, so the query will - * fail at execution time). - * - * @throws IllegalArgumentException if any of the selectors is aliased. - */ - @NonNull - static Selector listOf(@NonNull Iterable elementSelectors) { - return new ListSelector(elementSelectors); - } - - /** Var-arg equivalent of {@link #listOf(Iterable)}. */ - @NonNull - static Selector listOf(@NonNull Selector... elementSelectors) { - return listOf(Arrays.asList(elementSelectors)); - } - - /** - * Selects a group of elements as a set, as in {@code SELECT {a,b,c}}. - * - *

None of the selectors should be aliased (the query builder checks this at runtime), and they - * should all produce the same data type (the query builder can't check this, so the query will - * fail at execution time). - * - * @throws IllegalArgumentException if any of the selectors is aliased. - */ - @NonNull - static Selector setOf(@NonNull Iterable elementSelectors) { - return new SetSelector(elementSelectors); - } - - /** Var-arg equivalent of {@link #setOf(Iterable)}. */ - @NonNull - static Selector setOf(@NonNull Selector... elementSelectors) { - return setOf(Arrays.asList(elementSelectors)); - } - - /** - * Selects a group of elements as a tuple, as in {@code SELECT (a,b,c)}. - * - *

None of the selectors should be aliased (the query builder checks this at runtime). - * - * @throws IllegalArgumentException if any of the selectors is aliased. - */ - @NonNull - static Selector tupleOf(@NonNull Iterable elementSelectors) { - return new TupleSelector(elementSelectors); - } - - /** Var-arg equivalent of {@link #tupleOf(Iterable)}. */ - @NonNull - static Selector tupleOf(@NonNull Selector... elementSelectors) { - return tupleOf(Arrays.asList(elementSelectors)); - } - - /** - * Selects a group of elements as a map, as in {@code SELECT {a:b,c:d}}. - * - *

None of the selectors should be aliased (the query builder checks this at runtime). In - * addition, all key selectors should produce the same type, and all value selectors as well (the - * key and value types can be different); the query builder can't check this, so the query will - * fail at execution time if the types are not uniform. - * - *

Note that Cassandra often has trouble inferring the exact map type. This will manifest as - * the error message: - * - *

-   *   Cannot infer type for term xxx in selection clause (try using a cast to force a type)
-   * 
- * - * If you run into this, consider providing the types explicitly with {@link #mapOf(Map, DataType, - * DataType)}. - * - * @throws IllegalArgumentException if any of the selectors is aliased. - */ - @NonNull - static Selector mapOf(@NonNull Map elementSelectors) { - return mapOf(elementSelectors, null, null); - } - - /** - * Selects a group of elements as a map and force the resulting map type, as in {@code SELECT - * (map){a:b,c:d}}. - * - *

To create the data types, use the constants and static methods in {@link DataTypes}, or - * {@link QueryBuilder#udt(CqlIdentifier)}. - * - * @see #mapOf(Map) - */ - @NonNull - static Selector mapOf( - @NonNull Map elementSelectors, - @Nullable DataType keyType, - @Nullable DataType valueType) { - return new MapSelector(elementSelectors, keyType, valueType); - } - - /** - * Provides a type hint for a selector, as in {@code SELECT (double)1/3}. - * - *

To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link QueryBuilder#udt(CqlIdentifier)}. - */ - @NonNull - static Selector typeHint(@NonNull Selector selector, @NonNull DataType targetType) { - return new TypeHintSelector(selector, targetType); - } - - /** - * Selects the result of a function call, as is {@code SELECT f(a,b)} - * - *

None of the arguments should be aliased (the query builder checks this at runtime). - * - * @throws IllegalArgumentException if any of the selectors is aliased. - */ - @NonNull - static Selector function( - @NonNull CqlIdentifier functionId, @NonNull Iterable arguments) { - return new FunctionSelector(null, functionId, arguments); - } - - /** Var-arg equivalent of {@link #function(CqlIdentifier, Iterable)}. */ - @NonNull - static Selector function(@NonNull CqlIdentifier functionId, @NonNull Selector... arguments) { - return function(functionId, Arrays.asList(arguments)); - } - - /** - * Shortcut for {@link #function(CqlIdentifier, Iterable) - * function(CqlIdentifier.fromCql(functionName), arguments)}. - */ - @NonNull - static Selector function(@NonNull String functionName, @NonNull Iterable arguments) { - return function(CqlIdentifier.fromCql(functionName), arguments); - } - - /** Var-arg equivalent of {@link #function(String, Iterable)}. */ - @NonNull - static Selector function(@NonNull String functionName, @NonNull Selector... arguments) { - return function(functionName, Arrays.asList(arguments)); - } - - /** - * Selects the result of a function call, as is {@code SELECT ks.f(a,b)} - * - *

None of the arguments should be aliased (the query builder checks this at runtime). - * - * @throws IllegalArgumentException if any of the selectors is aliased. - */ - @NonNull - static Selector function( - @Nullable CqlIdentifier keyspaceId, - @NonNull CqlIdentifier functionId, - @NonNull Iterable arguments) { - return new FunctionSelector(keyspaceId, functionId, arguments); - } - - /** Var-arg equivalent of {@link #function(CqlIdentifier, CqlIdentifier, Iterable)}. */ - @NonNull - static Selector function( - @Nullable CqlIdentifier keyspaceId, - @NonNull CqlIdentifier functionId, - @NonNull Selector... arguments) { - return function(keyspaceId, functionId, Arrays.asList(arguments)); - } - - /** - * Shortcut for {@link #function(CqlIdentifier, CqlIdentifier, Iterable)} - * function(CqlIdentifier.fromCql(functionName), arguments)}. - */ - @NonNull - static Selector function( - @Nullable String keyspaceName, - @NonNull String functionName, - @NonNull Iterable arguments) { - return function( - keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName), - CqlIdentifier.fromCql(functionName), - arguments); - } - - /** Var-arg equivalent of {@link #function(String, String, Iterable)}. */ - @NonNull - static Selector function( - @Nullable String keyspaceName, @NonNull String functionName, @NonNull Selector... arguments) { - return function(keyspaceName, functionName, Arrays.asList(arguments)); - } - - /** - * Shortcut to select the result of the built-in {@code writetime} function, as in {@code SELECT - * writetime(c)}. - */ - @NonNull - static Selector writeTime(@NonNull CqlIdentifier columnId) { - return function("writetime", column(columnId)); - } - - /** - * Shortcut for {@link #writeTime(CqlIdentifier) writeTime(CqlIdentifier.fromCql(columnName))}. 
- */ - @NonNull - static Selector writeTime(@NonNull String columnName) { - return writeTime(CqlIdentifier.fromCql(columnName)); - } - - /** - * Shortcut to select the result of the built-in {@code ttl} function, as in {@code SELECT - * ttl(c)}. - */ - @NonNull - static Selector ttl(@NonNull CqlIdentifier columnId) { - return function("ttl", column(columnId)); - } - - /** Shortcut for {@link #ttl(CqlIdentifier) ttl(CqlIdentifier.fromCql(columnName))}. */ - @NonNull - static Selector ttl(@NonNull String columnName) { - return ttl(CqlIdentifier.fromCql(columnName)); - } - - /** - * Casts a selector to a type, as in {@code SELECT CAST(a AS double)}. - * - *

To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link QueryBuilder#udt(CqlIdentifier)}. - * - * @throws IllegalArgumentException if the selector is aliased. - */ - @NonNull - static Selector cast(@NonNull Selector selector, @NonNull DataType targetType) { - return new CastSelector(selector, targetType); - } - - /** Shortcut to select the result of the built-in {@code toDate} function on a simple column. */ - @NonNull - static Selector toDate(@NonNull CqlIdentifier columnId) { - return function("todate", Selector.column(columnId)); - } - - /** Shortcut for {@link #toDate(CqlIdentifier) toDate(CqlIdentifier.fromCql(columnName))}. */ - @NonNull - static Selector toDate(@NonNull String columnName) { - return toDate(CqlIdentifier.fromCql(columnName)); - } - - /** - * Shortcut to select the result of the built-in {@code toTimestamp} function on a simple column. - */ - @NonNull - static Selector toTimestamp(@NonNull CqlIdentifier columnId) { - return function("totimestamp", Selector.column(columnId)); - } - - /** - * Shortcut for {@link #toTimestamp(CqlIdentifier) - * toTimestamp(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - static Selector toTimestamp(@NonNull String columnName) { - return toTimestamp(CqlIdentifier.fromCql(columnName)); - } - - /** - * Shortcut to select the result of the built-in {@code toUnixTimestamp} function on a simple - * column. - */ - @NonNull - static Selector toUnixTimestamp(@NonNull CqlIdentifier columnId) { - return function("tounixtimestamp", Selector.column(columnId)); - } - - /** - * Shortcut for {@link #toUnixTimestamp(CqlIdentifier) - * toUnixTimestamp(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - static Selector toUnixTimestamp(@NonNull String columnName) { - return toUnixTimestamp(CqlIdentifier.fromCql(columnName)); - } - - /** Aliases the selector, as in {@code SELECT count(*) AS total}. 
*/ - @NonNull - Selector as(@NonNull CqlIdentifier alias); - - /** Shortcut for {@link #as(CqlIdentifier) as(CqlIdentifier.fromCql(alias))} */ - @NonNull - default Selector as(@NonNull String alias) { - return as(CqlIdentifier.fromCql(alias)); - } - - /** @return null if the selector is not aliased. */ - @Nullable - CqlIdentifier getAlias(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/term/Term.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/term/Term.java deleted file mode 100644 index 6ff4d32b7de..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/term/Term.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.term; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.select.OngoingSelection; -import com.datastax.oss.driver.api.querybuilder.select.Selector; - -/** - * A simple expression that doesn't reference columns. - * - *

It is used as an argument to certain {@linkplain Selector selectors} (for example the indices - * in a {@linkplain OngoingSelection#range(Selector, Term, Term) range}), or as the right operand of - * {@linkplain Relation relations}. - * - *

To create a term, call one of the static factory methods in {@link QueryBuilder}: - * - *

    - *
  • {@link QueryBuilder#literal(Object) literal()} to inline a Java object into the query - * string; - *
  • {@link QueryBuilder#function(CqlIdentifier, CqlIdentifier, Iterable) function()} to invoke - * a built-in or user-defined function; - *
  • an arithmetic operator combining other terms: {@link QueryBuilder#add(Term, Term) add()}, - * {@link QueryBuilder#subtract(Term, Term) subtract()}, {@link QueryBuilder#negate(Term) - * negate()}, {@link QueryBuilder#multiply(Term, Term) multiply()}, {@link - * QueryBuilder#divide(Term, Term) divide()} or {@link QueryBuilder#remainder(Term, Term) - * remainder()}; - *
  • {@link QueryBuilder#typeHint(Term, DataType) typeHint()} to coerce another term to a - * particular CQL type; - *
  • {@link QueryBuilder#raw(String) raw()} for a raw CQL snippet. - *
- * - * Note that some of these methods have multiple overloads. - */ -public interface Term extends CqlSnippet { - - /** - * Whether the term is idempotent. - * - *

That is, whether it always produces the same result when used multiple times. For example, - * the literal {@code 1} is idempotent, the function call {@code now()} isn't. - * - *

This is used internally by the query builder to compute the {@link Statement#isIdempotent()} - * flag on the statements generated by {@link BuildableQuery#build()}. If a term is ambiguous (for - * example a raw snippet or a call to a user function), the builder is pessimistic and assumes the - * term is not idempotent. - */ - boolean isIdempotent(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/truncate/Truncate.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/truncate/Truncate.java deleted file mode 100644 index 081fc9a2c5b..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/truncate/Truncate.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.truncate; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -/** A buildable TRUNCATE statement. 
*/ -public interface Truncate extends BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Assignment.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Assignment.java deleted file mode 100644 index 4c763b9930b..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Assignment.java +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.ColumnComponentLeftOperand; -import com.datastax.oss.driver.internal.querybuilder.lhs.ColumnLeftOperand; -import com.datastax.oss.driver.internal.querybuilder.lhs.FieldLeftOperand; -import com.datastax.oss.driver.internal.querybuilder.update.AppendAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.AppendListElementAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.AppendMapEntryAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.AppendSetElementAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.DecrementAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.DefaultAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.IncrementAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.PrependAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.PrependListElementAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.PrependMapEntryAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.PrependSetElementAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.RemoveAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.RemoveListElementAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.RemoveMapEntryAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.RemoveSetElementAssignment; -import edu.umd.cs.findbugs.annotations.NonNull; - 
-/** An assignment that appears after the SET keyword in an UPDATE statement. */ -public interface Assignment extends CqlSnippet { - - /** Assigns a value to a column, as in {@code SET c=?}. */ - @NonNull - static Assignment setColumn(@NonNull CqlIdentifier columnId, @NonNull Term value) { - return new DefaultAssignment(new ColumnLeftOperand(columnId), "=", value); - } - - /** - * Shortcut for {@link #setColumn(CqlIdentifier, Term) - * setColumn(CqlIdentifier.fromCql(columnName), value)}. - */ - @NonNull - static Assignment setColumn(@NonNull String columnName, @NonNull Term value) { - return setColumn(CqlIdentifier.fromCql(columnName), value); - } - - /** Assigns a value to a field of a UDT, as in {@code SET address.zip=?}. */ - @NonNull - static Assignment setField( - @NonNull CqlIdentifier columnId, @NonNull CqlIdentifier fieldId, @NonNull Term value) { - return new DefaultAssignment(new FieldLeftOperand(columnId, fieldId), "=", value); - } - - /** - * Shortcut for {@link #setField(CqlIdentifier, CqlIdentifier, Term) - * setField(CqlIdentifier.fromCql(columnName), CqlIdentifier.fromCql(fieldName), value)}. - */ - @NonNull - static Assignment setField( - @NonNull String columnName, @NonNull String fieldName, @NonNull Term value) { - return setField(CqlIdentifier.fromCql(columnName), CqlIdentifier.fromCql(fieldName), value); - } - - /** Assigns a value to an entry in a map column, as in {@code SET map[?]=?}. */ - @NonNull - static Assignment setMapValue( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - return new DefaultAssignment(new ColumnComponentLeftOperand(columnId, key), "=", value); - } - - /** - * Shortcut for {@link #setMapValue(CqlIdentifier, Term, Term) - * setMapValue(CqlIdentifier.fromCql(columnName), index, value)}. 
- */ - @NonNull - static Assignment setMapValue( - @NonNull String columnName, @NonNull Term key, @NonNull Term value) { - return setMapValue(CqlIdentifier.fromCql(columnName), key, value); - } - - /** Assigns a value to an index in a list column, as in {@code SET list[?]=?}. */ - @NonNull - static Assignment setListValue( - @NonNull CqlIdentifier columnId, @NonNull Term index, @NonNull Term value) { - return new DefaultAssignment(new ColumnComponentLeftOperand(columnId, index), "=", value); - } - - /** - * Shortcut for {@link #setListValue(CqlIdentifier, Term, Term) - * setMapValue(CqlIdentifier.fromCql(columnName), index, value)}. - */ - @NonNull - static Assignment setListValue( - @NonNull String columnName, @NonNull Term index, @NonNull Term value) { - return setListValue(CqlIdentifier.fromCql(columnName), index, value); - } - - /** Increments a counter, as in {@code SET c=c+?}. */ - @NonNull - static Assignment increment(@NonNull CqlIdentifier columnId, @NonNull Term amount) { - return new IncrementAssignment(columnId, amount); - } - - /** - * Shortcut for {@link #increment(CqlIdentifier, Term) - * increment(CqlIdentifier.fromCql(columnName), amount)} - */ - @NonNull - static Assignment increment(@NonNull String columnName, @NonNull Term amount) { - return increment(CqlIdentifier.fromCql(columnName), amount); - } - - /** Increments a counter by 1, as in {@code SET c=c+1} . */ - @NonNull - static Assignment increment(@NonNull CqlIdentifier columnId) { - return increment(columnId, QueryBuilder.literal(1)); - } - - /** Shortcut for {@link #increment(CqlIdentifier) CqlIdentifier.fromCql(columnName)}. */ - @NonNull - static Assignment increment(@NonNull String columnName) { - return increment(CqlIdentifier.fromCql(columnName)); - } - - /** Decrements a counter, as in {@code SET c=c-?}. 
*/ - @NonNull - static Assignment decrement(@NonNull CqlIdentifier columnId, @NonNull Term amount) { - return new DecrementAssignment(columnId, amount); - } - - /** - * Shortcut for {@link #decrement(CqlIdentifier, Term) - * decrement(CqlIdentifier.fromCql(columnName), amount)} - */ - @NonNull - static Assignment decrement(@NonNull String columnName, @NonNull Term amount) { - return decrement(CqlIdentifier.fromCql(columnName), amount); - } - - /** Decrements a counter by 1, as in {@code SET c=c-1} . */ - @NonNull - static Assignment decrement(@NonNull CqlIdentifier columnId) { - return decrement(columnId, QueryBuilder.literal(1)); - } - - /** Shortcut for {@link #decrement(CqlIdentifier) CqlIdentifier.fromCql(columnName)}. */ - @NonNull - static Assignment decrement(@NonNull String columnName) { - return decrement(CqlIdentifier.fromCql(columnName)); - } - - /** - * Appends to a collection column, as in {@code SET l=l+?}. - * - *

The term must be a collection of the same type as the column. - */ - @NonNull - static Assignment append(@NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return new AppendAssignment(columnId, suffix); - } - - /** - * Shortcut for {@link #append(CqlIdentifier, Term) append(CqlIdentifier.fromCql(columnName), - * suffix)}. - */ - @NonNull - static Assignment append(@NonNull String columnName, @NonNull Term suffix) { - return append(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Appends a single element to a list column, as in {@code SET l=l+[?]}. - * - *

The term must be of the same type as the column's elements. - */ - @NonNull - static Assignment appendListElement(@NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return new AppendListElementAssignment(columnId, suffix); - } - - /** - * Shortcut for {@link #appendListElement(CqlIdentifier, Term) - * appendListElement(CqlIdentifier.fromCql(columnName), suffix)}. - */ - @NonNull - static Assignment appendListElement(@NonNull String columnName, @NonNull Term suffix) { - return appendListElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Appends a single element to a set column, as in {@code SET s=s+{?}}. - * - *

The term must be of the same type as the column's elements. - */ - @NonNull - static Assignment appendSetElement(@NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return new AppendSetElementAssignment(columnId, suffix); - } - - /** - * Shortcut for {@link #appendSetElement(CqlIdentifier, Term) - * appendSetElement(CqlIdentifier.fromCql(columnName), suffix)}. - */ - @NonNull - static Assignment appendSetElement(@NonNull String columnName, @NonNull Term suffix) { - return appendSetElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Appends a single entry to a map column, as in {@code SET m=m+{?:?}}. - * - *

The terms must be of the same type as the column's keys and values respectively. - */ - @NonNull - static Assignment appendMapEntry( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - return new AppendMapEntryAssignment(columnId, key, value); - } - - /** - * Shortcut for {@link #appendMapEntry(CqlIdentifier, Term, Term) - * appendMapEntry(CqlIdentifier.fromCql(columnName), key, value)}. - */ - @NonNull - static Assignment appendMapEntry( - @NonNull String columnName, @NonNull Term key, @NonNull Term value) { - return appendMapEntry(CqlIdentifier.fromCql(columnName), key, value); - } - - /** - * Prepends to a collection column, as in {@code SET l=[1,2,3]+l}. - * - *

The term must be a collection of the same type as the column. - */ - @NonNull - static Assignment prepend(@NonNull CqlIdentifier columnId, @NonNull Term prefix) { - return new PrependAssignment(columnId, prefix); - } - - /** - * Shortcut for {@link #prepend(CqlIdentifier, Term) prepend(CqlIdentifier.fromCql(columnName), - * prefix)}. - */ - @NonNull - static Assignment prepend(@NonNull String columnName, @NonNull Term prefix) { - return prepend(CqlIdentifier.fromCql(columnName), prefix); - } - - /** - * Prepends a single element to a list column, as in {@code SET l=[?]+l}. - * - *

The term must be of the same type as the column's elements. - */ - @NonNull - static Assignment prependListElement(@NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return new PrependListElementAssignment(columnId, suffix); - } - - /** - * Shortcut for {@link #prependListElement(CqlIdentifier, Term) - * prependListElement(CqlIdentifier.fromCql(columnName), suffix)}. - */ - @NonNull - static Assignment prependListElement(@NonNull String columnName, @NonNull Term suffix) { - return prependListElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Prepends a single element to a set column, as in {@code SET s={?}+s}. - * - *

The term must be of the same type as the column's elements. - */ - @NonNull - static Assignment prependSetElement(@NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return new PrependSetElementAssignment(columnId, suffix); - } - - /** - * Shortcut for {@link #prependSetElement(CqlIdentifier, Term) - * prependSetElement(CqlIdentifier.fromCql(columnName), suffix)}. - */ - @NonNull - static Assignment prependSetElement(@NonNull String columnName, @NonNull Term suffix) { - return prependSetElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Prepends a single entry to a map column, as in {@code SET m={?:?}+m}. - * - *

The terms must be of the same type as the column's keys and values respectively. - */ - @NonNull - static Assignment prependMapEntry( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - return new PrependMapEntryAssignment(columnId, key, value); - } - - /** - * Shortcut for {@link #prependMapEntry(CqlIdentifier, Term, Term) - * prependMapEntry(CqlIdentifier.fromCql(columnName), key, value)}. - */ - @NonNull - static Assignment prependMapEntry( - @NonNull String columnName, @NonNull Term key, @NonNull Term value) { - return prependMapEntry(CqlIdentifier.fromCql(columnName), key, value); - } - - /** - * Removes elements from a collection, as in {@code SET l=l-[1,2,3]}. - * - *

The term must be a collection of the same type as the column. - * - *

DO NOT USE THIS TO DECREMENT COUNTERS. Use the dedicated {@link - * #decrement(CqlIdentifier, Term)} methods instead. While the operator is technically the same, - * and it would be possible to generate an expression such as {@code counter-=1} with this method, - * a collection removal is idempotent while a counter decrement isn't. - */ - @NonNull - static Assignment remove(@NonNull CqlIdentifier columnId, @NonNull Term collectionToRemove) { - return new RemoveAssignment(columnId, collectionToRemove); - } - - /** - * Shortcut for {@link #remove(CqlIdentifier, Term) remove(CqlIdentifier.fromCql(columnName), - * collectionToRemove)}. - */ - @NonNull - static Assignment remove(@NonNull String columnName, @NonNull Term collectionToRemove) { - return remove(CqlIdentifier.fromCql(columnName), collectionToRemove); - } - - /** - * Removes a single element from a list column, as in {@code SET l=l-[?]}. - * - *

The term must be of the same type as the column's elements. - */ - @NonNull - static Assignment removeListElement(@NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return new RemoveListElementAssignment(columnId, suffix); - } - - /** - * Shortcut for {@link #removeListElement(CqlIdentifier, Term) - * removeListElement(CqlIdentifier.fromCql(columnName), suffix)}. - */ - @NonNull - static Assignment removeListElement(@NonNull String columnName, @NonNull Term suffix) { - return removeListElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Removes a single element from a set column, as in {@code SET s=s-{?}}. - * - *

The term must be of the same type as the column's elements. - */ - @NonNull - static Assignment removeSetElement(@NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return new RemoveSetElementAssignment(columnId, suffix); - } - - /** - * Shortcut for {@link #removeSetElement(CqlIdentifier, Term) - * removeSetElement(CqlIdentifier.fromCql(columnName), suffix)}. - */ - @NonNull - static Assignment removeSetElement(@NonNull String columnName, @NonNull Term suffix) { - return removeSetElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Removes a single entry from a map column, as in {@code SET m=m-{?:?}}. - * - *

The terms must be of the same type as the column's keys and values respectively. - */ - @NonNull - static Assignment removeMapEntry( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - return new RemoveMapEntryAssignment(columnId, key, value); - } - - /** - * Shortcut for {@link #removeMapEntry(CqlIdentifier, Term, Term) - * removeMapEntry(CqlIdentifier.fromCql(columnName), key, value)}. - */ - @NonNull - static Assignment removeMapEntry( - @NonNull String columnName, @NonNull Term key, @NonNull Term value) { - return removeMapEntry(CqlIdentifier.fromCql(columnName), key, value); - } - - /** - * Whether this assignment is idempotent. - * - *

That is, whether it always sets its target column to the same value when used multiple - * times. For example, {@code UPDATE ... SET c=1} is idempotent, {@code SET l=l+[1]} isn't. - * - *

This is used internally by the query builder to compute the {@link Statement#isIdempotent()} - * flag on the UPDATE statements generated by {@link BuildableQuery#build()}. If an assignment is - * ambiguous (for example a raw snippet or a call to a user function in the right operands), the - * builder is pessimistic and assumes the term is not idempotent. - */ - boolean isIdempotent(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/OngoingAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/OngoingAssignment.java deleted file mode 100644 index 8264c1b4781..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/OngoingAssignment.java +++ /dev/null @@ -1,579 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; - -public interface OngoingAssignment { - - /** - * Adds an assignment to this statement, as in {@code UPDATE foo SET v=1}. - * - *

To create the argument, use one of the factory methods in {@link Assignment}, for example - * Assignment{@link #setColumn(CqlIdentifier, Term)}. This type also provides shortcuts to create - * and add the assignment in one call, for example {@link #setColumn(CqlIdentifier, Term)}. - * - *

If you add multiple assignments as one, consider {@link #set(Iterable)} as a more efficient - * alternative. - */ - @NonNull - UpdateWithAssignments set(@NonNull Assignment assignment); - - /** - * Adds multiple assignments at once. - * - *

This is slightly more efficient than adding the assignments one by one (since the underlying - * implementation of this object is immutable). - * - *

To create the argument, use one of the factory methods in {@link Assignment}, for example - * Assignment{@link #setColumn(CqlIdentifier, Term)}. - */ - @NonNull - UpdateWithAssignments set(@NonNull Iterable additionalAssignments); - - /** Var-arg equivalent of {@link #set(Iterable)}. */ - @NonNull - default UpdateWithAssignments set(@NonNull Assignment... assignments) { - return set(Arrays.asList(assignments)); - } - - /** - * Assigns a value to a column, as in {@code SET c=1}. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.setColumn(columnId, value))}. - * - * @see Assignment#setColumn(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments setColumn(@NonNull CqlIdentifier columnId, @NonNull Term value) { - return set(Assignment.setColumn(columnId, value)); - } - - /** - * Shortcut for {@link #setColumn(CqlIdentifier, Term) - * setColumn(CqlIdentifier.fromCql(columnName), value)}. - * - * @see Assignment#setColumn(String, Term) - */ - @NonNull - default UpdateWithAssignments setColumn(@NonNull String columnName, @NonNull Term value) { - return setColumn(CqlIdentifier.fromCql(columnName), value); - } - - /** - * Assigns a value to a field of a UDT, as in {@code SET address.zip=?}. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.setField(columnId, fieldId, - * value))}. - * - * @see Assignment#setField(CqlIdentifier, CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments setField( - @NonNull CqlIdentifier columnId, @NonNull CqlIdentifier fieldId, @NonNull Term value) { - return set(Assignment.setField(columnId, fieldId, value)); - } - - /** - * Shortcut for {@link #setField(CqlIdentifier, CqlIdentifier, Term) - * setField(CqlIdentifier.fromCql(columnName), CqlIdentifier.fromCql(fieldName), value)}. - * - * @see Assignment#setField(String, String, Term) - */ - @NonNull - default UpdateWithAssignments setField( - @NonNull String columnName, @NonNull String fieldName, @NonNull Term value) { - return setField(CqlIdentifier.fromCql(columnName), CqlIdentifier.fromCql(fieldName), value); - } - - /** - * Assigns a value to an entry in a map column, as in {@code SET map[?]=?}. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.setMapValue(columnId, key, - * value))}. - * - * @see Assignment#setMapValue(CqlIdentifier, Term, Term) - */ - @NonNull - default UpdateWithAssignments setMapValue( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - return set(Assignment.setMapValue(columnId, key, value)); - } - - /** - * Shortcut for {@link #setMapValue(CqlIdentifier, Term, Term) - * setMapValue(CqlIdentifier.fromCql(columnName), key, value)}. - * - * @see Assignment#setMapValue(String, Term, Term) - */ - @NonNull - default UpdateWithAssignments setMapValue( - @NonNull String columnName, @NonNull Term key, @NonNull Term value) { - return setMapValue(CqlIdentifier.fromCql(columnName), key, value); - } - - /** - * Assigns a value to an index in a list column, as in {@code SET list[?]=?}. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.setListValue(columnId, index, - * value))}. - * - * @see Assignment#setListValue(CqlIdentifier, Term, Term) - */ - @NonNull - default UpdateWithAssignments setListValue( - @NonNull CqlIdentifier columnId, @NonNull Term index, @NonNull Term value) { - return set(Assignment.setListValue(columnId, index, value)); - } - - /** - * Shortcut for {@link #setListValue(CqlIdentifier, Term, Term) - * setListValue(CqlIdentifier.fromCql(columnName), index, value)}. - * - * @see Assignment#setListValue(String, Term, Term) - */ - @NonNull - default UpdateWithAssignments setListValue( - @NonNull String columnName, @NonNull Term index, @NonNull Term value) { - return setListValue(CqlIdentifier.fromCql(columnName), index, value); - } - - /** - * Increments a counter, as in {@code SET c+=?}. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.increment(columnId, amount))}. - * - * @see Assignment#increment(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments increment(@NonNull CqlIdentifier columnId, @NonNull Term amount) { - return set(Assignment.increment(columnId, amount)); - } - - /** - * Shortcut for {@link #increment(CqlIdentifier, Term) - * increment(CqlIdentifier.fromCql(columnName), amount)} - * - * @see Assignment#increment(String, Term) - */ - @NonNull - default UpdateWithAssignments increment(@NonNull String columnName, @NonNull Term amount) { - return increment(CqlIdentifier.fromCql(columnName), amount); - } - - /** - * Increments a counter by 1, as in {@code SET c+=1} . - * - *

This is a shortcut for {@link #increment(CqlIdentifier, Term)} increment(columnId, - * QueryBuilder.literal(1))}. - * - * @see Assignment#increment(CqlIdentifier) - */ - @NonNull - default UpdateWithAssignments increment(@NonNull CqlIdentifier columnId) { - return increment(columnId, QueryBuilder.literal(1)); - } - - /** - * Shortcut for {@link #increment(CqlIdentifier) CqlIdentifier.fromCql(columnName)}. - * - * @see Assignment#increment(CqlIdentifier) - */ - @NonNull - default UpdateWithAssignments increment(@NonNull String columnName) { - return increment(CqlIdentifier.fromCql(columnName)); - } - - /** - * Decrements a counter, as in {@code SET c-=?}. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.decrement(columnId, amount))}. - * - * @see Assignment#decrement(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments decrement(@NonNull CqlIdentifier columnId, @NonNull Term amount) { - return set(Assignment.decrement(columnId, amount)); - } - - /** - * Shortcut for {@link #decrement(CqlIdentifier, Term) - * decrement(CqlIdentifier.fromCql(columnName), amount)} - * - * @see Assignment#decrement(String, Term) - */ - @NonNull - default UpdateWithAssignments decrement(@NonNull String columnName, @NonNull Term amount) { - return decrement(CqlIdentifier.fromCql(columnName), amount); - } - - /** - * Decrements a counter by 1, as in {@code SET c-=1}. - * - *

This is a shortcut for {@link #decrement(CqlIdentifier, Term)} decrement(columnId, 1)}. - * - * @see Assignment#decrement(CqlIdentifier) - */ - @NonNull - default UpdateWithAssignments decrement(@NonNull CqlIdentifier columnId) { - return decrement(columnId, QueryBuilder.literal(1)); - } - - /** - * Shortcut for {@link #decrement(CqlIdentifier) CqlIdentifier.fromCql(columnName)}. - * - * @see Assignment#decrement(String) - */ - @NonNull - default UpdateWithAssignments decrement(@NonNull String columnName) { - return decrement(CqlIdentifier.fromCql(columnName)); - } - - /** - * Appends to a collection column, as in {@code SET l=l+?}. - * - *

The term must be a collection of the same type as the column. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.append(columnId, suffix))}. - * - * @see Assignment#append(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments append(@NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return set(Assignment.append(columnId, suffix)); - } - - /** - * Shortcut for {@link #append(CqlIdentifier, Term) append(CqlIdentifier.fromCql(columnName), - * suffix)}. - * - * @see Assignment#append(String, Term) - */ - @NonNull - default UpdateWithAssignments append(@NonNull String columnName, @NonNull Term suffix) { - return append(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Appends a single element to a list column, as in {@code SET l=l+[?]}. - * - *

The term must be of the same type as the column's elements. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.appendListElement(columnId, - * suffix))}. - * - * @see Assignment#appendListElement(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments appendListElement( - @NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return set(Assignment.appendListElement(columnId, suffix)); - } - - /** - * Shortcut for {@link #appendListElement(CqlIdentifier, Term) - * appendListElement(CqlIdentifier.fromCql(columnName), suffix)}. - * - * @see Assignment#appendListElement(String, Term) - */ - @NonNull - default UpdateWithAssignments appendListElement( - @NonNull String columnName, @NonNull Term suffix) { - return appendListElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Appends a single element to a set column, as in {@code SET s=s+{?}}. - * - *

The term must be of the same type as the column's elements. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.appendSetElement(columnId, - * suffix))}. - * - * @see Assignment#appendSetElement(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments appendSetElement( - @NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return set(Assignment.appendSetElement(columnId, suffix)); - } - - /** - * Shortcut for {@link #appendSetElement(CqlIdentifier, Term) - * appendSetElement(CqlIdentifier.fromCql(columnName), suffix)}. - */ - @NonNull - default UpdateWithAssignments appendSetElement(@NonNull String columnName, @NonNull Term suffix) { - return appendSetElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Appends a single entry to a map column, as in {@code SET m=m+{?:?}}. - * - *

The terms must be of the same type as the column's keys and values respectively. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.appendMapEntry(columnId, key, - * value)}. - * - * @see Assignment#appendMapEntry(CqlIdentifier, Term, Term) - */ - @NonNull - default UpdateWithAssignments appendMapEntry( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - return set(Assignment.appendMapEntry(columnId, key, value)); - } - - /** - * Shortcut for {@link #appendMapEntry(CqlIdentifier, Term, Term) - * appendMapEntry(CqlIdentifier.fromCql(columnName), key, value)}. - * - * @see Assignment#appendMapEntry(String, Term, Term) - */ - @NonNull - default UpdateWithAssignments appendMapEntry( - @NonNull String columnName, @NonNull Term key, @NonNull Term value) { - return appendMapEntry(CqlIdentifier.fromCql(columnName), key, value); - } - - /** - * Prepends to a collection column, as in {@code SET l=[1,2,3]+l}. - * - *

The term must be a collection of the same type as the column. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.prepend(columnId, prefix))}. - * - * @see Assignment#prepend(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments prepend(@NonNull CqlIdentifier columnId, @NonNull Term prefix) { - return set(Assignment.prepend(columnId, prefix)); - } - - /** - * Shortcut for {@link #prepend(CqlIdentifier, Term) prepend(CqlIdentifier.fromCql(columnName), - * prefix)}. - * - * @see Assignment#prepend(String, Term) - */ - @NonNull - default UpdateWithAssignments prepend(@NonNull String columnName, @NonNull Term prefix) { - return prepend(CqlIdentifier.fromCql(columnName), prefix); - } - - /** - * Prepends a single element to a list column, as in {@code SET l=[?]+l}. - * - *

The term must be of the same type as the column's elements. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.prependListElement(columnId, - * suffix))}. - * - * @see Assignment#prependListElement(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments prependListElement( - @NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return set(Assignment.prependListElement(columnId, suffix)); - } - - /** - * Shortcut for {@link #prependListElement(CqlIdentifier, Term) - * prependListElement(CqlIdentifier.fromCql(columnName), suffix)}. - * - * @see Assignment#prependListElement(String, Term) - */ - @NonNull - default UpdateWithAssignments prependListElement( - @NonNull String columnName, @NonNull Term suffix) { - return prependListElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Prepends a single element to a set column, as in {@code SET s={?}+s}. - * - *

The term must be of the same type as the column's elements. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.prependSetElement(columnId, - * suffix))}. - * - * @see Assignment#prependSetElement(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments prependSetElement( - @NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return set(Assignment.prependSetElement(columnId, suffix)); - } - - /** - * Shortcut for {@link #prependSetElement(CqlIdentifier, Term) - * prependSetElement(CqlIdentifier.fromCql(columnName), suffix)}. - * - * @see Assignment#prependSetElement(String, Term) - */ - @NonNull - default UpdateWithAssignments prependSetElement( - @NonNull String columnName, @NonNull Term suffix) { - return prependSetElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Prepends a single entry to a map column, as in {@code SET m={?:?}+m}. - * - *

The terms must be of the same type as the column's keys and values respectively. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.prependMapEntry(columnId, key, - * value))}. - * - * @see Assignment#prependMapEntry(CqlIdentifier, Term, Term) - */ - @NonNull - default UpdateWithAssignments prependMapEntry( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - return set(Assignment.prependMapEntry(columnId, key, value)); - } - - /** - * Shortcut for {@link #prependMapEntry(CqlIdentifier, Term, Term) - * prependMapEntry(CqlIdentifier.fromCql(columnName), key, value)}. - * - * @see Assignment#prependMapEntry(String, Term, Term) - */ - @NonNull - default UpdateWithAssignments prependMapEntry( - @NonNull String columnName, @NonNull Term key, @NonNull Term value) { - return prependMapEntry(CqlIdentifier.fromCql(columnName), key, value); - } - - /** - * Removes elements from a collection, as in {@code SET l=l-[1,2,3]}. - * - *

The term must be a collection of the same type as the column. - * - *

DO NOT USE THIS TO DECREMENT COUNTERS. Use the dedicated {@link - * #decrement(CqlIdentifier, Term)} methods instead. While the operator is technically the same, - * and it would be possible to generate an expression such as {@code counter-=1} with this method, - * a collection removal is idempotent while a counter decrement isn't. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.remove(columnId, - * collectionToRemove))}. - * - * @see Assignment#remove(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments remove( - @NonNull CqlIdentifier columnId, @NonNull Term collectionToRemove) { - return set(Assignment.remove(columnId, collectionToRemove)); - } - - /** - * Shortcut for {@link #remove(CqlIdentifier, Term) remove(CqlIdentifier.fromCql(columnName), - * collectionToRemove)}. - * - * @see Assignment#remove(String, Term) - */ - @NonNull - default UpdateWithAssignments remove( - @NonNull String columnName, @NonNull Term collectionToRemove) { - return remove(CqlIdentifier.fromCql(columnName), collectionToRemove); - } - - /** - * Removes a single element to a list column, as in {@code SET l=l-[?]}. - * - *

The term must be of the same type as the column's elements. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.removeListElement(columnId, - * suffix))}. - * - * @see Assignment#removeListElement(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments removeListElement( - @NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return set(Assignment.removeListElement(columnId, suffix)); - } - - /** - * Shortcut for {@link #removeListElement(CqlIdentifier, Term) - * removeListElement(CqlIdentifier.fromCql(columnName), suffix)}. - * - * @see Assignment#removeListElement(String, Term) - */ - @NonNull - default UpdateWithAssignments removeListElement( - @NonNull String columnName, @NonNull Term suffix) { - return removeListElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Removes a single element to a set column, as in {@code SET s=s-{?}}. - * - *

The term must be of the same type as the column's elements. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.removeSetElement(columnId, - * suffix))}. - * - * @see Assignment#removeSetElement(CqlIdentifier, Term) - */ - @NonNull - default UpdateWithAssignments removeSetElement( - @NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return set(Assignment.removeSetElement(columnId, suffix)); - } - - /** - * Shortcut for {@link #removeSetElement(CqlIdentifier, Term) - * removeSetElement(CqlIdentifier.fromCql(columnName), suffix)}. - */ - @NonNull - default UpdateWithAssignments removeSetElement(@NonNull String columnName, @NonNull Term suffix) { - return removeSetElement(CqlIdentifier.fromCql(columnName), suffix); - } - - /** - * Removes a single entry to a map column, as in {@code SET m=m-{?:?}}. - * - *

The terms must be of the same type as the column's keys and values respectively. - * - *

This is a shortcut for {@link #set(Assignment) set(Assignment.removeMapEntry(columnId, key, - * value)}. - * - * @see Assignment#removeMapEntry(CqlIdentifier, Term, Term) - */ - @NonNull - default UpdateWithAssignments removeMapEntry( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - return set(Assignment.removeMapEntry(columnId, key, value)); - } - - /** - * Shortcut for {@link #removeMapEntry(CqlIdentifier, Term, Term) - * removeMapEntry(CqlIdentifier.fromCql(columnName), key, value)}. - * - * @see Assignment#removeMapEntry(String, Term, Term) - */ - @NonNull - default UpdateWithAssignments removeMapEntry( - @NonNull String columnName, @NonNull Term key, @NonNull Term value) { - return removeMapEntry(CqlIdentifier.fromCql(columnName), key, value); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Update.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Update.java deleted file mode 100644 index eb791ad0cd1..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Update.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.update; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.condition.ConditionalStatement; -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; - -/** - * A buildable UPDATE statement that has at least one assignment and one WHERE clause. You can keep - * adding WHERE clauses, or add IF conditions. - */ -public interface Update - extends OngoingWhereClause, ConditionalStatement, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateStart.java deleted file mode 100644 index de6712c5b93..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateStart.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.update; - -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The beginning of an UPDATE statement. 
It needs at least one assignment before the WHERE clause - * can be added. - */ -public interface UpdateStart extends OngoingAssignment { - - /** - * Adds a USING TIMESTAMP clause to this statement with a literal value. - * - *

If this method or {@link #usingTimestamp(BindMarker)} is called multiple times, the last - * value is used. - */ - @NonNull - UpdateStart usingTimestamp(long timestamp); - - /** - * Adds a USING TIMESTAMP clause to this statement with a bind marker. - * - *

If this method or {@link #usingTimestamp(long)} is called multiple times, the last value is - * used. - */ - @NonNull - UpdateStart usingTimestamp(@NonNull BindMarker bindMarker); - - /** - * Adds a {@code USING TTL} clause to this statement with a literal value. Setting a value of - * {@code null} will remove the {@code USING TTL} clause on this statement. Setting a value of - * {@code 0} will update the data and remove any TTL on the column when the statement is executed, - * overriding any TTL (table or column) that may exist in Cassandra. - * - *

If this method or {@link #usingTtl(BindMarker) } is called multiple times, the value from - * the last invocation is used. - * - * @param ttlInSeconds Time, in seconds, the inserted data should live before expiring. - */ - @NonNull - UpdateStart usingTtl(int ttlInSeconds); - - /** - * Adds a {@code USING TTL} clause to this statement with a bind marker. Setting a value of {@code - * null} will remove the {@code USING TTL} clause on this statement. Binding a value of {@code 0} - * will update the data and remove any TTL on the column when the statement is executed, - * overriding any TTL (table or column) that may exist in Cassandra. - * - *

If this method or {@link #usingTtl(int)} is called multiple times, the value from the last - * invocation is used. - * - * @param bindMarker A bind marker that is understood to be a value in seconds. - */ - @NonNull - UpdateStart usingTtl(@NonNull BindMarker bindMarker); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateWithAssignments.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateWithAssignments.java deleted file mode 100644 index 106cb5d12eb..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateWithAssignments.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.update; - -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; - -/** - * An UPDATE statement that has at least one assignment. You can keep adding assignments, or add - * WHERE clauses to get a buildable statement. 
- */ -public interface UpdateWithAssignments extends OngoingAssignment, OngoingWhereClause {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ArithmeticOperator.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ArithmeticOperator.java deleted file mode 100644 index 71a93b87b18..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ArithmeticOperator.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public enum ArithmeticOperator { - OPPOSITE("-", 2, 2), - PRODUCT("*", 2, 2), - QUOTIENT("/", 2, 3), - REMAINDER("%", 2, 3), - SUM("+", 1, 1), - DIFFERENCE("-", 1, 2), - ; - - private final String symbol; - private final int precedenceLeft; - private final int precedenceRight; - - ArithmeticOperator(String symbol, int precedenceLeft, int precedenceRight) { - this.symbol = symbol; - this.precedenceLeft = precedenceLeft; - this.precedenceRight = precedenceRight; - } - - @NonNull - public String getSymbol() { - return symbol; - } - - public int getPrecedenceLeft() { - return precedenceLeft; - } - - public int getPrecedenceRight() { - return precedenceRight; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/CqlHelper.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/CqlHelper.java deleted file mode 100644 index 55a923e46ad..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/CqlHelper.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collection; - -public class CqlHelper { - - public static void appendIds( - @NonNull Iterable ids, - @NonNull StringBuilder builder, - @Nullable String prefix, - @NonNull String separator, - @Nullable String suffix) { - boolean first = true; - for (CqlIdentifier id : ids) { - if (first) { - if (prefix != null) { - builder.append(prefix); - } - first = false; - } else { - builder.append(separator); - } - builder.append(id.asCql(true)); - } - if (!first && suffix != null) { - builder.append(suffix); - } - } - - public static void append( - @NonNull Iterable snippets, - @NonNull StringBuilder builder, - @Nullable String prefix, - @NonNull String separator, - @Nullable String suffix) { - boolean first = true; - for (CqlSnippet snippet : snippets) { - if (first) { - if (prefix != null) { - builder.append(prefix); - } - first = false; - } else { - builder.append(separator); - } - snippet.appendTo(builder); - } - if (!first && suffix != null) { - builder.append(suffix); - } - } - - public static void qualify( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier element, - @NonNull StringBuilder builder) { - if (keyspace != null) { - builder.append(keyspace.asCql(true)).append('.'); - } - builder.append(element.asCql(true)); - } - - public static void buildPrimaryKey( - @NonNull Collection partitionKeyColumns, - @NonNull Collection clusteringKeyColumns, - @NonNull StringBuilder builder) { - builder.append("PRIMARY KEY("); - boolean firstKey = true; - - if (partitionKeyColumns.size() > 1) { - builder.append('('); - } - for (CqlIdentifier partitionColumn : partitionKeyColumns) { - if (firstKey) { - firstKey = false; - } else { - builder.append(','); - } - 
builder.append(partitionColumn.asCql(true)); - } - if (partitionKeyColumns.size() > 1) { - builder.append(')'); - } - - for (CqlIdentifier clusteringColumn : clusteringKeyColumns) { - builder.append(',').append(clusteringColumn.asCql(true)); - } - builder.append(')'); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultLiteral.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultLiteral.java deleted file mode 100644 index 3d9349b5536..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultLiteral.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.querybuilder.Literal; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultLiteral implements Literal { - - private final ValueT value; - private final TypeCodec codec; - private final CqlIdentifier alias; - - public DefaultLiteral(@Nullable ValueT value, @Nullable TypeCodec codec) { - this(value, codec, null); - } - - public DefaultLiteral( - @Nullable ValueT value, @Nullable TypeCodec codec, @Nullable CqlIdentifier alias) { - Preconditions.checkArgument( - value == null || codec != null, "Must provide a codec if the value is not null"); - this.value = value; - this.codec = codec; - this.alias = alias; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - if (value == null) { - builder.append("NULL"); - } else { - builder.append(codec.format(value)); - } - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @Override - public boolean isIdempotent() { - return true; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new DefaultLiteral<>(value, codec, alias); - } - - @Nullable - public ValueT getValue() { - return value; - } - - @Nullable - public TypeCodec getCodec() { - return codec; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultRaw.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultRaw.java deleted file mode 100644 index 
ad07b895304..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultRaw.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.Raw; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultRaw implements Raw { - - private final String rawExpression; - private final CqlIdentifier alias; - - public DefaultRaw(@NonNull String rawExpression) { - this(rawExpression, null); - } - - private DefaultRaw(@NonNull String rawExpression, @Nullable CqlIdentifier alias) { - Preconditions.checkNotNull(rawExpression); - this.rawExpression = rawExpression; - this.alias = alias; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new DefaultRaw(rawExpression, alias); - } - - @Override 
- public void appendTo(@NonNull StringBuilder builder) { - builder.append(rawExpression); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @Override - public boolean isIdempotent() { - return false; - } - - @NonNull - public String getRawExpression() { - return rawExpression; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DefaultRaw) { - DefaultRaw that = (DefaultRaw) other; - return this.rawExpression.equals(that.rawExpression) - && Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(rawExpression, alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ImmutableCollections.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ImmutableCollections.java deleted file mode 100644 index d60d6f737e3..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ImmutableCollections.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.function.Function; - -public class ImmutableCollections { - - @NonNull - public static ImmutableList append(@NonNull ImmutableList list, @NonNull T newElement) { - return ImmutableList.builder().addAll(list).add(newElement).build(); - } - - @NonNull - public static ImmutableList concat( - @NonNull ImmutableList list1, @NonNull Iterable list2) { - return ImmutableList.builder().addAll(list1).addAll(list2).build(); - } - - @NonNull - public static ImmutableList modifyLast( - @NonNull ImmutableList list, @NonNull Function change) { - ImmutableList.Builder builder = ImmutableList.builder(); - int size = list.size(); - for (int i = 0; i < size - 1; i++) { - builder.add(list.get(i)); - } - builder.add(change.apply(list.get(size - 1))); - return builder.build(); - } - - /** - * If the existing map has an entry with the new key, that old entry will be removed, but the new - * entry will appear last in the iteration order of the resulting map. Example: - * - *

{@code
-   * append({a=>1, b=>2, c=>3}, a, 4) == {b=>2, c=>3, a=>4}
-   * }
- */ - @NonNull - public static ImmutableMap append( - @NonNull ImmutableMap map, @NonNull K newKey, @NonNull V newValue) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Map.Entry entry : map.entrySet()) { - if (!entry.getKey().equals(newKey)) { - builder.put(entry); - } - } - builder.put(newKey, newValue); - return builder.build(); - } - - /** - * If the existing map has entries that collide with the new map, those old entries will be - * removed, but the new entries will appear at their new position in the iteration order of the - * resulting map. Example: - * - *
{@code
-   * concat({a=>1, b=>2, c=>3}, {c=>4, a=>5}) == {b=>2, c=>4, a=>5}
-   * }
- */ - @NonNull - public static ImmutableMap concat( - @NonNull ImmutableMap map1, @NonNull Map map2) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Map.Entry entry : map1.entrySet()) { - if (!map2.containsKey(entry.getKey())) { - builder.put(entry); - } - } - builder.putAll(map2); - return builder.build(); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultCondition.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultCondition.java deleted file mode 100644 index 4b24c98a85b..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultCondition.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.condition; - -import com.datastax.oss.driver.api.querybuilder.condition.Condition; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.LeftOperand; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCondition implements Condition { - - private final LeftOperand leftOperand; - private final String operator; - private final Term rightOperand; - - public DefaultCondition( - @NonNull LeftOperand leftOperand, @NonNull String operator, @Nullable Term rightOperand) { - this.leftOperand = leftOperand; - this.operator = operator; - this.rightOperand = rightOperand; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - leftOperand.appendTo(builder); - builder.append(operator); - if (rightOperand != null) { - rightOperand.appendTo(builder); - } - } - - @NonNull - public LeftOperand getLeftOperand() { - return leftOperand; - } - - @NonNull - public String getOperator() { - return operator; - } - - @Nullable - public Term getRightOperand() { - return rightOperand; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultConditionBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultConditionBuilder.java deleted file mode 100644 index d80bdfc3d61..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultConditionBuilder.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.condition; - -import com.datastax.oss.driver.api.querybuilder.condition.Condition; -import com.datastax.oss.driver.api.querybuilder.condition.ConditionBuilder; -import com.datastax.oss.driver.api.querybuilder.condition.ConditionalStatement; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.LeftOperand; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultConditionBuilder implements ConditionBuilder { - - private final LeftOperand leftOperand; - - public DefaultConditionBuilder(@NonNull LeftOperand leftOperand) { - this.leftOperand = leftOperand; - } - - @NonNull - @Override - public Condition build(@NonNull String operator, @Nullable Term rightOperand) { - return new DefaultCondition(leftOperand, operator, rightOperand); - } - - @Immutable - public static class Fluent> - implements ConditionBuilder { - - private final ConditionalStatement statement; - private final ConditionBuilder delegate; - - public Fluent( - @NonNull ConditionalStatement statement, @NonNull LeftOperand leftOperand) { - this.statement = statement; - this.delegate = new DefaultConditionBuilder(leftOperand); - } - - @NonNull - @Override - public StatementT build(@NonNull String 
operator, @Nullable Term rightOperand) { - return statement.if_(delegate.build(operator, rightOperand)); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/delete/DefaultDelete.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/delete/DefaultDelete.java deleted file mode 100644 index 578950bcd40..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/delete/DefaultDelete.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.delete; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder; -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import com.datastax.oss.driver.api.querybuilder.condition.Condition; -import com.datastax.oss.driver.api.querybuilder.delete.Delete; -import com.datastax.oss.driver.api.querybuilder.delete.DeleteSelection; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.internal.querybuilder.select.ElementSelector; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDelete implements DeleteSelection, Delete { - - private final CqlIdentifier keyspace; - private final CqlIdentifier table; - private final ImmutableList selectors; - private final ImmutableList relations; - private final Object timestamp; - private final boolean ifExists; - private final ImmutableList conditions; - - public DefaultDelete(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - this(keyspace, table, ImmutableList.of(), ImmutableList.of(), null, false, ImmutableList.of()); - } - - public DefaultDelete( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier table, - @NonNull ImmutableList selectors, - @NonNull ImmutableList relations, - @Nullable Object timestamp, - boolean ifExists, - @NonNull ImmutableList conditions) { - this.keyspace = keyspace; - this.table = table; - this.selectors = selectors; - 
this.relations = relations; - this.timestamp = timestamp; - this.ifExists = ifExists; - this.conditions = conditions; - } - - @NonNull - @Override - public DeleteSelection selector(@NonNull Selector selector) { - return withSelectors(ImmutableCollections.append(selectors, selector)); - } - - @NonNull - @Override - public DeleteSelection selectors(@NonNull Iterable additionalSelectors) { - return withSelectors(ImmutableCollections.concat(selectors, additionalSelectors)); - } - - @NonNull - public DeleteSelection withSelectors(@NonNull ImmutableList newSelectors) { - return new DefaultDelete( - keyspace, table, newSelectors, relations, timestamp, ifExists, conditions); - } - - @NonNull - @Override - public Delete where(@NonNull Relation relation) { - return withRelations(ImmutableCollections.append(relations, relation)); - } - - @NonNull - @Override - public Delete where(@NonNull Iterable additionalRelations) { - return withRelations(ImmutableCollections.concat(relations, additionalRelations)); - } - - @NonNull - public Delete withRelations(@NonNull ImmutableList newRelations) { - return new DefaultDelete( - keyspace, table, selectors, newRelations, timestamp, ifExists, conditions); - } - - @NonNull - @Override - public DeleteSelection usingTimestamp(long newTimestamp) { - return new DefaultDelete( - keyspace, table, selectors, relations, newTimestamp, ifExists, conditions); - } - - @NonNull - @Override - public DeleteSelection usingTimestamp(@Nullable BindMarker newTimestamp) { - return new DefaultDelete( - keyspace, table, selectors, relations, newTimestamp, ifExists, conditions); - } - - @NonNull - @Override - public Delete ifExists() { - return new DefaultDelete( - keyspace, table, selectors, relations, timestamp, true, ImmutableList.of()); - } - - @NonNull - @Override - public Delete if_(@NonNull Condition condition) { - return withConditions(ImmutableCollections.append(conditions, condition)); - } - - @NonNull - @Override - public Delete if_(@NonNull Iterable 
additionalConditions) { - return withConditions(ImmutableCollections.concat(conditions, additionalConditions)); - } - - @NonNull - public Delete withConditions(@NonNull ImmutableList newConditions) { - return new DefaultDelete( - keyspace, table, selectors, relations, timestamp, false, newConditions); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("DELETE"); - - CqlHelper.append(selectors, builder, " ", ",", null); - - builder.append(" FROM "); - CqlHelper.qualify(keyspace, table, builder); - - if (timestamp != null) { - builder.append(" USING TIMESTAMP "); - if (timestamp instanceof BindMarker) { - ((BindMarker) timestamp).appendTo(builder); - } else { - builder.append(timestamp); - } - } - - CqlHelper.append(relations, builder, " WHERE ", " AND ", null); - - if (ifExists) { - builder.append(" IF EXISTS"); - } else { - CqlHelper.append(conditions, builder, " IF ", " AND ", null); - } - return builder.toString(); - } - - @NonNull - @Override - public SimpleStatement build() { - return builder().build(); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Object... values) { - return builder().addPositionalValues(values).build(); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Map namedValues) { - SimpleStatementBuilder builder = builder(); - for (Map.Entry entry : namedValues.entrySet()) { - builder.addNamedValue(entry.getKey(), entry.getValue()); - } - return builder.build(); - } - - @NonNull - @Override - public SimpleStatementBuilder builder() { - return SimpleStatement.builder(asCql()).setIdempotence(isIdempotent()); - } - - public boolean isIdempotent() { - // Conditional queries are never idempotent, see JAVA-819 - if (!conditions.isEmpty() || ifExists) { - return false; - } else { - for (Selector selector : selectors) { - // `DELETE list[0]` is not idempotent. Unfortunately we don't know what type of collection - // an elements selector targets, so be conservative. 
- if (selector instanceof ElementSelector) { - return false; - } - } - for (Relation relation : relations) { - if (!relation.isIdempotent()) { - return false; - } - } - return true; - } - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getTable() { - return table; - } - - @NonNull - public ImmutableList getSelectors() { - return selectors; - } - - @NonNull - public ImmutableList getRelations() { - return relations; - } - - @Nullable - public Object getTimestamp() { - return timestamp; - } - - @Override - public String toString() { - return asCql(); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/insert/DefaultInsert.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/insert/DefaultInsert.java deleted file mode 100644 index 08717584773..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/insert/DefaultInsert.java +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.insert; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.insert.Insert; -import com.datastax.oss.driver.api.querybuilder.insert.InsertInto; -import com.datastax.oss.driver.api.querybuilder.insert.JsonInsert; -import com.datastax.oss.driver.api.querybuilder.insert.RegularInsert; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultInsert implements InsertInto, RegularInsert, JsonInsert { - - public enum MissingJsonBehavior { - NULL, - UNSET - } - - private final CqlIdentifier keyspace; - private final CqlIdentifier table; - private final Term json; - private final MissingJsonBehavior missingJsonBehavior; - private final ImmutableMap assignments; - private final Object timestamp; - private final Object ttlInSeconds; - private final boolean ifNotExists; - - public DefaultInsert(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - this(keyspace, table, null, null, ImmutableMap.of(), null, null, false); - } - - public DefaultInsert( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier table, - @Nullable Term json, - @Nullable 
MissingJsonBehavior missingJsonBehavior, - @NonNull ImmutableMap assignments, - @Nullable Object timestamp, - @Nullable Object ttlInSeconds, - boolean ifNotExists) { - // Note: the public API guarantees this, but check in case someone is calling the internal API - // directly. - Preconditions.checkArgument( - json == null || assignments.isEmpty(), "JSON insert can't have regular assignments"); - Preconditions.checkArgument( - timestamp == null || timestamp instanceof Long || timestamp instanceof BindMarker, - "TIMESTAMP value must be a BindMarker or a Long"); - Preconditions.checkArgument( - ttlInSeconds == null - || ttlInSeconds instanceof Integer - || ttlInSeconds instanceof BindMarker, - "TTL value must be a BindMarker or an Integer"); - this.keyspace = keyspace; - this.table = table; - this.json = json; - this.missingJsonBehavior = missingJsonBehavior; - this.assignments = assignments; - this.timestamp = timestamp; - this.ttlInSeconds = ttlInSeconds; - this.ifNotExists = ifNotExists; - } - - @NonNull - @Override - public JsonInsert json(@NonNull String json) { - return new DefaultInsert( - keyspace, - table, - QueryBuilder.literal(json), - missingJsonBehavior, - ImmutableMap.of(), - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public JsonInsert json(@NonNull BindMarker json) { - return new DefaultInsert( - keyspace, - table, - json, - missingJsonBehavior, - ImmutableMap.of(), - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public JsonInsert json(@NonNull T value, @NonNull TypeCodec codec) { - return new DefaultInsert( - keyspace, - table, - QueryBuilder.literal(value, codec), - missingJsonBehavior, - ImmutableMap.of(), - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public JsonInsert defaultNull() { - return new DefaultInsert( - keyspace, - table, - json, - MissingJsonBehavior.NULL, - ImmutableMap.of(), - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - 
public JsonInsert defaultUnset() { - return new DefaultInsert( - keyspace, - table, - json, - MissingJsonBehavior.UNSET, - ImmutableMap.of(), - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public RegularInsert value(@NonNull CqlIdentifier columnId, @NonNull Term value) { - return new DefaultInsert( - keyspace, - table, - null, - null, - ImmutableCollections.append(assignments, columnId, value), - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public RegularInsert valuesByIds(@NonNull Map newAssignments) { - return new DefaultInsert( - keyspace, - table, - null, - null, - ImmutableCollections.concat(assignments, newAssignments), - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public Insert ifNotExists() { - return new DefaultInsert( - keyspace, table, json, missingJsonBehavior, assignments, timestamp, ttlInSeconds, true); - } - - @NonNull - @Override - public Insert usingTimestamp(long timestamp) { - return new DefaultInsert( - keyspace, - table, - json, - missingJsonBehavior, - assignments, - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public Insert usingTimestamp(@Nullable BindMarker timestamp) { - return new DefaultInsert( - keyspace, - table, - json, - missingJsonBehavior, - assignments, - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public Insert usingTtl(int ttlInSeconds) { - return new DefaultInsert( - keyspace, - table, - json, - missingJsonBehavior, - assignments, - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public Insert usingTtl(@Nullable BindMarker ttlInSeconds) { - return new DefaultInsert( - keyspace, - table, - json, - missingJsonBehavior, - assignments, - timestamp, - ttlInSeconds, - ifNotExists); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("INSERT INTO "); - CqlHelper.qualify(keyspace, table, builder); - - if (json == null) 
{ - CqlHelper.appendIds(assignments.keySet(), builder, " (", ",", ")"); - CqlHelper.append(assignments.values(), builder, " VALUES (", ",", ")"); - } else { - builder.append(" JSON "); - json.appendTo(builder); - if (missingJsonBehavior == MissingJsonBehavior.NULL) { - builder.append(" DEFAULT NULL"); - } else if (missingJsonBehavior == MissingJsonBehavior.UNSET) { - builder.append(" DEFAULT UNSET"); - } - } - if (ifNotExists) { - builder.append(" IF NOT EXISTS"); - } - if (timestamp != null) { - builder.append(" USING TIMESTAMP "); - if (timestamp instanceof BindMarker) { - ((BindMarker) timestamp).appendTo(builder); - } else { - builder.append(timestamp); - } - } - if (ttlInSeconds != null) { - builder.append((timestamp != null) ? " AND " : " USING ").append("TTL "); - if (ttlInSeconds instanceof BindMarker) { - ((BindMarker) ttlInSeconds).appendTo(builder); - } else { - builder.append(ttlInSeconds); - } - } - return builder.toString(); - } - - @NonNull - @Override - public SimpleStatement build() { - return builder().build(); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Object... 
values) { - return builder().addPositionalValues(values).build(); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Map namedValues) { - SimpleStatementBuilder builder = builder(); - for (Map.Entry entry : namedValues.entrySet()) { - builder.addNamedValue(entry.getKey(), entry.getValue()); - } - return builder.build(); - } - - @NonNull - @Override - public SimpleStatementBuilder builder() { - return SimpleStatement.builder(asCql()).setIdempotence(isIdempotent()); - } - - public boolean isIdempotent() { - // Conditional queries are never idempotent, see JAVA-819 - if (ifNotExists) { - return false; - } else { - for (Term value : assignments.values()) { - if (!value.isIdempotent()) { - return false; - } - } - return true; - } - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getTable() { - return table; - } - - @Nullable - public Object getJson() { - return json; - } - - @Nullable - public MissingJsonBehavior getMissingJsonBehavior() { - return missingJsonBehavior; - } - - @NonNull - public ImmutableMap getAssignments() { - return assignments; - } - - @Nullable - public Object getTimestamp() { - return timestamp; - } - - @Nullable - public Object getTtlInSeconds() { - return ttlInSeconds; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - @Override - public String toString() { - return asCql(); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnComponentLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnComponentLeftOperand.java deleted file mode 100644 index 060308d2ce4..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnComponentLeftOperand.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.lhs; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class ColumnComponentLeftOperand implements LeftOperand { - - private final CqlIdentifier columnId; - private final Term index; - - public ColumnComponentLeftOperand(@NonNull CqlIdentifier columnId, @NonNull Term index) { - this.columnId = columnId; - this.index = index; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append(columnId.asCql(true)).append('['); - index.appendTo(builder); - builder.append(']'); - } - - @NonNull - public CqlIdentifier getColumnId() { - return columnId; - } - - @NonNull - public Term getIndex() { - return index; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnLeftOperand.java deleted file mode 100644 index 7d4a87f1c0a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnLeftOperand.java +++ /dev/null @@ -1,42 +0,0 @@ -/* 
- * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.lhs; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class ColumnLeftOperand implements LeftOperand { - - private final CqlIdentifier columnId; - - public ColumnLeftOperand(@NonNull CqlIdentifier columnId) { - this.columnId = columnId; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append(columnId.asCql(true)); - } - - @NonNull - public CqlIdentifier getColumnId() { - return columnId; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/FieldLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/FieldLeftOperand.java deleted file mode 100644 index ccfca2a50dc..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/FieldLeftOperand.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.lhs; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class FieldLeftOperand implements LeftOperand { - - private final CqlIdentifier columnId; - private final CqlIdentifier fieldId; - - public FieldLeftOperand(@NonNull CqlIdentifier columnId, @NonNull CqlIdentifier fieldId) { - this.columnId = columnId; - this.fieldId = fieldId; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append(columnId.asCql(true)).append('.').append(fieldId.asCql(true)); - } - - @NonNull - public CqlIdentifier getColumnId() { - return columnId; - } - - @NonNull - public CqlIdentifier getFieldId() { - return fieldId; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/LeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/LeftOperand.java deleted file mode 100644 index 2eae6ee8382..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/LeftOperand.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.lhs; - -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.internal.querybuilder.condition.DefaultCondition; -import com.datastax.oss.driver.internal.querybuilder.relation.DefaultRelation; - -/** - * The left operand of a relation. - * - *

Doesn't need to be in an API package since it's only used internally by {@link - * DefaultRelation} and {@link DefaultCondition}. - * - *

Implementations of this interface are only used temporarily while building a {@link Relation}, - * so they don't need to provide introspection (i.e. public getters). - */ -public interface LeftOperand extends CqlSnippet {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TokenLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TokenLeftOperand.java deleted file mode 100644 index 7f0b2d3c9a2..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TokenLeftOperand.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.lhs; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class TokenLeftOperand implements LeftOperand { - - private final Iterable identifiers; - - public TokenLeftOperand(@NonNull Iterable identifiers) { - this.identifiers = identifiers; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - CqlHelper.appendIds(identifiers, builder, "token(", ",", ")"); - } - - public @NonNull Iterable getIdentifiers() { - return identifiers; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TupleLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TupleLeftOperand.java deleted file mode 100644 index 35e60c3a33c..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TupleLeftOperand.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.lhs; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class TupleLeftOperand implements LeftOperand { - - private final Iterable identifiers; - - public TupleLeftOperand(@NonNull Iterable identifiers) { - this.identifiers = identifiers; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - CqlHelper.appendIds(identifiers, builder, "(", ",", ")"); - } - - @NonNull - public Iterable getIdentifiers() { - return identifiers; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/CustomIndexRelation.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/CustomIndexRelation.java deleted file mode 100644 index d96ffbe5201..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/CustomIndexRelation.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.relation; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class CustomIndexRelation implements Relation { - - private final CqlIdentifier indexId; - private final Term expression; - - public CustomIndexRelation(@NonNull CqlIdentifier indexId, @NonNull Term expression) { - this.indexId = indexId; - this.expression = expression; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append("expr(").append(indexId.asCql(true)).append(','); - expression.appendTo(builder); - builder.append(')'); - } - - @Override - public boolean isIdempotent() { - return false; - } - - @NonNull - public CqlIdentifier getIndexId() { - return indexId; - } - - @NonNull - public Term getExpression() { - return expression; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnComponentRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnComponentRelationBuilder.java deleted file mode 100644 index 139827250ef..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnComponentRelationBuilder.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.relation; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.relation.ColumnComponentRelationBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.ColumnComponentLeftOperand; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultColumnComponentRelationBuilder - implements ColumnComponentRelationBuilder { - - private final CqlIdentifier columnId; - private final Term index; - - public DefaultColumnComponentRelationBuilder( - @NonNull CqlIdentifier columnId, @NonNull Term index) { - this.columnId = columnId; - this.index = index; - } - - @NonNull - @Override - public Relation build(@NonNull String operator, @Nullable Term rightOperand) { - return new DefaultRelation( - new ColumnComponentLeftOperand(columnId, index), operator, rightOperand); - } - - @Immutable - public static class Fluent> - implements ColumnComponentRelationBuilder { - - private final OngoingWhereClause statement; - private final ColumnComponentRelationBuilder delegate; - - public Fluent( - @NonNull OngoingWhereClause statement, - @NonNull CqlIdentifier columnId, - @NonNull Term index) { - this.statement = statement; - this.delegate = new 
DefaultColumnComponentRelationBuilder(columnId, index); - } - - @NonNull - @Override - public StatementT build(@NonNull String operator, @Nullable Term rightOperand) { - return statement.where(delegate.build(operator, rightOperand)); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnRelationBuilder.java deleted file mode 100644 index 5f085083bc2..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnRelationBuilder.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.relation; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.relation.ColumnRelationBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.ColumnLeftOperand; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultColumnRelationBuilder implements ColumnRelationBuilder { - - private final CqlIdentifier columnId; - - public DefaultColumnRelationBuilder(@NonNull CqlIdentifier columnId) { - Preconditions.checkNotNull(columnId); - this.columnId = columnId; - } - - @NonNull - @Override - public Relation build(@NonNull String operator, @Nullable Term rightOperand) { - return new DefaultRelation(new ColumnLeftOperand(columnId), operator, rightOperand); - } - - @Immutable - public static class Fluent> - implements ColumnRelationBuilder { - - private final OngoingWhereClause statement; - private final ColumnRelationBuilder delegate; - - public Fluent( - @NonNull OngoingWhereClause statement, @NonNull CqlIdentifier columnId) { - this.statement = statement; - this.delegate = new DefaultColumnRelationBuilder(columnId); - } - - @NonNull - @Override - public StatementT build(@NonNull String operator, @Nullable Term rightOperand) { - return statement.where(delegate.build(operator, rightOperand)); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultMultiColumnRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultMultiColumnRelationBuilder.java 
deleted file mode 100644 index a0670c47140..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultMultiColumnRelationBuilder.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.relation; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.relation.MultiColumnRelationBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.TupleLeftOperand; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultMultiColumnRelationBuilder implements MultiColumnRelationBuilder { - - private final Iterable identifiers; - - public DefaultMultiColumnRelationBuilder(@NonNull Iterable identifiers) { - Preconditions.checkNotNull(identifiers); - Preconditions.checkArgument( - identifiers.iterator().hasNext(), "Tuple must contain at least one column"); - this.identifiers = identifiers; - } - - @NonNull - @Override - public Relation build(@NonNull String operator, @Nullable Term rightOperand) { - return new DefaultRelation(new TupleLeftOperand(identifiers), operator, rightOperand); - } - - @Immutable - public static class Fluent> - implements MultiColumnRelationBuilder { - - private final OngoingWhereClause statement; - private final MultiColumnRelationBuilder delegate; - - public Fluent( - @NonNull OngoingWhereClause statement, - @NonNull Iterable identifiers) { - this.statement = statement; - this.delegate = new DefaultMultiColumnRelationBuilder(identifiers); - } - - @NonNull - @Override - public StatementT build(@NonNull String operator, @Nullable Term rightOperand) { - return statement.where(delegate.build(operator, rightOperand)); - } - } -} diff --git 
a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultRelation.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultRelation.java deleted file mode 100644 index 25786cbfe7f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultRelation.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.relation; - -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.LeftOperand; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultRelation implements Relation { - - private final LeftOperand leftOperand; - private final String operator; - private final Term rightOperand; - - public DefaultRelation( - @NonNull LeftOperand leftOperand, @NonNull String operator, @Nullable Term rightOperand) { - Preconditions.checkNotNull(leftOperand); - Preconditions.checkNotNull(operator); - this.leftOperand = leftOperand; - this.operator = operator; - this.rightOperand = rightOperand; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - leftOperand.appendTo(builder); - builder.append(operator); - if (rightOperand != null) { - rightOperand.appendTo(builder); - } - } - - @Override - public boolean isIdempotent() { - return rightOperand == null || rightOperand.isIdempotent(); - } - - @NonNull - public LeftOperand getLeftOperand() { - return leftOperand; - } - - @NonNull - public String getOperator() { - return operator; - } - - @Nullable - public Term getRightOperand() { - return rightOperand; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultTokenRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultTokenRelationBuilder.java deleted file mode 100644 index 192eb340bef..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultTokenRelationBuilder.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache 
Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.relation; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.relation.TokenRelationBuilder; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.TokenLeftOperand; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultTokenRelationBuilder implements TokenRelationBuilder { - - private final Iterable identifiers; - - public DefaultTokenRelationBuilder(@NonNull Iterable identifiers) { - this.identifiers = identifiers; - } - - @NonNull - @Override - public Relation build(@NonNull String operator, @Nullable Term rightOperand) { - return new DefaultRelation(new TokenLeftOperand(identifiers), operator, rightOperand); - } - - @Immutable - public static class Fluent> - implements TokenRelationBuilder { - - private final OngoingWhereClause statement; - private final 
TokenRelationBuilder delegate; - - public Fluent( - @NonNull OngoingWhereClause statement, - @NonNull Iterable identifiers) { - this.statement = statement; - this.delegate = new DefaultTokenRelationBuilder(identifiers); - } - - @NonNull - @Override - public StatementT build(@NonNull String operator, @Nullable Term rightOperand) { - return statement.where(delegate.build(operator, rightOperand)); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterKeyspace.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterKeyspace.java deleted file mode 100644 index 4dbd876da50..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterKeyspace.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.schema.AlterKeyspace; -import com.datastax.oss.driver.api.querybuilder.schema.AlterKeyspaceStart; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultAlterKeyspace implements AlterKeyspaceStart, AlterKeyspace { - - private final CqlIdentifier keyspaceName; - private final ImmutableMap options; - - public DefaultAlterKeyspace(@NonNull CqlIdentifier keyspaceName) { - this(keyspaceName, ImmutableMap.of()); - } - - public DefaultAlterKeyspace( - @NonNull CqlIdentifier keyspaceName, @NonNull ImmutableMap options) { - this.keyspaceName = keyspaceName; - this.options = options; - } - - @NonNull - @Override - public AlterKeyspace withReplicationOptions(@NonNull Map replicationOptions) { - return withOption("replication", replicationOptions); - } - - @NonNull - @Override - public AlterKeyspace withOption(@NonNull String name, @NonNull Object value) { - return new DefaultAlterKeyspace( - keyspaceName, ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public String asCql() { - return "ALTER KEYSPACE " + keyspaceName.asCql(true) + OptionsUtils.buildOptions(options, true); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @NonNull - public CqlIdentifier getKeyspace() { - return keyspaceName; - } - - @Override - public String toString() { - return asCql(); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterMaterializedView.java 
b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterMaterializedView.java deleted file mode 100644 index 250e0c37026..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterMaterializedView.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.schema.AlterMaterializedView; -import com.datastax.oss.driver.api.querybuilder.schema.AlterMaterializedViewStart; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultAlterMaterializedView - implements AlterMaterializedViewStart, AlterMaterializedView { - - private final CqlIdentifier keyspace; - private final CqlIdentifier viewName; - - private final ImmutableMap options; - - public DefaultAlterMaterializedView(@NonNull CqlIdentifier viewName) { - this(null, viewName); - } - - public DefaultAlterMaterializedView( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier viewName) { - this(keyspace, viewName, ImmutableMap.of()); - } - - public DefaultAlterMaterializedView( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier viewName, - @NonNull ImmutableMap options) { - this.keyspace = keyspace; - this.viewName = viewName; - this.options = options; - } - - @NonNull - @Override - public AlterMaterializedView withOption(@NonNull String name, @NonNull Object value) { - return new DefaultAlterMaterializedView( - keyspace, viewName, ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("ALTER MATERIALIZED VIEW "); - CqlHelper.qualify(keyspace, viewName, builder); - builder.append(OptionsUtils.buildOptions(options, true)); - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - 
@Override - public Map getOptions() { - return options; - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getMaterializedView() { - return viewName; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterTable.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterTable.java deleted file mode 100644 index 63e7076e717..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterTable.java +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import static com.datastax.oss.driver.internal.querybuilder.schema.Utils.appendSet; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTableAddColumnEnd; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTableDropColumnEnd; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTableRenameColumnEnd; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTableStart; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTableWithOptionsEnd; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultAlterTable - implements AlterTableStart, - AlterTableAddColumnEnd, - AlterTableDropColumnEnd, - AlterTableRenameColumnEnd, - AlterTableWithOptionsEnd, - BuildableQuery { - - private final CqlIdentifier keyspace; - private final CqlIdentifier tableName; - - private final ImmutableMap allColumnsToAddInOrder; - private final ImmutableSet columnsToAddRegular; - private final ImmutableSet columnsToAddStatic; - private final ImmutableSet columnsToDrop; - private final ImmutableMap columnsToRename; - private final CqlIdentifier columnToAlter; - private final DataType columnToAlterType; - private final ImmutableMap options; - private final boolean dropCompactStorage; - - public DefaultAlterTable(@NonNull CqlIdentifier tableName) { - this(null, tableName); - } - - public 
DefaultAlterTable(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { - this( - keyspace, - tableName, - false, - ImmutableMap.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableMap.of(), - null, - null, - ImmutableMap.of()); - } - - public DefaultAlterTable( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier tableName, - boolean dropCompactStorage, - @NonNull ImmutableMap allColumnsToAddInOrder, - @NonNull ImmutableSet columnsToAddRegular, - @NonNull ImmutableSet columnsToAddStatic, - @NonNull ImmutableSet columnsToDrop, - @NonNull ImmutableMap columnsToRename, - @Nullable CqlIdentifier columnToAlter, - @Nullable DataType columnToAlterType, - @NonNull ImmutableMap options) { - this.keyspace = keyspace; - this.tableName = tableName; - this.dropCompactStorage = dropCompactStorage; - this.allColumnsToAddInOrder = allColumnsToAddInOrder; - this.columnsToAddRegular = columnsToAddRegular; - this.columnsToAddStatic = columnsToAddStatic; - this.columnsToDrop = columnsToDrop; - this.columnsToRename = columnsToRename; - this.columnToAlter = columnToAlter; - this.columnToAlterType = columnToAlterType; - this.options = options; - } - - @NonNull - @Override - public AlterTableAddColumnEnd addColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultAlterTable( - keyspace, - tableName, - dropCompactStorage, - ImmutableCollections.append(allColumnsToAddInOrder, columnName, dataType), - appendSet(columnsToAddRegular, columnName), - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - options); - } - - @NonNull - @Override - public AlterTableAddColumnEnd addStaticColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultAlterTable( - keyspace, - tableName, - dropCompactStorage, - ImmutableCollections.append(allColumnsToAddInOrder, columnName, dataType), - columnsToAddRegular, - appendSet(columnsToAddStatic, 
columnName), - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - options); - } - - @NonNull - @Override - public BuildableQuery dropCompactStorage() { - return new DefaultAlterTable( - keyspace, - tableName, - true, - allColumnsToAddInOrder, - columnsToAddRegular, - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - options); - } - - @NonNull - @Override - public AlterTableDropColumnEnd dropColumns(@NonNull CqlIdentifier... columnNames) { - ImmutableSet.Builder builder = - ImmutableSet.builder().addAll(columnsToDrop); - for (CqlIdentifier columnName : columnNames) { - builder = builder.add(columnName); - } - - return new DefaultAlterTable( - keyspace, - tableName, - dropCompactStorage, - allColumnsToAddInOrder, - columnsToAddRegular, - columnsToAddStatic, - builder.build(), - columnsToRename, - columnToAlter, - columnToAlterType, - options); - } - - @NonNull - @Override - public AlterTableRenameColumnEnd renameColumn( - @NonNull CqlIdentifier from, @NonNull CqlIdentifier to) { - return new DefaultAlterTable( - keyspace, - tableName, - dropCompactStorage, - allColumnsToAddInOrder, - columnsToAddRegular, - columnsToAddStatic, - columnsToDrop, - ImmutableCollections.append(columnsToRename, from, to), - columnToAlter, - columnToAlterType, - options); - } - - @NonNull - @Override - public BuildableQuery alterColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultAlterTable( - keyspace, - tableName, - dropCompactStorage, - allColumnsToAddInOrder, - columnsToAddRegular, - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnName, - dataType, - options); - } - - @NonNull - @Override - public AlterTableWithOptionsEnd withOption(@NonNull String name, @NonNull Object value) { - return new DefaultAlterTable( - keyspace, - tableName, - dropCompactStorage, - allColumnsToAddInOrder, - columnsToAddRegular, - columnsToAddStatic, - columnsToDrop, - columnsToRename, - 
columnToAlter, - columnToAlterType, - ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("ALTER TABLE "); - - CqlHelper.qualify(keyspace, tableName, builder); - - if (columnToAlter != null) { - return builder - .append(" ALTER ") - .append(columnToAlter.asCql(true)) - .append(" TYPE ") - .append(columnToAlterType.asCql(true, true)) - .toString(); - } else if (!allColumnsToAddInOrder.isEmpty()) { - builder.append(" ADD "); - if (allColumnsToAddInOrder.size() > 1) { - builder.append('('); - } - boolean first = true; - for (Map.Entry column : allColumnsToAddInOrder.entrySet()) { - if (first) { - first = false; - } else { - builder.append(','); - } - builder - .append(column.getKey().asCql(true)) - .append(' ') - .append(column.getValue().asCql(true, true)); - - if (columnsToAddStatic.contains(column.getKey())) { - builder.append(" STATIC"); - } - } - if (allColumnsToAddInOrder.size() > 1) { - builder.append(')'); - } - return builder.toString(); - } else if (!columnsToDrop.isEmpty()) { - boolean moreThanOneDrop = columnsToDrop.size() > 1; - CqlHelper.appendIds( - columnsToDrop, - builder, - moreThanOneDrop ? " DROP (" : " DROP ", - ",", - moreThanOneDrop ? 
")" : ""); - return builder.toString(); - } else if (!columnsToRename.isEmpty()) { - builder.append(" RENAME "); - boolean first = true; - for (Map.Entry entry : columnsToRename.entrySet()) { - if (first) { - first = false; - } else { - builder.append(" AND "); - } - builder - .append(entry.getKey().asCql(true)) - .append(" TO ") - .append(entry.getValue().asCql(true)); - } - return builder.toString(); - } else if (dropCompactStorage) { - return builder.append(" DROP COMPACT STORAGE").toString(); - } else if (!options.isEmpty()) { - return builder.append(OptionsUtils.buildOptions(options, true)).toString(); - } - - // While this is incomplete, we should return partially build query at this point for toString - // purposes. - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getTable() { - return tableName; - } - - @NonNull - public ImmutableMap getAllColumnsToAddInOrder() { - return allColumnsToAddInOrder; - } - - @NonNull - public ImmutableSet getColumnsToAddRegular() { - return columnsToAddRegular; - } - - @NonNull - public ImmutableSet getColumnsToAddStatic() { - return columnsToAddStatic; - } - - @NonNull - public ImmutableSet getColumnsToDrop() { - return columnsToDrop; - } - - @NonNull - public ImmutableMap getColumnsToRename() { - return columnsToRename; - } - - @Nullable - public CqlIdentifier getColumnToAlter() { - return columnToAlter; - } - - @Nullable - public DataType getColumnToAlterType() { - return columnToAlterType; - } - - public boolean isDropCompactStorage() { - return dropCompactStorage; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterType.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterType.java 
deleted file mode 100644 index 85b96265270..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterType.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTypeRenameField; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTypeRenameFieldEnd; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTypeStart; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultAlterType - implements AlterTypeStart, AlterTypeRenameField, AlterTypeRenameFieldEnd, BuildableQuery { - - private final CqlIdentifier keyspace; - private final CqlIdentifier typeName; - - private final CqlIdentifier fieldToAdd; - private final DataType fieldToAddType; - - private final ImmutableMap fieldsToRename; - - private final CqlIdentifier fieldToAlter; - private final DataType fieldToAlterType; - - public DefaultAlterType(@NonNull CqlIdentifier typeName) { - this(null, typeName); - } - - public DefaultAlterType(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier typeName) { - this(keyspace, typeName, null, null, ImmutableMap.of(), null, null); - } - - public DefaultAlterType( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier typeName, - @Nullable CqlIdentifier fieldToAdd, - @Nullable DataType fieldToAddType, - @NonNull ImmutableMap fieldsToRename, - @Nullable CqlIdentifier fieldToAlter, - @Nullable DataType fieldToAlterType) { - this.keyspace = keyspace; - this.typeName = typeName; - this.fieldToAdd = fieldToAdd; - this.fieldToAddType = fieldToAddType; - 
this.fieldsToRename = fieldsToRename; - this.fieldToAlter = fieldToAlter; - this.fieldToAlterType = fieldToAlterType; - } - - @NonNull - @Override - public BuildableQuery alterField(@NonNull CqlIdentifier fieldName, @NonNull DataType dataType) { - return new DefaultAlterType( - keyspace, typeName, fieldToAdd, fieldToAddType, fieldsToRename, fieldName, dataType); - } - - @NonNull - @Override - public BuildableQuery addField(@NonNull CqlIdentifier fieldName, @NonNull DataType dataType) { - return new DefaultAlterType( - keyspace, typeName, fieldName, dataType, fieldsToRename, fieldToAlter, fieldToAlterType); - } - - @NonNull - @Override - public AlterTypeRenameFieldEnd renameField( - @NonNull CqlIdentifier from, @NonNull CqlIdentifier to) { - return new DefaultAlterType( - keyspace, - typeName, - fieldToAdd, - fieldToAddType, - ImmutableCollections.append(fieldsToRename, from, to), - fieldToAlter, - fieldToAlterType); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("ALTER TYPE "); - - CqlHelper.qualify(keyspace, typeName, builder); - - if (fieldToAlter != null) { - return builder - .append(" ALTER ") - .append(fieldToAlter.asCql(true)) - .append(" TYPE ") - .append(fieldToAlterType.asCql(true, true)) - .toString(); - } else if (fieldToAdd != null) { - return builder - .append(" ADD ") - .append(fieldToAdd.asCql(true)) - .append(" ") - .append(fieldToAddType.asCql(true, true)) - .toString(); - } else if (!fieldsToRename.isEmpty()) { - builder.append(" RENAME "); - boolean first = true; - for (Map.Entry entry : fieldsToRename.entrySet()) { - if (first) { - first = false; - } else { - builder.append(" AND "); - } - builder - .append(entry.getKey().asCql(true)) - .append(" TO ") - .append(entry.getValue().asCql(true)); - } - return builder.toString(); - } - - // While this is incomplete, we should return partially built query at this point for toString - // purposes. 
- return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getType() { - return typeName; - } - - @Nullable - public CqlIdentifier getFieldToAdd() { - return fieldToAdd; - } - - @Nullable - public DataType getFieldToAddType() { - return fieldToAddType; - } - - @NonNull - public ImmutableMap getFieldsToRename() { - return fieldsToRename; - } - - @Nullable - public CqlIdentifier getFieldToAlter() { - return fieldToAlter; - } - - @Nullable - public DataType getFieldToAlterType() { - return fieldToAlterType; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateAggregate.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateAggregate.java deleted file mode 100644 index 1e0b4892277..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateAggregate.java +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.schema.CreateAggregateEnd; -import com.datastax.oss.driver.api.querybuilder.schema.CreateAggregateStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateAggregateStateFunc; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCreateAggregate - implements CreateAggregateStart, CreateAggregateStateFunc, CreateAggregateEnd { - - private final CqlIdentifier keyspace; - private final CqlIdentifier functionName; - private boolean orReplace; - private boolean ifNotExists; - private final ImmutableList parameters; - private final CqlIdentifier sFunc; - private final DataType sType; - private final CqlIdentifier finalFunc; - private final Term term; - - public DefaultCreateAggregate(@NonNull CqlIdentifier functionName) { - this(null, functionName); - } - - public DefaultCreateAggregate( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier functionName) { - this(keyspace, functionName, false, false, ImmutableList.of(), null, null, null, null); - } - - public DefaultCreateAggregate( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier functionName, - boolean orReplace, - boolean ifNotExists, - @NonNull ImmutableList parameters, - @Nullable CqlIdentifier sFunc, - @Nullable DataType sType, - @Nullable CqlIdentifier finalFunc, - @Nullable Term term) { - this.keyspace = keyspace; - this.functionName = functionName; - this.orReplace = 
orReplace; - this.ifNotExists = ifNotExists; - this.parameters = parameters; - this.sFunc = sFunc; - this.sType = sType; - this.finalFunc = finalFunc; - this.term = term; - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); - - builder.append("CREATE "); - if (orReplace) { - builder.append("OR REPLACE "); - } - builder.append("AGGREGATE "); - - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - CqlHelper.qualify(keyspace, functionName, builder); - - builder.append(" ("); - boolean first = true; - for (DataType param : parameters) { - if (first) { - first = false; - } else { - builder.append(','); - } - builder.append(param.asCql(false, true)); - } - builder.append(')'); - if (sFunc != null) { - builder.append(" SFUNC "); - builder.append(sFunc.asCql(true)); - } - if (sType != null) { - builder.append(" STYPE "); - builder.append(sType.asCql(false, true)); - } - if (finalFunc != null) { - builder.append(" FINALFUNC "); - builder.append(finalFunc.asCql(true)); - } - if (term != null) { - builder.append(" INITCOND "); - term.appendTo(builder); - } - return builder.toString(); - } - - @NonNull - @Override - public CreateAggregateEnd withInitCond(@NonNull Term term) { - return new DefaultCreateAggregate( - keyspace, functionName, orReplace, ifNotExists, parameters, sFunc, sType, finalFunc, term); - } - - @NonNull - @Override - public CreateAggregateStart ifNotExists() { - return new DefaultCreateAggregate( - keyspace, functionName, orReplace, true, parameters, sFunc, sType, finalFunc, term); - } - - @NonNull - @Override - public CreateAggregateStart orReplace() { - return new DefaultCreateAggregate( - keyspace, functionName, true, ifNotExists, parameters, sFunc, sType, finalFunc, term); - } - - @NonNull - @Override - public CreateAggregateStart withParameter(@NonNull DataType paramType) { - return new DefaultCreateAggregate( - keyspace, - functionName, - orReplace, - ifNotExists, - 
ImmutableCollections.append(parameters, paramType), - sFunc, - sType, - finalFunc, - term); - } - - @NonNull - @Override - public CreateAggregateStateFunc withSFunc(@NonNull CqlIdentifier sFunc) { - return new DefaultCreateAggregate( - keyspace, functionName, orReplace, ifNotExists, parameters, sFunc, sType, finalFunc, term); - } - - @NonNull - @Override - public CreateAggregateEnd withSType(@NonNull DataType sType) { - return new DefaultCreateAggregate( - keyspace, functionName, orReplace, ifNotExists, parameters, sFunc, sType, finalFunc, term); - } - - @NonNull - @Override - public CreateAggregateEnd withFinalFunc(@NonNull CqlIdentifier finalFunc) { - return new DefaultCreateAggregate( - keyspace, functionName, orReplace, ifNotExists, parameters, sFunc, sType, finalFunc, term); - } - - @Override - public String toString() { - return asCql(); - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getFunctionName() { - return functionName; - } - - public boolean isOrReplace() { - return orReplace; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - @NonNull - public ImmutableList getParameters() { - return parameters; - } - - @Nullable - public CqlIdentifier getsFunc() { - return sFunc; - } - - @Nullable - public DataType getsType() { - return sType; - } - - @Nullable - public CqlIdentifier getFinalFunc() { - return finalFunc; - } - - @Nullable - public Term getTerm() { - return term; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateFunction.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateFunction.java deleted file mode 100644 index 85035cde915..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateFunction.java +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * 
or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.schema.CreateFunctionEnd; -import com.datastax.oss.driver.api.querybuilder.schema.CreateFunctionStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateFunctionWithLanguage; -import com.datastax.oss.driver.api.querybuilder.schema.CreateFunctionWithNullOption; -import com.datastax.oss.driver.api.querybuilder.schema.CreateFunctionWithType; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCreateFunction - implements CreateFunctionStart, - CreateFunctionWithNullOption, - CreateFunctionWithType, - CreateFunctionWithLanguage, - CreateFunctionEnd { - - private final CqlIdentifier keyspace; - private final CqlIdentifier functionName; - private boolean 
orReplace; - private boolean ifNotExists; - private final ImmutableMap parameters; - private boolean returnsNullOnNull; - private final DataType returnType; - private final String language; - private final String functionBody; - - public DefaultCreateFunction(@NonNull CqlIdentifier functionName) { - this(null, functionName); - } - - public DefaultCreateFunction( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier functionName) { - this(keyspace, functionName, false, false, ImmutableMap.of(), false, null, null, null); - } - - public DefaultCreateFunction( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier functionName, - boolean orReplace, - boolean ifNotExists, - @NonNull ImmutableMap parameters, - boolean returnsNullOnNull, - @Nullable DataType returns, - @Nullable String language, - @Nullable String functionBody) { - this.keyspace = keyspace; - this.functionName = functionName; - this.orReplace = orReplace; - this.ifNotExists = ifNotExists; - this.parameters = parameters; - this.returnsNullOnNull = returnsNullOnNull; - this.returnType = returns; - this.language = language; - this.functionBody = functionBody; - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); - - builder.append("CREATE "); - if (orReplace) { - builder.append("OR REPLACE "); - } - builder.append("FUNCTION "); - - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - CqlHelper.qualify(keyspace, functionName, builder); - - builder.append(" ("); - - boolean first = true; - for (Map.Entry param : parameters.entrySet()) { - if (first) { - first = false; - } else { - builder.append(','); - } - builder - .append(param.getKey().asCql(true)) - .append(' ') - .append(param.getValue().asCql(false, true)); - } - builder.append(')'); - if (returnsNullOnNull) { - builder.append(" RETURNS NULL"); - } else { - builder.append(" CALLED"); - } - - builder.append(" ON NULL INPUT"); - - if (returnType == null) { - // return type has not been 
provided yet. - return builder.toString(); - } - - builder.append(" RETURNS "); - builder.append(returnType.asCql(false, true)); - - if (language == null) { - // language has not been provided yet. - return builder.toString(); - } - - builder.append(" LANGUAGE "); - builder.append(language); - - if (functionBody == null) { - // body has not been provided yet. - return builder.toString(); - } - - builder.append(" AS "); - builder.append(functionBody); - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - @Override - public CreateFunctionEnd as(@NonNull String functionBody) { - return new DefaultCreateFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody); - } - - @NonNull - @Override - public CreateFunctionWithLanguage withLanguage(@NonNull String language) { - return new DefaultCreateFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody); - } - - @NonNull - @Override - public CreateFunctionWithType returnsType(@NonNull DataType returnType) { - return new DefaultCreateFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody); - } - - @NonNull - @Override - public CreateFunctionStart ifNotExists() { - return new DefaultCreateFunction( - keyspace, - functionName, - orReplace, - true, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody); - } - - @NonNull - @Override - public CreateFunctionStart orReplace() { - return new DefaultCreateFunction( - keyspace, - functionName, - true, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody); - } - - @NonNull - @Override - public CreateFunctionStart withParameter( - @NonNull CqlIdentifier paramName, @NonNull DataType paramType) { - return new 
DefaultCreateFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - ImmutableCollections.append(parameters, paramName, paramType), - returnsNullOnNull, - returnType, - language, - functionBody); - } - - @NonNull - @Override - public CreateFunctionWithNullOption returnsNullOnNull() { - return new DefaultCreateFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - true, - returnType, - language, - functionBody); - } - - @NonNull - @Override - public CreateFunctionWithNullOption calledOnNull() { - return new DefaultCreateFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - false, - returnType, - language, - functionBody); - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getFunction() { - return functionName; - } - - public boolean isOrReplace() { - return orReplace; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - @NonNull - public ImmutableMap getParameters() { - return parameters; - } - - public boolean isReturnsNullOnNull() { - return returnsNullOnNull; - } - - @Nullable - public DataType getReturnType() { - return returnType; - } - - @Nullable - public String getLanguage() { - return language; - } - - @Nullable - public String getFunctionBody() { - return functionBody; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateIndex.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateIndex.java deleted file mode 100644 index 309beaa4afa..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateIndex.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.schema.CreateIndex; -import com.datastax.oss.driver.api.querybuilder.schema.CreateIndexOnTable; -import com.datastax.oss.driver.api.querybuilder.schema.CreateIndexStart; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCreateIndex implements CreateIndexStart, CreateIndexOnTable, CreateIndex { - - private static final String NO_INDEX_TYPE = "__NO_INDEX_TYPE"; - - private final CqlIdentifier indexName; - - private final boolean ifNotExists; - - private final CqlIdentifier keyspace; - private final CqlIdentifier table; - - private final ImmutableMap columnToIndexType; - - private final String usingClass; - - private final ImmutableMap options; - - public DefaultCreateIndex() { - this(null); - } - - public DefaultCreateIndex(@Nullable CqlIdentifier 
indexName) { - this(indexName, false, null, null, ImmutableMap.of(), null, ImmutableMap.of()); - } - - public DefaultCreateIndex( - @Nullable CqlIdentifier indexName, - boolean ifNotExists, - @Nullable CqlIdentifier keyspace, - @Nullable CqlIdentifier table, - @NonNull ImmutableMap columnToIndexType, - @Nullable String usingClass, - @NonNull ImmutableMap options) { - this.indexName = indexName; - this.ifNotExists = ifNotExists; - this.keyspace = keyspace; - this.table = table; - this.columnToIndexType = columnToIndexType; - this.usingClass = usingClass; - this.options = options; - } - - @NonNull - @Override - public CreateIndex andColumn(@NonNull CqlIdentifier column, @Nullable String indexType) { - // use placeholder index type when none present as immutable map does not allow null values. - if (indexType == null) { - indexType = NO_INDEX_TYPE; - } - - return new DefaultCreateIndex( - indexName, - ifNotExists, - keyspace, - table, - ImmutableCollections.append(columnToIndexType, column, indexType), - usingClass, - options); - } - - @NonNull - @Override - public CreateIndexStart ifNotExists() { - return new DefaultCreateIndex( - indexName, true, keyspace, table, columnToIndexType, usingClass, options); - } - - @NonNull - @Override - public CreateIndexStart custom(@NonNull String className) { - return new DefaultCreateIndex( - indexName, ifNotExists, keyspace, table, columnToIndexType, className, options); - } - - @NonNull - @Override - public CreateIndexOnTable onTable(CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - return new DefaultCreateIndex( - indexName, ifNotExists, keyspace, table, columnToIndexType, usingClass, options); - } - - @NonNull - @Override - public CreateIndex withOption(@NonNull String name, @NonNull Object value) { - return new DefaultCreateIndex( - indexName, - ifNotExists, - keyspace, - table, - columnToIndexType, - usingClass, - ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public String asCql() 
{ - StringBuilder builder = new StringBuilder("CREATE "); - if (usingClass != null) { - builder.append("CUSTOM "); - } - builder.append("INDEX"); - if (ifNotExists) { - builder.append(" IF NOT EXISTS"); - } - - if (indexName != null) { - builder.append(' ').append(indexName.asCql(true)); - } - - if (table == null) { - // Table not provided yet. - return builder.toString(); - } - - builder.append(" ON "); - - CqlHelper.qualify(keyspace, table, builder); - - if (columnToIndexType.isEmpty()) { - // columns not provided yet - return builder.toString(); - } - - builder.append(" ("); - - boolean firstColumn = true; - for (Map.Entry entry : columnToIndexType.entrySet()) { - if (firstColumn) { - firstColumn = false; - } else { - builder.append(","); - } - if (entry.getValue().equals(NO_INDEX_TYPE)) { - builder.append(entry.getKey()); - } else { - builder.append(entry.getValue()).append("(").append(entry.getKey()).append(")"); - } - } - builder.append(")"); - - if (usingClass != null) { - builder.append(" USING '").append(usingClass).append('\''); - } - - if (!options.isEmpty()) { - builder.append(OptionsUtils.buildOptions(options, true)); - } - - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Nullable - public CqlIdentifier getIndex() { - return indexName; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @Nullable - public CqlIdentifier getTable() { - return table; - } - - @NonNull - public ImmutableMap getColumnToIndexType() { - return columnToIndexType; - } - - @Nullable - public String getUsingClass() { - return usingClass; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateKeyspace.java 
b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateKeyspace.java deleted file mode 100644 index b8cb237d5ff..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateKeyspace.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.schema.CreateKeyspace; -import com.datastax.oss.driver.api.querybuilder.schema.CreateKeyspaceStart; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCreateKeyspace implements CreateKeyspace, CreateKeyspaceStart { - - private final CqlIdentifier keyspaceName; - private final boolean ifNotExists; - private final ImmutableMap options; - - public DefaultCreateKeyspace(@NonNull CqlIdentifier keyspaceName) { - this(keyspaceName, false, ImmutableMap.of()); - } - - public DefaultCreateKeyspace( - @NonNull CqlIdentifier keyspaceName, - boolean ifNotExists, - @NonNull ImmutableMap options) { - this.keyspaceName = keyspaceName; - this.ifNotExists = ifNotExists; - this.options = options; - } - - @NonNull - @Override - public CreateKeyspace withOption(@NonNull String name, @NonNull Object value) { - return new DefaultCreateKeyspace( - keyspaceName, ifNotExists, ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public CreateKeyspaceStart ifNotExists() { - return new DefaultCreateKeyspace(keyspaceName, true, options); - } - - @NonNull - @Override - public CreateKeyspace withReplicationOptions(@NonNull Map replicationOptions) { - return withOption("replication", replicationOptions); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); - - builder.append("CREATE KEYSPACE "); - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - - builder.append(keyspaceName.asCql(true)); - builder.append(OptionsUtils.buildOptions(options, true)); - return builder.toString(); - } - - @Override - 
public String toString() { - return asCql(); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @NonNull - public CqlIdentifier getKeyspace() { - return keyspaceName; - } - - public boolean isIfNotExists() { - return ifNotExists; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateMaterializedView.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateMaterializedView.java deleted file mode 100644 index bfd8fba51eb..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateMaterializedView.java +++ /dev/null @@ -1,445 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.schema.CreateMaterializedView; -import com.datastax.oss.driver.api.querybuilder.schema.CreateMaterializedViewPrimaryKey; -import com.datastax.oss.driver.api.querybuilder.schema.CreateMaterializedViewSelection; -import com.datastax.oss.driver.api.querybuilder.schema.CreateMaterializedViewSelectionWithColumns; -import com.datastax.oss.driver.api.querybuilder.schema.CreateMaterializedViewStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateMaterializedViewWhere; -import com.datastax.oss.driver.api.querybuilder.schema.CreateMaterializedViewWhereStart; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCreateMaterializedView - implements CreateMaterializedViewStart, - CreateMaterializedViewSelectionWithColumns, - CreateMaterializedViewWhere, - CreateMaterializedViewPrimaryKey, - CreateMaterializedView { - - private final CqlIdentifier keyspace; - private final CqlIdentifier viewName; - - private final boolean ifNotExists; - - private final CqlIdentifier baseTableKeyspace; - private final CqlIdentifier baseTable; - - private final ImmutableList selectors; - private final 
ImmutableList whereRelations; - private final ImmutableSet partitionKeyColumns; - private final ImmutableSet clusteringKeyColumns; - - private final ImmutableMap orderings; - - private final ImmutableMap options; - - public DefaultCreateMaterializedView(@NonNull CqlIdentifier viewName) { - this(null, viewName); - } - - public DefaultCreateMaterializedView( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier viewName) { - this( - keyspace, - viewName, - false, - null, - null, - ImmutableList.of(), - ImmutableList.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableMap.of(), - ImmutableMap.of()); - } - - public DefaultCreateMaterializedView( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier viewName, - boolean ifNotExists, - @Nullable CqlIdentifier baseTableKeyspace, - @Nullable CqlIdentifier baseTable, - @NonNull ImmutableList selectors, - @NonNull ImmutableList whereRelations, - @NonNull ImmutableSet partitionKeyColumns, - @NonNull ImmutableSet clusteringKeyColumns, - @NonNull ImmutableMap orderings, - @NonNull ImmutableMap options) { - this.keyspace = keyspace; - this.viewName = viewName; - this.ifNotExists = ifNotExists; - this.baseTableKeyspace = baseTableKeyspace; - this.baseTable = baseTable; - this.selectors = selectors; - this.whereRelations = whereRelations; - this.partitionKeyColumns = partitionKeyColumns; - this.clusteringKeyColumns = clusteringKeyColumns; - this.orderings = orderings; - this.options = options; - } - - @NonNull - @Override - public CreateMaterializedViewWhereStart all() { - return new DefaultCreateMaterializedView( - keyspace, - viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - ImmutableCollections.append(selectors, Selector.all()), - whereRelations, - partitionKeyColumns, - clusteringKeyColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedViewSelectionWithColumns column(@NonNull CqlIdentifier columnName) { - return new DefaultCreateMaterializedView( - keyspace, - 
viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - ImmutableCollections.append(selectors, Selector.column(columnName)), - whereRelations, - partitionKeyColumns, - clusteringKeyColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedViewSelectionWithColumns columnsIds( - @NonNull Iterable columnIds) { - ImmutableList.Builder columnSelectors = ImmutableList.builder(); - for (CqlIdentifier column : columnIds) { - columnSelectors.add(Selector.column(column)); - } - return new DefaultCreateMaterializedView( - keyspace, - viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - ImmutableCollections.concat(selectors, columnSelectors.build()), - whereRelations, - partitionKeyColumns, - clusteringKeyColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedViewWhere where(@NonNull Relation relation) { - return new DefaultCreateMaterializedView( - keyspace, - viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - selectors, - ImmutableCollections.append(whereRelations, relation), - partitionKeyColumns, - clusteringKeyColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedViewWhere where(@NonNull Iterable additionalRelations) { - return new DefaultCreateMaterializedView( - keyspace, - viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - selectors, - ImmutableCollections.concat(whereRelations, additionalRelations), - partitionKeyColumns, - clusteringKeyColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedViewPrimaryKey withPartitionKey(@NonNull CqlIdentifier columnName) { - return new DefaultCreateMaterializedView( - keyspace, - viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - selectors, - whereRelations, - Utils.appendSet(partitionKeyColumns, columnName), - clusteringKeyColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedViewPrimaryKey 
withClusteringColumn(@NonNull CqlIdentifier columnName) { - return new DefaultCreateMaterializedView( - keyspace, - viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - selectors, - whereRelations, - partitionKeyColumns, - Utils.appendSet(clusteringKeyColumns, columnName), - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedViewStart ifNotExists() { - return new DefaultCreateMaterializedView( - keyspace, - viewName, - true, - baseTableKeyspace, - baseTable, - selectors, - whereRelations, - partitionKeyColumns, - clusteringKeyColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedViewSelection asSelectFrom(@NonNull CqlIdentifier table) { - return asSelectFrom(null, table); - } - - @NonNull - @Override - public CreateMaterializedViewSelection asSelectFrom( - CqlIdentifier baseTableKeyspace, @NonNull CqlIdentifier baseTable) { - return new DefaultCreateMaterializedView( - keyspace, - viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - selectors, - whereRelations, - partitionKeyColumns, - clusteringKeyColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedView withClusteringOrderByIds( - @NonNull Map orderings) { - return withClusteringOrders(ImmutableCollections.concat(this.orderings, orderings)); - } - - @NonNull - @Override - public CreateMaterializedView withClusteringOrder( - @NonNull CqlIdentifier columnName, @NonNull ClusteringOrder order) { - return withClusteringOrders(ImmutableCollections.append(orderings, columnName, order)); - } - - @NonNull - public CreateMaterializedView withClusteringOrders( - @NonNull ImmutableMap orderings) { - return new DefaultCreateMaterializedView( - keyspace, - viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - selectors, - whereRelations, - partitionKeyColumns, - clusteringKeyColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateMaterializedView withOption(@NonNull String name, 
@NonNull Object value) { - return new DefaultCreateMaterializedView( - keyspace, - viewName, - ifNotExists, - baseTableKeyspace, - baseTable, - selectors, - whereRelations, - partitionKeyColumns, - clusteringKeyColumns, - orderings, - ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("CREATE MATERIALIZED VIEW "); - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - - CqlHelper.qualify(keyspace, viewName, builder); - - if (selectors.isEmpty()) { - // selectors not provided yet. - return builder.toString(); - } - - CqlHelper.append(selectors, builder, " AS SELECT ", ",", " FROM "); - - if (baseTable == null) { - // base table not provided yet. - return builder.toString(); - } - - CqlHelper.qualify(baseTableKeyspace, baseTable, builder); - - if (whereRelations.isEmpty()) { - // where clause not provided yet. - return builder.toString(); - } - - CqlHelper.append(whereRelations, builder, " WHERE ", " AND ", " "); - - CqlHelper.buildPrimaryKey(partitionKeyColumns, clusteringKeyColumns, builder); - - if (!orderings.isEmpty() || !options.isEmpty()) { - boolean firstOption = true; - - if (!orderings.isEmpty()) { - builder.append(" WITH "); - firstOption = false; - builder.append("CLUSTERING ORDER BY ("); - boolean firstClustering = true; - - for (Map.Entry ordering : orderings.entrySet()) { - if (firstClustering) { - firstClustering = false; - } else { - builder.append(','); - } - builder - .append(ordering.getKey().asCql(true)) - .append(' ') - .append(ordering.getValue().toString()); - } - - builder.append(')'); - } - - builder.append(OptionsUtils.buildOptions(options, firstOption)); - } - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public 
CqlIdentifier getMaterializedView() { - return viewName; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - @Nullable - public CqlIdentifier getBaseTableKeyspace() { - return baseTableKeyspace; - } - - @Nullable - public CqlIdentifier getBaseTable() { - return baseTable; - } - - @NonNull - public ImmutableList getSelectors() { - return selectors; - } - - @NonNull - public ImmutableList getWhereRelations() { - return whereRelations; - } - - @NonNull - public ImmutableSet getPartitionKeyColumns() { - return partitionKeyColumns; - } - - @NonNull - public ImmutableSet getClusteringKeyColumns() { - return clusteringKeyColumns; - } - - @NonNull - public ImmutableMap getOrderings() { - return orderings; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateTable.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateTable.java deleted file mode 100644 index 058aeccdd24..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateTable.java +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import static com.datastax.oss.driver.internal.querybuilder.schema.Utils.appendSet; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.schema.CreateTable; -import com.datastax.oss.driver.api.querybuilder.schema.CreateTableStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateTableWithOptions; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCreateTable implements CreateTableStart, CreateTable, CreateTableWithOptions { - - private final CqlIdentifier keyspace; - private final CqlIdentifier tableName; - - private final boolean ifNotExists; - private final boolean compactStorage; - - private final ImmutableMap options; - - private final ImmutableMap columnsInOrder; - - private final ImmutableSet partitionKeyColumns; - private final ImmutableSet clusteringKeyColumns; - private final ImmutableSet staticColumns; - private final ImmutableSet regularColumns; - - private final ImmutableMap orderings; - - public DefaultCreateTable(@NonNull CqlIdentifier tableName) { - this(null, tableName); - } - - public DefaultCreateTable(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { - this( - keyspace, - tableName, - false, - false, - ImmutableMap.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableMap.of(), - 
ImmutableMap.of()); - } - - public DefaultCreateTable( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier tableName, - boolean ifNotExists, - boolean compactStorage, - @NonNull ImmutableMap columnsInOrder, - @NonNull ImmutableSet partitionKeyColumns, - @NonNull ImmutableSet clusteringKeyColumns, - @NonNull ImmutableSet staticColumns, - @NonNull ImmutableSet regularColumns, - @NonNull ImmutableMap orderings, - @NonNull ImmutableMap options) { - this.keyspace = keyspace; - this.tableName = tableName; - this.ifNotExists = ifNotExists; - this.compactStorage = compactStorage; - this.columnsInOrder = columnsInOrder; - this.partitionKeyColumns = partitionKeyColumns; - this.clusteringKeyColumns = clusteringKeyColumns; - this.staticColumns = staticColumns; - this.regularColumns = regularColumns; - this.orderings = orderings; - this.options = options; - } - - @NonNull - @Override - public CreateTableStart ifNotExists() { - return new DefaultCreateTable( - keyspace, - tableName, - true, - compactStorage, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateTable withPartitionKey( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultCreateTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - ImmutableCollections.append(columnsInOrder, columnName, dataType), - appendSet(partitionKeyColumns, columnName), - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateTable withClusteringColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultCreateTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - ImmutableCollections.append(columnsInOrder, columnName, dataType), - partitionKeyColumns, - appendSet(clusteringKeyColumns, columnName), - staticColumns, - regularColumns, - orderings, - options); - 
} - - @NonNull - @Override - public CreateTable withColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultCreateTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - ImmutableCollections.append(columnsInOrder, columnName, dataType), - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - appendSet(regularColumns, columnName), - orderings, - options); - } - - @NonNull - @Override - public CreateTable withStaticColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultCreateTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - ImmutableCollections.append(columnsInOrder, columnName, dataType), - partitionKeyColumns, - clusteringKeyColumns, - appendSet(staticColumns, columnName), - regularColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateTableWithOptions withCompactStorage() { - return new DefaultCreateTable( - keyspace, - tableName, - ifNotExists, - true, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - options); - } - - @NonNull - @Override - public CreateTableWithOptions withClusteringOrderByIds( - @NonNull Map orderings) { - return withClusteringOrders(ImmutableCollections.concat(this.orderings, orderings)); - } - - @NonNull - @Override - public CreateTableWithOptions withClusteringOrder( - @NonNull CqlIdentifier columnName, @NonNull ClusteringOrder order) { - return withClusteringOrders(ImmutableCollections.append(orderings, columnName, order)); - } - - @NonNull - public CreateTableWithOptions withClusteringOrders( - @NonNull ImmutableMap orderings) { - return new DefaultCreateTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - options); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); 
- - builder.append("CREATE TABLE "); - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - - CqlHelper.qualify(keyspace, tableName, builder); - - if (columnsInOrder.isEmpty()) { - // no columns provided yet. - return builder.toString(); - } - - boolean singlePrimaryKey = partitionKeyColumns.size() == 1 && clusteringKeyColumns.size() == 0; - - builder.append(" ("); - - boolean first = true; - for (Map.Entry column : columnsInOrder.entrySet()) { - if (first) { - first = false; - } else { - builder.append(','); - } - builder - .append(column.getKey().asCql(true)) - .append(' ') - .append(column.getValue().asCql(true, true)); - - if (singlePrimaryKey && partitionKeyColumns.contains(column.getKey())) { - builder.append(" PRIMARY KEY"); - } else if (staticColumns.contains(column.getKey())) { - builder.append(" STATIC"); - } - } - - if (!singlePrimaryKey) { - builder.append(","); - CqlHelper.buildPrimaryKey(partitionKeyColumns, clusteringKeyColumns, builder); - } - - builder.append(')'); - - if (compactStorage || !orderings.isEmpty() || !options.isEmpty()) { - boolean firstOption = true; - - if (compactStorage) { - firstOption = false; - builder.append(" WITH COMPACT STORAGE"); - } - - if (!orderings.isEmpty()) { - if (firstOption) { - builder.append(" WITH "); - firstOption = false; - } else { - builder.append(" AND "); - } - builder.append("CLUSTERING ORDER BY ("); - boolean firstClustering = true; - - for (Map.Entry ordering : orderings.entrySet()) { - if (firstClustering) { - firstClustering = false; - } else { - builder.append(','); - } - builder - .append(ordering.getKey().asCql(true)) - .append(' ') - .append(ordering.getValue().toString()); - } - - builder.append(')'); - } - - builder.append(OptionsUtils.buildOptions(options, firstOption)); - } - - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - @Override - public CreateTable withOption(@NonNull String name, @NonNull Object value) { - return 
new DefaultCreateTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getTable() { - return tableName; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - public boolean isCompactStorage() { - return compactStorage; - } - - @NonNull - public ImmutableMap getColumnsInOrder() { - return columnsInOrder; - } - - @NonNull - public ImmutableSet getPartitionKeyColumns() { - return partitionKeyColumns; - } - - @NonNull - public ImmutableSet getClusteringKeyColumns() { - return clusteringKeyColumns; - } - - @NonNull - public ImmutableSet getStaticColumns() { - return staticColumns; - } - - @NonNull - public ImmutableSet getRegularColumns() { - return regularColumns; - } - - @NonNull - public ImmutableMap getOrderings() { - return orderings; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateType.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateType.java deleted file mode 100644 index 9f304ced084..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateType.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.schema.CreateType; -import com.datastax.oss.driver.api.querybuilder.schema.CreateTypeStart; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCreateType implements CreateTypeStart, CreateType { - - private final CqlIdentifier keyspace; - private final CqlIdentifier typeName; - private final boolean ifNotExists; - private final ImmutableMap fieldsInOrder; - - public DefaultCreateType(@NonNull CqlIdentifier typeName) { - this(null, typeName); - } - - public DefaultCreateType(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier typeName) { - this(keyspace, typeName, false, ImmutableMap.of()); - } - - public DefaultCreateType( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier typeName, - boolean ifNotExists, - @NonNull ImmutableMap fieldsInOrder) { - this.keyspace = keyspace; - this.typeName = typeName; - this.ifNotExists = ifNotExists; - this.fieldsInOrder = fieldsInOrder; - } - - @NonNull - @Override - public CreateType withField(@NonNull CqlIdentifier 
fieldName, @NonNull DataType dataType) { - return new DefaultCreateType( - keyspace, - typeName, - ifNotExists, - ImmutableCollections.append(fieldsInOrder, fieldName, dataType)); - } - - @NonNull - @Override - public CreateTypeStart ifNotExists() { - return new DefaultCreateType(keyspace, typeName, true, fieldsInOrder); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); - - builder.append("CREATE TYPE "); - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - - CqlHelper.qualify(keyspace, typeName, builder); - - if (fieldsInOrder.isEmpty()) { - // no fields provided yet. - return builder.toString(); - } - - builder.append(" ("); - - boolean first = true; - for (Map.Entry field : fieldsInOrder.entrySet()) { - if (first) { - first = false; - } else { - builder.append(','); - } - builder - .append(field.getKey().asCql(true)) - .append(' ') - .append(field.getValue().asCql(true, true)); - } - builder.append(')'); - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getType() { - return typeName; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - @NonNull - public ImmutableMap getFieldsInOrder() { - return fieldsInOrder; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDrop.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDrop.java deleted file mode 100644 index b5d164e77db..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDrop.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.schema.Drop; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDrop implements Drop { - - private final CqlIdentifier keyspace; - private final CqlIdentifier itemName; - private final String schemaTypeName; - - private final boolean ifExists; - - public DefaultDrop(@NonNull CqlIdentifier itemName, @NonNull String schemaTypeName) { - this(null, itemName, schemaTypeName); - } - - public DefaultDrop( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier itemName, - @NonNull String schemaTypeName) { - this(keyspace, itemName, schemaTypeName, false); - } - - public DefaultDrop( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier itemName, - @NonNull String schemaTypeName, - boolean ifExists) { - this.keyspace = keyspace; - this.itemName = itemName; - this.schemaTypeName = schemaTypeName; - this.ifExists = ifExists; - } - - @NonNull - @Override - public Drop ifExists() { - return new DefaultDrop(keyspace, 
itemName, schemaTypeName, true); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("DROP ").append(schemaTypeName).append(' '); - - if (ifExists) { - builder.append("IF EXISTS "); - } - - CqlHelper.qualify(keyspace, itemName, builder); - - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getName() { - return itemName; - } - - @NonNull - public String getSchemaType() { - return schemaTypeName; - } - - public boolean isIfExists() { - return ifExists; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDropKeyspace.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDropKeyspace.java deleted file mode 100644 index 905dfa16871..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDropKeyspace.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.schema.Drop; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDropKeyspace implements Drop { - - private final CqlIdentifier keyspaceName; - private final boolean ifExists; - - public DefaultDropKeyspace(@NonNull CqlIdentifier keyspaceName) { - this(keyspaceName, false); - } - - public DefaultDropKeyspace(@NonNull CqlIdentifier keyspaceName, boolean ifExists) { - this.keyspaceName = keyspaceName; - this.ifExists = ifExists; - } - - @NonNull - @Override - public Drop ifExists() { - return new DefaultDropKeyspace(keyspaceName, true); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("DROP KEYSPACE "); - - if (ifExists) { - builder.append("IF EXISTS "); - } - - builder.append(keyspaceName.asCql(true)); - - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - public CqlIdentifier getKeyspace() { - return keyspaceName; - } - - public boolean isIfExists() { - return ifExists; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/OptionsUtils.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/OptionsUtils.java deleted file mode 100644 index 4e80f72f1e3..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/OptionsUtils.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public class OptionsUtils { - @NonNull - public static String buildOptions(@NonNull Map options, boolean first) { - StringBuilder builder = new StringBuilder(); - for (Map.Entry option : options.entrySet()) { - if (first) { - builder.append(" WITH "); - first = false; - } else { - builder.append(" AND "); - } - String value = OptionsUtils.extractOptionValue(option.getValue()); - builder.append(option.getKey()).append("=").append(value); - } - return builder.toString(); - } - - @NonNull - private static String extractOptionValue(@NonNull Object option) { - StringBuilder optionValue = new StringBuilder(); - if (option instanceof String) { - optionValue.append("'").append((String) option).append("'"); - } else if (option instanceof Map) { - @SuppressWarnings("unchecked") - Map optionMap = (Map) option; - boolean first = true; - optionValue.append("{"); - for (Map.Entry subOption : optionMap.entrySet()) { - if (first) { - first = false; - } else { - optionValue.append(","); - } - optionValue - .append("'") - .append(subOption.getKey()) - .append("':") - .append(extractOptionValue(subOption.getValue())); - } - optionValue.append("}"); - } else { - optionValue.append(option); - } - return optionValue.toString(); - } -} diff --git 
a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/RawOptionsWrapper.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/RawOptionsWrapper.java deleted file mode 100644 index 64cdb50f887..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/RawOptionsWrapper.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.data.ByteUtils; - -/** - * Wrapper class to indicate that the contained String value should be understood to represent a CQL - * literal that can be included directly in a CQL statement (i.e. without escaping). 
- */ -public class RawOptionsWrapper { - private final String val; - - private RawOptionsWrapper(String val) { - this.val = val; - } - - public static RawOptionsWrapper of(String val) { - return new RawOptionsWrapper(val); - } - - public static RawOptionsWrapper of(byte[] val) { - return new RawOptionsWrapper(ByteUtils.toHexString(val)); - } - - @Override - public String toString() { - return this.val; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/Utils.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/Utils.java deleted file mode 100644 index 166c0b29290..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/Utils.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; - -public class Utils { - /** Convenience method for creating a new {@link ImmutableSet} with an appended value. 
*/ - @NonNull - public static ImmutableSet appendSet(@NonNull ImmutableSet set, @NonNull E newValue) { - return ImmutableSet.builder().addAll(set).add(newValue).build(); - } - - /** Convenience method for creating a new {@link ImmutableSet} with concatenated iterable. */ - @NonNull - public static ImmutableSet concatSet( - @NonNull ImmutableSet set, @NonNull Iterable toConcat) { - return ImmutableSet.builder().addAll(set).addAll(toConcat).build(); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultCompactionStrategy.java deleted file mode 100644 index 11d5341fa0a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultCompactionStrategy.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema.compaction; - -import com.datastax.oss.driver.api.querybuilder.schema.compaction.CompactionStrategy; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class DefaultCompactionStrategy> - implements CompactionStrategy { - - private final ImmutableMap options; - - protected DefaultCompactionStrategy(@NonNull String className) { - this(ImmutableMap.of("class", className)); - } - - protected DefaultCompactionStrategy(@NonNull ImmutableMap options) { - this.options = options; - } - - @NonNull - public ImmutableMap getInternalOptions() { - return options; - } - - @NonNull - @Override - public Map getOptions() { - return options; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultLeveledCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultLeveledCompactionStrategy.java deleted file mode 100644 index a6933a9d1b5..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultLeveledCompactionStrategy.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.schema.compaction; - -import com.datastax.oss.driver.api.querybuilder.schema.compaction.LeveledCompactionStrategy; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultLeveledCompactionStrategy - extends DefaultCompactionStrategy - implements LeveledCompactionStrategy { - - public DefaultLeveledCompactionStrategy() { - super("LeveledCompactionStrategy"); - } - - protected DefaultLeveledCompactionStrategy(@NonNull ImmutableMap options) { - super(options); - } - - @NonNull - @Override - public DefaultLeveledCompactionStrategy withOption(@NonNull String name, @NonNull Object value) { - return new DefaultLeveledCompactionStrategy( - ImmutableCollections.append(getInternalOptions(), name, value)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultSizeTieredCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultSizeTieredCompactionStrategy.java deleted file mode 100644 index 2c3710452a6..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultSizeTieredCompactionStrategy.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.schema.compaction; - -import com.datastax.oss.driver.api.querybuilder.schema.compaction.SizeTieredCompactionStrategy; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultSizeTieredCompactionStrategy - extends DefaultCompactionStrategy - implements SizeTieredCompactionStrategy { - - public DefaultSizeTieredCompactionStrategy() { - super("SizeTieredCompactionStrategy"); - } - - protected DefaultSizeTieredCompactionStrategy(@NonNull ImmutableMap options) { - super(options); - } - - @NonNull - @Override - public DefaultSizeTieredCompactionStrategy withOption( - @NonNull String name, @NonNull Object value) { - return new DefaultSizeTieredCompactionStrategy( - ImmutableCollections.append(getInternalOptions(), name, value)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultTimeWindowCompactionStrategy.java 
b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultTimeWindowCompactionStrategy.java deleted file mode 100644 index 4a2d28bb87e..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultTimeWindowCompactionStrategy.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.schema.compaction; - -import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultTimeWindowCompactionStrategy - extends DefaultCompactionStrategy - implements TimeWindowCompactionStrategy { - public DefaultTimeWindowCompactionStrategy() { - super("TimeWindowCompactionStrategy"); - } - - protected DefaultTimeWindowCompactionStrategy(@NonNull ImmutableMap options) { - super(options); - } - - @NonNull - @Override - public DefaultTimeWindowCompactionStrategy withOption( - @NonNull String name, @NonNull Object value) { - return new DefaultTimeWindowCompactionStrategy( - ImmutableCollections.append(getInternalOptions(), name, value)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/AllSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/AllSelector.java deleted file mode 100644 index 3d87adf4f0d..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/AllSelector.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -public enum AllSelector implements Selector { - INSTANCE; - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - throw new IllegalStateException("Can't alias the '*' selector"); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append('*'); - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return null; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ArithmeticSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ArithmeticSelector.java deleted file mode 100644 index 6af0ecfee87..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ArithmeticSelector.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.internal.querybuilder.ArithmeticOperator; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class ArithmeticSelector implements Selector { - - protected final ArithmeticOperator operator; - - protected ArithmeticSelector(@NonNull ArithmeticOperator operator) { - Preconditions.checkNotNull(operator); - this.operator = operator; - } - - @NonNull - public ArithmeticOperator getOperator() { - return operator; - } - - protected static void appendAndMaybeParenthesize( - int myPrecedence, @NonNull Selector child, @NonNull StringBuilder builder) { - boolean parenthesize = - (child instanceof ArithmeticSelector) - && (((ArithmeticSelector) child).operator.getPrecedenceLeft() < myPrecedence); - if (parenthesize) { - builder.append('('); - } - child.appendTo(builder); - if (parenthesize) { - builder.append(')'); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/BinaryArithmeticSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/BinaryArithmeticSelector.java deleted file mode 100644 index d4e3b652dba..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/BinaryArithmeticSelector.java +++ /dev/null @@ -1,105 +0,0 @@ 
-/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.internal.querybuilder.ArithmeticOperator; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class BinaryArithmeticSelector extends ArithmeticSelector { - - private final Selector left; - private final Selector right; - private final CqlIdentifier alias; - - public BinaryArithmeticSelector( - @NonNull ArithmeticOperator operator, @NonNull Selector left, @NonNull Selector right) { - this(operator, left, right, null); - } - - public BinaryArithmeticSelector( - @NonNull ArithmeticOperator operator, - @NonNull Selector left, - @NonNull Selector right, - @Nullable CqlIdentifier alias) { - super(operator); - Preconditions.checkNotNull(left); - Preconditions.checkNotNull(right); - this.left = left; - this.right = right; - this.alias = alias; - } - - 
@NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new BinaryArithmeticSelector(operator, left, right, alias); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - appendAndMaybeParenthesize(operator.getPrecedenceLeft(), left, builder); - builder.append(operator.getSymbol()); - appendAndMaybeParenthesize(operator.getPrecedenceRight(), right, builder); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @NonNull - public Selector getLeft() { - return left; - } - - @NonNull - public Selector getRight() { - return right; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof BinaryArithmeticSelector) { - BinaryArithmeticSelector that = (BinaryArithmeticSelector) other; - return this.operator.equals(that.operator) - && this.left.equals(that.left) - && this.right.equals(that.right) - && Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(operator, left, right, alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CastSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CastSelector.java deleted file mode 100644 index d256407421e..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CastSelector.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class CastSelector implements Selector { - - private final Selector selector; - private final DataType targetType; - private final CqlIdentifier alias; - - public CastSelector(@NonNull Selector selector, @NonNull DataType targetType) { - this(selector, targetType, null); - } - - public CastSelector( - @NonNull Selector selector, @NonNull DataType targetType, @Nullable CqlIdentifier alias) { - Preconditions.checkNotNull(selector); - Preconditions.checkNotNull(targetType); - Preconditions.checkArgument(selector.getAlias() == null, "Inner selector can't be aliased"); - this.selector = selector; - this.targetType = targetType; - this.alias = alias; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append("CAST("); - selector.appendTo(builder); - builder.append(" AS ").append(targetType.asCql(false, true)).append(')'); - if (alias != null) { - builder.append(" 
AS ").append(alias.asCql(true)); - } - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new CastSelector(selector, targetType, alias); - } - - @NonNull - public Selector getSelector() { - return selector; - } - - @NonNull - public DataType getTargetType() { - return targetType; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CastSelector) { - CastSelector that = (CastSelector) other; - return this.selector.equals(that.selector) - && this.targetType.equals(that.targetType) - && Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(selector, targetType, alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CollectionSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CollectionSelector.java deleted file mode 100644 index dc9929a0f18..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CollectionSelector.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class CollectionSelector implements Selector { - - private final Iterable elementSelectors; - private final String opening; - private final String closing; - private final CqlIdentifier alias; - - protected CollectionSelector( - @NonNull Iterable elementSelectors, - @NonNull String opening, - @NonNull String closing, - @Nullable CqlIdentifier alias) { - Preconditions.checkNotNull(elementSelectors); - Preconditions.checkArgument( - elementSelectors.iterator().hasNext(), "Must have at least one selector"); - checkNoAlias(elementSelectors); - Preconditions.checkNotNull(opening); - Preconditions.checkNotNull(closing); - this.elementSelectors = elementSelectors; - this.opening = opening; - this.closing = closing; - this.alias = alias; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - CqlHelper.append(elementSelectors, builder, opening, ",", closing); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @NonNull - public Iterable getElementSelectors() { - return elementSelectors; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CollectionSelector) { - 
CollectionSelector that = (CollectionSelector) other; - return Iterables.elementsEqual(this.elementSelectors, that.elementSelectors) - && this.opening.equals(that.opening) - && this.closing.equals(that.closing) - && Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(elementSelectors, opening, closing, alias); - } - - private static void checkNoAlias(Iterable elementSelectors) { - String offendingAliases = null; - for (Selector selector : elementSelectors) { - CqlIdentifier alias = selector.getAlias(); - if (alias != null) { - if (offendingAliases == null) { - offendingAliases = alias.asCql(true); - } else { - offendingAliases += ", " + alias.asCql(true); - } - } - } - if (offendingAliases != null) { - throw new IllegalArgumentException( - "Can't use aliases in selection list, offending aliases: " + offendingAliases); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ColumnSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ColumnSelector.java deleted file mode 100644 index 43dcd46042d..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ColumnSelector.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class ColumnSelector implements Selector { - - private final CqlIdentifier columnId; - private final CqlIdentifier alias; - - public ColumnSelector(@NonNull CqlIdentifier columnId) { - this(columnId, null); - } - - public ColumnSelector(@NonNull CqlIdentifier columnId, @Nullable CqlIdentifier alias) { - Preconditions.checkNotNull(columnId); - this.columnId = columnId; - this.alias = alias; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new ColumnSelector(columnId, alias); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append(columnId.asCql(true)); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @NonNull - public CqlIdentifier getColumnId() { - return columnId; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ColumnSelector) { - ColumnSelector that = (ColumnSelector) other; - return this.columnId.equals(that.columnId) && 
Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(columnId, alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CountAllSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CountAllSelector.java deleted file mode 100644 index 4efbaae5924..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CountAllSelector.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class CountAllSelector implements Selector { - - private final CqlIdentifier alias; - - public CountAllSelector() { - this(null); - } - - public CountAllSelector(@Nullable CqlIdentifier alias) { - this.alias = alias; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new CountAllSelector(alias); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append("count(*)"); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CountAllSelector) { - CountAllSelector that = (CountAllSelector) other; - return Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return (alias == null) ? 0 : alias.hashCode(); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultBindMarker.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultBindMarker.java deleted file mode 100644 index 328c51328ba..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultBindMarker.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultBindMarker implements BindMarker { - - private final CqlIdentifier id; - - public DefaultBindMarker(@Nullable CqlIdentifier id) { - this.id = id; - } - - public DefaultBindMarker() { - this(null); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - if (id == null) { - builder.append('?'); - } else { - builder.append(':').append(id.asCql(true)); - } - } - - @Override - public boolean isIdempotent() { - return true; - } - - @Nullable - public CqlIdentifier getId() { - return id; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultSelect.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultSelect.java deleted file mode 100644 index 5daf252a9eb..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultSelect.java +++ /dev/null @@ -1,585 +0,0 @@ -/* - * 
Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.select.Select; -import com.datastax.oss.driver.api.querybuilder.select.SelectFrom; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import 
edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultSelect implements SelectFrom, Select { - - private static final ImmutableList SELECT_ALL = ImmutableList.of(AllSelector.INSTANCE); - - private final CqlIdentifier keyspace; - private final CqlIdentifier table; - private final boolean isJson; - private final boolean isDistinct; - private final ImmutableList selectors; - private final ImmutableList relations; - private final ImmutableList groupByClauses; - private final ImmutableMap orderings; - private final Ann ann; - private final Object limit; - private final Object perPartitionLimit; - private final boolean allowsFiltering; - - public DefaultSelect(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - this( - keyspace, - table, - false, - false, - ImmutableList.of(), - ImmutableList.of(), - ImmutableList.of(), - ImmutableMap.of(), - null, - null, - null, - false); - } - - /** - * This constructor is public only as a convenience for custom extensions of the query builder. - * - * @param selectors if it contains {@link AllSelector#INSTANCE}, that must be the only element. - * This isn't re-checked because methods that call this constructor internally already do it, - * make sure you do it yourself. - * @param ann Approximate nearest neighbor. ANN ordering does not support secondary ordering or - * ASC order. 
- */ - public DefaultSelect( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier table, - boolean isJson, - boolean isDistinct, - @NonNull ImmutableList selectors, - @NonNull ImmutableList relations, - @NonNull ImmutableList groupByClauses, - @NonNull ImmutableMap orderings, - @Nullable Ann ann, - @Nullable Object limit, - @Nullable Object perPartitionLimit, - boolean allowsFiltering) { - this.groupByClauses = groupByClauses; - this.orderings = orderings; - Preconditions.checkArgument( - limit == null - || (limit instanceof Integer && (Integer) limit > 0) - || limit instanceof BindMarker, - "limit must be a strictly positive integer or a bind marker"); - Preconditions.checkArgument( - orderings.isEmpty() || ann == null, "ANN ordering does not support secondary ordering"); - this.ann = ann; - this.keyspace = keyspace; - this.table = table; - this.isJson = isJson; - this.isDistinct = isDistinct; - this.selectors = selectors; - this.relations = relations; - this.limit = limit; - this.perPartitionLimit = perPartitionLimit; - this.allowsFiltering = allowsFiltering; - } - - @NonNull - @Override - public SelectFrom json() { - return new DefaultSelect( - keyspace, - table, - true, - isDistinct, - selectors, - relations, - groupByClauses, - orderings, - ann, - limit, - perPartitionLimit, - allowsFiltering); - } - - @NonNull - @Override - public SelectFrom distinct() { - return new DefaultSelect( - keyspace, - table, - isJson, - true, - selectors, - relations, - groupByClauses, - orderings, - ann, - limit, - perPartitionLimit, - allowsFiltering); - } - - @NonNull - @Override - public Select selector(@NonNull Selector selector) { - ImmutableList newSelectors; - if (selector == AllSelector.INSTANCE) { - // '*' cancels any previous one - newSelectors = SELECT_ALL; - } else if (SELECT_ALL.equals(selectors)) { - // previous '*' gets cancelled - newSelectors = ImmutableList.of(selector); - } else { - newSelectors = ImmutableCollections.append(selectors, selector); - } - 
return withSelectors(newSelectors); - } - - @NonNull - @Override - public Select selectors(@NonNull Iterable additionalSelectors) { - ImmutableList.Builder newSelectors = ImmutableList.builder(); - if (!SELECT_ALL.equals(selectors)) { // previous '*' gets cancelled - newSelectors.addAll(selectors); - } - for (Selector selector : additionalSelectors) { - if (selector == AllSelector.INSTANCE) { - throw new IllegalArgumentException("Can't pass the * selector to selectors()"); - } - newSelectors.add(selector); - } - return withSelectors(newSelectors.build()); - } - - @NonNull - @Override - public Select as(@NonNull CqlIdentifier alias) { - if (SELECT_ALL.equals(selectors)) { - throw new IllegalStateException("Can't alias the * selector"); - } else if (selectors.isEmpty()) { - throw new IllegalStateException("Can't alias, no selectors defined"); - } - return withSelectors(ImmutableCollections.modifyLast(selectors, last -> last.as(alias))); - } - - @NonNull - public Select withSelectors(@NonNull ImmutableList newSelectors) { - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - newSelectors, - relations, - groupByClauses, - orderings, - ann, - limit, - perPartitionLimit, - allowsFiltering); - } - - @NonNull - @Override - public Select where(@NonNull Relation relation) { - return withRelations(ImmutableCollections.append(relations, relation)); - } - - @NonNull - @Override - public Select where(@NonNull Iterable additionalRelations) { - return withRelations(ImmutableCollections.concat(relations, additionalRelations)); - } - - @NonNull - public Select withRelations(@NonNull ImmutableList newRelations) { - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - selectors, - newRelations, - groupByClauses, - orderings, - ann, - limit, - perPartitionLimit, - allowsFiltering); - } - - @NonNull - @Override - public Select groupBy(@NonNull Selector groupByClause) { - return withGroupByClauses(ImmutableCollections.append(groupByClauses, 
groupByClause)); - } - - @NonNull - @Override - public Select groupBy(@NonNull Iterable newGroupByClauses) { - return withGroupByClauses(ImmutableCollections.concat(groupByClauses, newGroupByClauses)); - } - - @NonNull - public Select withGroupByClauses(@NonNull ImmutableList newGroupByClauses) { - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - selectors, - relations, - newGroupByClauses, - orderings, - ann, - limit, - perPartitionLimit, - allowsFiltering); - } - - @NonNull - @Override - public Select orderBy(@NonNull CqlIdentifier columnId, @NonNull ClusteringOrder order) { - return withOrderings(ImmutableCollections.append(orderings, columnId, order)); - } - - @NonNull - @Override - public Select orderByAnnOf(@NonNull String columnName, @NonNull CqlVector ann) { - return withAnn(new Ann(CqlIdentifier.fromCql(columnName), ann)); - } - - @NonNull - @Override - public Select orderByAnnOf(@NonNull CqlIdentifier columnId, @NonNull CqlVector ann) { - return withAnn(new Ann(columnId, ann)); - } - - @NonNull - @Override - public Select orderByIds(@NonNull Map newOrderings) { - return withOrderings(ImmutableCollections.concat(orderings, newOrderings)); - } - - @NonNull - public Select withOrderings(@NonNull ImmutableMap newOrderings) { - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - selectors, - relations, - groupByClauses, - newOrderings, - ann, - limit, - perPartitionLimit, - allowsFiltering); - } - - @NonNull - Select withAnn(@NonNull Ann ann) { - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - selectors, - relations, - groupByClauses, - orderings, - ann, - limit, - perPartitionLimit, - allowsFiltering); - } - - @NonNull - @Override - public Select limit(int limit) { - Preconditions.checkArgument(limit > 0, "Limit must be strictly positive"); - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - selectors, - relations, - groupByClauses, - orderings, - ann, - limit, - 
perPartitionLimit, - allowsFiltering); - } - - @NonNull - @Override - public Select limit(@Nullable BindMarker bindMarker) { - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - selectors, - relations, - groupByClauses, - orderings, - ann, - bindMarker, - perPartitionLimit, - allowsFiltering); - } - - @NonNull - @Override - public Select perPartitionLimit(int perPartitionLimit) { - Preconditions.checkArgument( - perPartitionLimit > 0, "perPartitionLimit must be strictly positive"); - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - selectors, - relations, - groupByClauses, - orderings, - ann, - limit, - perPartitionLimit, - allowsFiltering); - } - - @NonNull - @Override - public Select perPartitionLimit(@Nullable BindMarker bindMarker) { - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - selectors, - relations, - groupByClauses, - orderings, - ann, - limit, - bindMarker, - allowsFiltering); - } - - @NonNull - @Override - public Select allowFiltering() { - return new DefaultSelect( - keyspace, - table, - isJson, - isDistinct, - selectors, - relations, - groupByClauses, - orderings, - ann, - limit, - perPartitionLimit, - true); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); - - builder.append("SELECT"); - if (isJson) { - builder.append(" JSON"); - } - if (isDistinct) { - builder.append(" DISTINCT"); - } - - CqlHelper.append(selectors, builder, " ", ",", null); - - builder.append(" FROM "); - CqlHelper.qualify(keyspace, table, builder); - - CqlHelper.append(relations, builder, " WHERE ", " AND ", null); - CqlHelper.append(groupByClauses, builder, " GROUP BY ", ",", null); - - if (ann != null) { - builder.append(" ORDER BY ").append(this.ann.columnId.asCql(true)).append(" ANN OF "); - QueryBuilder.literal(ann.vector).appendTo(builder); - } else { - boolean first = true; - for (Map.Entry entry : orderings.entrySet()) { - if (first) { - 
builder.append(" ORDER BY "); - first = false; - } else { - builder.append(","); - } - builder.append(entry.getKey().asCql(true)).append(" ").append(entry.getValue().name()); - } - } - - if (limit != null) { - builder.append(" LIMIT "); - if (limit instanceof BindMarker) { - ((BindMarker) limit).appendTo(builder); - } else { - builder.append(limit); - } - } - - if (perPartitionLimit != null) { - builder.append(" PER PARTITION LIMIT "); - if (perPartitionLimit instanceof BindMarker) { - ((BindMarker) perPartitionLimit).appendTo(builder); - } else { - builder.append(perPartitionLimit); - } - } - - if (allowsFiltering) { - builder.append(" ALLOW FILTERING"); - } - - return builder.toString(); - } - - @NonNull - @Override - public SimpleStatement build() { - return builder().build(); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Object... values) { - return builder().addPositionalValues(values).build(); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Map namedValues) { - SimpleStatementBuilder builder = builder(); - for (Map.Entry entry : namedValues.entrySet()) { - builder.addNamedValue(entry.getKey(), entry.getValue()); - } - return builder.build(); - } - - @NonNull - @Override - public SimpleStatementBuilder builder() { - // SELECT statements are always idempotent - return SimpleStatement.builder(asCql()).setIdempotence(true); - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getTable() { - return table; - } - - public boolean isJson() { - return isJson; - } - - public boolean isDistinct() { - return isDistinct; - } - - @NonNull - public ImmutableList getSelectors() { - return selectors; - } - - @NonNull - public ImmutableList getRelations() { - return relations; - } - - @NonNull - public ImmutableList getGroupByClauses() { - return groupByClauses; - } - - @NonNull - public ImmutableMap getOrderings() { - return orderings; - } - - @Nullable - public Object 
getLimit() { - return limit; - } - - @Nullable - public Ann getAnn() { - return ann; - } - - @Nullable - public Object getPerPartitionLimit() { - return perPartitionLimit; - } - - public boolean allowsFiltering() { - return allowsFiltering; - } - - @Override - public String toString() { - return asCql(); - } - - public static class Ann { - private final CqlVector vector; - private final CqlIdentifier columnId; - - private Ann(CqlIdentifier columnId, CqlVector vector) { - this.vector = vector; - this.columnId = columnId; - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ElementSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ElementSelector.java deleted file mode 100644 index d2ac61e8aee..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ElementSelector.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class ElementSelector implements Selector { - - private final Selector collection; - private final Term index; - private final CqlIdentifier alias; - - public ElementSelector(@NonNull Selector collection, @NonNull Term index) { - this(collection, index, null); - } - - public ElementSelector( - @NonNull Selector collection, @NonNull Term index, @Nullable CqlIdentifier alias) { - Preconditions.checkNotNull(collection); - Preconditions.checkNotNull(index); - this.collection = collection; - this.index = index; - this.alias = alias; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new ElementSelector(collection, index, alias); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - collection.appendTo(builder); - builder.append('['); - index.appendTo(builder); - builder.append(']'); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @NonNull - public Selector getCollection() { - return collection; - } - - @NonNull - public Term getIndex() { - return index; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ElementSelector) { - ElementSelector that = (ElementSelector) other; - return this.collection.equals(that.collection) - && this.index.equals(that.index) - && Objects.equals(this.alias, that.alias); - } else { - 
return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(collection, index, alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FieldSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FieldSelector.java deleted file mode 100644 index a39a270d9f9..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FieldSelector.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class FieldSelector implements Selector { - - private final Selector udt; - private final CqlIdentifier fieldId; - private final CqlIdentifier alias; - - public FieldSelector(@NonNull Selector udt, @NonNull CqlIdentifier fieldId) { - this(udt, fieldId, null); - } - - public FieldSelector( - @NonNull Selector udt, @NonNull CqlIdentifier fieldId, @Nullable CqlIdentifier alias) { - Preconditions.checkNotNull(udt); - Preconditions.checkNotNull(fieldId); - this.udt = udt; - this.fieldId = fieldId; - this.alias = alias; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new FieldSelector(udt, fieldId, alias); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - udt.appendTo(builder); - builder.append('.').append(fieldId.asCql(true)); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @NonNull - public Selector getUdt() { - return udt; - } - - @NonNull - public CqlIdentifier getFieldId() { - return fieldId; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof FieldSelector) { - FieldSelector that = (FieldSelector) other; - return this.udt.equals(that.udt) - && this.fieldId.equals(that.fieldId) - && Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(udt, fieldId, alias); - } -} diff --git 
a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FunctionSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FunctionSelector.java deleted file mode 100644 index 98a0bb07c41..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FunctionSelector.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class FunctionSelector extends CollectionSelector { - - private final CqlIdentifier keyspaceId; - private final CqlIdentifier functionId; - - public FunctionSelector( - @Nullable CqlIdentifier keyspaceId, - @NonNull CqlIdentifier functionId, - @NonNull Iterable arguments) { - this(keyspaceId, functionId, arguments, null); - } - - public FunctionSelector( - @Nullable CqlIdentifier keyspaceId, - @NonNull CqlIdentifier functionId, - @NonNull Iterable elementSelectors, - @Nullable CqlIdentifier alias) { - super(elementSelectors, buildOpening(keyspaceId, functionId), ")", alias); - this.keyspaceId = keyspaceId; - this.functionId = functionId; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new FunctionSelector(keyspaceId, functionId, getElementSelectors(), alias); - } - - @Nullable - public CqlIdentifier getKeyspaceId() { - return keyspaceId; - } - - @NonNull - public CqlIdentifier getFunctionId() { - return functionId; - } - - /** Returns the arguments of the function. */ - @NonNull - @Override - public Iterable getElementSelectors() { - // Overridden only to customize the javadoc - return super.getElementSelectors(); - } - - private static String buildOpening(CqlIdentifier keyspaceId, CqlIdentifier functionId) { - return (keyspaceId == null) - ? functionId.asCql(true) + "(" - : keyspaceId.asCql(true) + "." 
+ functionId.asCql(true) + "("; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ListSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ListSelector.java deleted file mode 100644 index 2a8ea73e474..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ListSelector.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class ListSelector extends CollectionSelector { - - public ListSelector(@NonNull Iterable elementSelectors) { - this(elementSelectors, null); - } - - public ListSelector(@NonNull Iterable elementSelectors, @Nullable CqlIdentifier alias) { - super(elementSelectors, "[", "]", alias); - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new ListSelector(getElementSelectors(), alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/MapSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/MapSelector.java deleted file mode 100644 index 27f28d3e0e2..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/MapSelector.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class MapSelector implements Selector { - - private final Map elementSelectors; - private final DataType keyType; - private final DataType valueType; - private final CqlIdentifier alias; - - public MapSelector( - @NonNull Map elementSelectors, - @Nullable DataType keyType, - @Nullable DataType valueType) { - this(elementSelectors, keyType, valueType, null); - } - - public MapSelector( - @NonNull Map elementSelectors, - @Nullable DataType keyType, - @Nullable DataType valueType, - @Nullable CqlIdentifier alias) { - Preconditions.checkNotNull(elementSelectors); - Preconditions.checkArgument( - !elementSelectors.isEmpty(), "Must have at least one key/value pair"); - checkNoAlias(elementSelectors); - Preconditions.checkArgument( - (keyType == null) == (valueType == null), - "Key and value type must be either both null or both non-null"); - this.elementSelectors = elementSelectors; - this.keyType = keyType; - this.valueType = valueType; - this.alias = alias; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new MapSelector(elementSelectors, keyType, valueType, alias); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - if (keyType != null) { - assert valueType != null; - builder - .append("(map<") - .append(keyType.asCql(false, true)) - .append(',') - .append(valueType.asCql(false, true)) - .append(">)"); - } - builder.append("{"); - boolean first = true; - for (Map.Entry entry : 
elementSelectors.entrySet()) { - if (first) { - first = false; - } else { - builder.append(","); - } - entry.getKey().appendTo(builder); - builder.append(":"); - entry.getValue().appendTo(builder); - } - builder.append("}"); - - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @NonNull - public Map getElementSelectors() { - return elementSelectors; - } - - @Nullable - public DataType getKeyType() { - return keyType; - } - - @Nullable - public DataType getValueType() { - return valueType; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof MapSelector) { - MapSelector that = (MapSelector) other; - return this.elementSelectors.equals(that.elementSelectors) - && Objects.equals(this.keyType, that.keyType) - && Objects.equals(this.valueType, that.valueType) - && Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(elementSelectors, keyType, valueType, alias); - } - - private static void checkNoAlias(Map elementSelectors) { - String offendingAliases = null; - for (Map.Entry entry : elementSelectors.entrySet()) { - offendingAliases = appendIfNotNull(offendingAliases, entry.getKey().getAlias()); - offendingAliases = appendIfNotNull(offendingAliases, entry.getValue().getAlias()); - } - if (offendingAliases != null) { - throw new IllegalArgumentException( - "Can't use aliases in selection map, offending aliases: " + offendingAliases); - } - } - - private static String appendIfNotNull(String offendingAliases, CqlIdentifier alias) { - if (alias == null) { - return offendingAliases; - } else if (offendingAliases == null) { - return alias.asCql(true); - } else { - return offendingAliases + ", " + alias.asCql(true); - } - } -} diff --git 
a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/OppositeSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/OppositeSelector.java deleted file mode 100644 index 05d27421cb8..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/OppositeSelector.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.internal.querybuilder.ArithmeticOperator; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class OppositeSelector extends ArithmeticSelector { - - private final Selector argument; - private final CqlIdentifier alias; - - public OppositeSelector(@NonNull Selector argument) { - this(argument, null); - } - - public OppositeSelector(@NonNull Selector argument, @Nullable CqlIdentifier alias) { - super(ArithmeticOperator.OPPOSITE); - Preconditions.checkNotNull(argument); - this.argument = argument; - this.alias = alias; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new OppositeSelector(argument, alias); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append('-'); - appendAndMaybeParenthesize(operator.getPrecedenceLeft(), argument, builder); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @NonNull - public Selector getArgument() { - return argument; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof OppositeSelector) { - OppositeSelector that = (OppositeSelector) other; - return this.argument.equals(that.argument) && Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(argument, alias); - } -} diff --git 
a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/RangeSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/RangeSelector.java deleted file mode 100644 index e63eef0da50..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/RangeSelector.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class RangeSelector implements Selector { - - private final Selector collection; - private final Term left; - private final Term right; - private final CqlIdentifier alias; - - public RangeSelector(@NonNull Selector collection, @Nullable Term left, @Nullable Term right) { - this(collection, left, right, null); - } - - public RangeSelector( - @NonNull Selector collection, - @Nullable Term left, - @Nullable Term right, - @Nullable CqlIdentifier alias) { - Preconditions.checkNotNull(collection); - Preconditions.checkArgument( - left != null || right != null, "At least one of the bounds must be specified"); - this.collection = collection; - this.left = left; - this.right = right; - this.alias = alias; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new RangeSelector(collection, left, right, alias); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - collection.appendTo(builder); - builder.append('['); - if (left != null) { - left.appendTo(builder); - } - builder.append(".."); - if (right != null) { - right.appendTo(builder); - } - builder.append(']'); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @NonNull - public Selector getCollection() { - return collection; - } - - @Nullable - public Term getLeft() { - return left; - } - - @Nullable - public Term getRight() { - return right; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; 
- } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof RangeSelector) { - RangeSelector that = (RangeSelector) other; - return this.collection.equals(that.collection) - && Objects.equals(this.left, that.left) - && Objects.equals(this.right, that.right) - && Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(collection, left, right, alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/SetSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/SetSelector.java deleted file mode 100644 index 4e2d4221a31..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/SetSelector.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class SetSelector extends CollectionSelector { - - public SetSelector(@NonNull Iterable elementSelectors) { - this(elementSelectors, null); - } - - public SetSelector(@NonNull Iterable elementSelectors, @Nullable CqlIdentifier alias) { - super(elementSelectors, "{", "}", alias); - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new SetSelector(getElementSelectors(), alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TupleSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TupleSelector.java deleted file mode 100644 index 3d3a351f7c8..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TupleSelector.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class TupleSelector extends CollectionSelector { - - public TupleSelector(@NonNull Iterable elementSelectors) { - this(elementSelectors, null); - } - - public TupleSelector( - @NonNull Iterable elementSelectors, @Nullable CqlIdentifier alias) { - super(elementSelectors, "(", ")", alias); - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new TupleSelector(getElementSelectors(), alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TypeHintSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TypeHintSelector.java deleted file mode 100644 index 491ffe16adc..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TypeHintSelector.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class TypeHintSelector implements Selector { - - private final Selector selector; - private final DataType targetType; - private final CqlIdentifier alias; - - public TypeHintSelector(@NonNull Selector selector, @NonNull DataType targetType) { - this(selector, targetType, null); - } - - public TypeHintSelector( - @NonNull Selector selector, @NonNull DataType targetType, @Nullable CqlIdentifier alias) { - Preconditions.checkNotNull(selector); - Preconditions.checkNotNull(targetType); - this.selector = selector; - this.targetType = targetType; - this.alias = alias; - } - - @NonNull - @Override - public Selector as(@NonNull CqlIdentifier alias) { - return new TypeHintSelector(selector, targetType, alias); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append('(').append(targetType.asCql(false, true)).append(')'); - selector.appendTo(builder); - if (alias != null) { - builder.append(" AS ").append(alias.asCql(true)); - } - } - - @NonNull - public Selector getSelector() { - return selector; - } - - @NonNull - public DataType getTargetType() { - return targetType; - } - - @Nullable - @Override - public CqlIdentifier getAlias() { - return alias; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TypeHintSelector) { - TypeHintSelector that = 
(TypeHintSelector) other; - return this.selector.equals(that.selector) - && this.targetType.equals(that.targetType) - && Objects.equals(this.alias, that.alias); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(selector, targetType, alias); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/ArithmeticTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/ArithmeticTerm.java deleted file mode 100644 index 2d6f2094b07..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/ArithmeticTerm.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.term; - -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.ArithmeticOperator; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class ArithmeticTerm implements Term { - - protected final ArithmeticOperator operator; - - protected ArithmeticTerm(@NonNull ArithmeticOperator operator) { - Preconditions.checkNotNull(operator); - this.operator = operator; - } - - @NonNull - public ArithmeticOperator getOperator() { - return operator; - } - - protected static void appendAndMaybeParenthesize( - int myPrecedence, @NonNull Term child, @NonNull StringBuilder builder) { - boolean parenthesize = - (child instanceof ArithmeticTerm) - && (((ArithmeticTerm) child).operator.getPrecedenceLeft() < myPrecedence); - if (parenthesize) { - builder.append('('); - } - child.appendTo(builder); - if (parenthesize) { - builder.append(')'); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/BinaryArithmeticTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/BinaryArithmeticTerm.java deleted file mode 100644 index 05e829af9f8..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/BinaryArithmeticTerm.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.term; - -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.ArithmeticOperator; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class BinaryArithmeticTerm extends ArithmeticTerm { - - private final Term left; - private final Term right; - - public BinaryArithmeticTerm( - @NonNull ArithmeticOperator operator, @NonNull Term left, @NonNull Term right) { - super(operator); - Preconditions.checkNotNull(left); - Preconditions.checkNotNull(right); - this.left = left; - this.right = right; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - appendAndMaybeParenthesize(operator.getPrecedenceLeft(), left, builder); - builder.append(operator.getSymbol()); - appendAndMaybeParenthesize(operator.getPrecedenceRight(), right, builder); - } - - @Override - public boolean isIdempotent() { - return left.isIdempotent() && right.isIdempotent(); - } - - @NonNull - public Term getLeft() { - return left; - } - - @NonNull - public Term getRight() { - return right; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof BinaryArithmeticTerm) { - BinaryArithmeticTerm that = (BinaryArithmeticTerm) other; - return this.operator.equals(that.operator) - && this.left.equals(that.left) - && 
this.right.equals(that.right); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(operator, left, right); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/FunctionTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/FunctionTerm.java deleted file mode 100644 index 0980925288e..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/FunctionTerm.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.term; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class FunctionTerm implements Term { - - private final CqlIdentifier keyspaceId; - private final CqlIdentifier functionId; - private final Iterable arguments; - - public FunctionTerm( - @Nullable CqlIdentifier keyspaceId, - @NonNull CqlIdentifier functionId, - @NonNull Iterable arguments) { - Preconditions.checkNotNull(functionId); - Preconditions.checkNotNull(arguments); - this.keyspaceId = keyspaceId; - this.functionId = functionId; - this.arguments = arguments; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - // The function name appears even without arguments, so don't use prefix/suffix in CqlHelper - CqlHelper.qualify(keyspaceId, functionId, builder); - builder.append('('); - CqlHelper.append(arguments, builder, null, ",", null); - builder.append(')'); - } - - @Override - public boolean isIdempotent() { - return false; - } - - @Nullable - public CqlIdentifier getKeyspaceId() { - return keyspaceId; - } - - @NonNull - public CqlIdentifier getFunctionId() { - return functionId; - } - - @NonNull - public Iterable getArguments() { - return arguments; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/OppositeTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/OppositeTerm.java deleted file mode 100644 index 28010befc44..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/OppositeTerm.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed 
to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.term; - -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.ArithmeticOperator; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class OppositeTerm extends ArithmeticTerm { - - @NonNull private final Term argument; - - public OppositeTerm(@NonNull Term argument) { - super(ArithmeticOperator.OPPOSITE); - Preconditions.checkNotNull(argument); - this.argument = argument; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append('-'); - appendAndMaybeParenthesize(operator.getPrecedenceLeft(), argument, builder); - } - - @Override - public boolean isIdempotent() { - return argument.isIdempotent(); - } - - @NonNull - public Term getArgument() { - return argument; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof OppositeTerm) { - OppositeTerm that = (OppositeTerm) other; - return this.argument.equals(that.argument); - } else { - 
return false; - } - } - - @Override - public int hashCode() { - return argument.hashCode(); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TupleTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TupleTerm.java deleted file mode 100644 index eb6bd94c6e3..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TupleTerm.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.term; - -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class TupleTerm implements Term { - - private final Iterable components; - - public TupleTerm(@NonNull Iterable components) { - this.components = components; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - CqlHelper.append(components, builder, "(", ",", ")"); - } - - @Override - public boolean isIdempotent() { - for (Term component : components) { - if (!component.isIdempotent()) { - return false; - } - } - return true; - } - - @NonNull - public Iterable getComponents() { - return components; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TypeHintTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TypeHintTerm.java deleted file mode 100644 index 9ed45f852a5..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TypeHintTerm.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.term; - -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class TypeHintTerm implements Term { - - private final Term term; - private final DataType targetType; - - public TypeHintTerm(@NonNull Term term, @NonNull DataType targetType) { - this.term = term; - this.targetType = targetType; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append('(').append(targetType.asCql(false, true)).append(')'); - term.appendTo(builder); - } - - @Override - public boolean isIdempotent() { - return term.isIdempotent(); - } - - @NonNull - public Term getTerm() { - return term; - } - - @NonNull - public DataType getTargetType() { - return targetType; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/truncate/DefaultTruncate.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/truncate/DefaultTruncate.java deleted file mode 100644 index f3aa0006756..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/truncate/DefaultTruncate.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.truncate; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder; -import com.datastax.oss.driver.api.querybuilder.truncate.Truncate; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; - -public class DefaultTruncate implements Truncate { - private final CqlIdentifier keyspace; - private final CqlIdentifier table; - - public DefaultTruncate(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - this.keyspace = keyspace; - this.table = table; - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); - builder.append("TRUNCATE "); - CqlHelper.qualify(keyspace, table, builder); - return builder.toString(); - } - - @NonNull - @Override - public SimpleStatementBuilder builder() { - return SimpleStatement.builder(asCql()).setIdempotence(true); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Object... values) { - throw new UnsupportedOperationException( - "TRUNCATE doesn't take values as parameters. Use build() method instead."); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Map namedValues) { - throw new UnsupportedOperationException( - "TRUNCATE doesn't take namedValues as parameters. 
Use build() method instead."); - } - - @NonNull - @Override - public SimpleStatement build() { - return builder().build(); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendAssignment.java deleted file mode 100644 index 7d2c653cee6..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendAssignment.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class AppendAssignment extends CollectionAssignment { - - public AppendAssignment(@NonNull CqlIdentifier columnId, @NonNull Term value) { - super(columnId, Operator.APPEND, value); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendListElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendListElementAssignment.java deleted file mode 100644 index 717e07f9026..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendListElementAssignment.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class AppendListElementAssignment extends CollectionElementAssignment { - - public AppendListElementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term element) { - super(columnId, Operator.APPEND, null, element, '[', ']'); - } - - @Override - public boolean isIdempotent() { - return false; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendMapEntryAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendMapEntryAssignment.java deleted file mode 100644 index 1001fa919e1..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendMapEntryAssignment.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class AppendMapEntryAssignment extends CollectionElementAssignment { - - public AppendMapEntryAssignment( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - super(columnId, Operator.APPEND, key, value, '{', '}'); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendSetElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendSetElementAssignment.java deleted file mode 100644 index 6d3a11afc8a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendSetElementAssignment.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class AppendSetElementAssignment extends CollectionElementAssignment { - - public AppendSetElementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term element) { - super(columnId, Operator.APPEND, null, element, '{', '}'); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionAssignment.java deleted file mode 100644 index 3dddd21d143..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionAssignment.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.api.querybuilder.update.Assignment; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class CollectionAssignment implements Assignment { - - public enum Operator { - APPEND("%1$s=%1$s+%2$s"), - PREPEND("%1$s=%2$s+%1$s"), - REMOVE("%1$s=%1$s-%2$s"), - ; - - public final String pattern; - - Operator(String pattern) { - this.pattern = pattern; - } - } - - private final CqlIdentifier columnId; - private final Operator operator; - private final Term value; - - protected CollectionAssignment( - @NonNull CqlIdentifier columnId, @NonNull Operator operator, @NonNull Term value) { - Preconditions.checkNotNull(columnId); - Preconditions.checkNotNull(value); - this.columnId = columnId; - this.operator = operator; - this.value = value; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append(String.format(operator.pattern, columnId.asCql(true), buildRightOperand())); - } - - private String buildRightOperand() { - StringBuilder builder = new StringBuilder(); - value.appendTo(builder); - return builder.toString(); - } - - @Override - public boolean isIdempotent() { - // REMOVE is idempotent if the collection being removed is idempotent; APPEND and PREPEND are - // not idempotent for lists, so be pessimistic - return operator == Operator.REMOVE && value.isIdempotent(); - } - - @NonNull - public CqlIdentifier getColumnId() { - return columnId; - } - - @NonNull - public Term getValue() { - return value; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionElementAssignment.java 
b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionElementAssignment.java deleted file mode 100644 index d4b2f532155..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionElementAssignment.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.api.querybuilder.update.Assignment; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class CollectionElementAssignment implements Assignment { - - public enum Operator { - APPEND("%1$s=%1$s+%2$s"), - PREPEND("%1$s=%2$s+%1$s"), - REMOVE("%1$s=%1$s-%2$s"), - ; - - public final String pattern; - - Operator(String pattern) { - this.pattern = pattern; - } - } - - private final CqlIdentifier columnId; - private final Operator operator; - private final Term key; - private final Term value; - private final char opening; - private final char closing; - - protected CollectionElementAssignment( - @NonNull CqlIdentifier columnId, - @NonNull Operator operator, - @Nullable Term key, - @NonNull Term value, - char opening, - char closing) { - Preconditions.checkNotNull(columnId); - Preconditions.checkNotNull(value); - this.columnId = columnId; - this.operator = operator; - this.key = key; - this.value = value; - this.opening = opening; - this.closing = closing; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append(String.format(operator.pattern, columnId.asCql(true), buildRightOperand())); - } - - private String buildRightOperand() { - StringBuilder builder = new StringBuilder(); - builder.append(opening); - if (key != null) { - key.appendTo(builder); - builder.append(':'); - } - value.appendTo(builder); - return builder.append(closing).toString(); - } - - @Override - public boolean isIdempotent() { - return (key == null || key.isIdempotent()) && value.isIdempotent(); - } - - @NonNull - public CqlIdentifier getColumnId() { - return 
columnId; - } - - @Nullable - public Term getKey() { - return key; - } - - @NonNull - public Term getValue() { - return value; - } - - public char getOpening() { - return opening; - } - - public char getClosing() { - return closing; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CounterAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CounterAssignment.java deleted file mode 100644 index 3751255eef9..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CounterAssignment.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.api.querybuilder.update.Assignment; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class CounterAssignment implements Assignment { - - public enum Operator { - INCREMENT("%1$s=%1$s+%2$s"), - DECREMENT("%1$s=%1$s-%2$s"), - ; - - public final String pattern; - - Operator(String pattern) { - this.pattern = pattern; - } - } - - private final CqlIdentifier columnId; - private final Operator operator; - private final Term value; - - protected CounterAssignment( - @NonNull CqlIdentifier columnId, @NonNull Operator operator, @NonNull Term value) { - Preconditions.checkNotNull(columnId); - Preconditions.checkNotNull(value); - this.columnId = columnId; - this.operator = operator; - this.value = value; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append(String.format(operator.pattern, columnId.asCql(true), buildRightOperand())); - } - - private String buildRightOperand() { - StringBuilder builder = new StringBuilder(); - value.appendTo(builder); - return builder.toString(); - } - - @Override - public boolean isIdempotent() { - return false; - } - - @NonNull - public CqlIdentifier getColumnId() { - return columnId; - } - - @NonNull - public Term getValue() { - return value; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DecrementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DecrementAssignment.java deleted file mode 100644 index aabf59019c6..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DecrementAssignment.java +++ /dev/null 
@@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class DecrementAssignment extends CounterAssignment { - - public DecrementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term value) { - super(columnId, Operator.DECREMENT, value); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultAssignment.java deleted file mode 100644 index 7f138c21d43..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultAssignment.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.api.querybuilder.update.Assignment; -import com.datastax.oss.driver.internal.querybuilder.lhs.LeftOperand; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultAssignment implements Assignment { - - private final LeftOperand leftOperand; - private final String operator; - private final Term rightOperand; - - public DefaultAssignment( - @NonNull LeftOperand leftOperand, @NonNull String operator, @Nullable Term rightOperand) { - this.leftOperand = leftOperand; - this.operator = operator; - this.rightOperand = rightOperand; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - leftOperand.appendTo(builder); - builder.append(operator); - if (rightOperand != null) { - rightOperand.appendTo(builder); - } - } - - @Override - public boolean isIdempotent() { - return rightOperand == null || rightOperand.isIdempotent(); - } - - @NonNull - public LeftOperand getLeftOperand() { - return leftOperand; - } - - @NonNull - public String getOperator() { - return operator; - } - - @Nullable - public Term getRightOperand() { - return rightOperand; - } -} diff --git 
a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultUpdate.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultUpdate.java deleted file mode 100644 index 4d9d18f3aa4..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultUpdate.java +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder; -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import com.datastax.oss.driver.api.querybuilder.condition.Condition; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.update.Assignment; -import com.datastax.oss.driver.api.querybuilder.update.Update; -import com.datastax.oss.driver.api.querybuilder.update.UpdateStart; -import com.datastax.oss.driver.api.querybuilder.update.UpdateWithAssignments; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultUpdate implements UpdateStart, UpdateWithAssignments, Update { - - private final CqlIdentifier keyspace; - private final CqlIdentifier table; - private final Object timestamp; - private final Object ttlInSeconds; - private final ImmutableList assignments; - private final ImmutableList relations; - private final boolean ifExists; - private final ImmutableList conditions; - - public DefaultUpdate(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - this( - keyspace, - table, - null, - null, - ImmutableList.of(), - ImmutableList.of(), - false, - ImmutableList.of()); - } - - public DefaultUpdate( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier table, - @Nullable Object timestamp, - @Nullable Object ttlInSeconds, - @NonNull ImmutableList 
assignments, - @NonNull ImmutableList relations, - boolean ifExists, - @NonNull ImmutableList conditions) { - Preconditions.checkArgument( - timestamp == null || timestamp instanceof Long || timestamp instanceof BindMarker, - "TIMESTAMP value must be a BindMarker or a Long"); - Preconditions.checkArgument( - ttlInSeconds == null - || ttlInSeconds instanceof Integer - || ttlInSeconds instanceof BindMarker, - "TTL value must be a BindMarker or an Integer"); - - this.keyspace = keyspace; - this.table = table; - this.timestamp = timestamp; - this.ttlInSeconds = ttlInSeconds; - this.assignments = assignments; - this.relations = relations; - this.ifExists = ifExists; - this.conditions = conditions; - } - - @NonNull - @Override - public UpdateStart usingTimestamp(long newTimestamp) { - return new DefaultUpdate( - keyspace, table, newTimestamp, ttlInSeconds, assignments, relations, ifExists, conditions); - } - - @NonNull - @Override - public UpdateStart usingTimestamp(@NonNull BindMarker newTimestamp) { - return new DefaultUpdate( - keyspace, table, newTimestamp, ttlInSeconds, assignments, relations, ifExists, conditions); - } - - @NonNull - @Override - public UpdateStart usingTtl(int ttlInSeconds) { - return new DefaultUpdate( - keyspace, table, timestamp, ttlInSeconds, assignments, relations, ifExists, conditions); - } - - @NonNull - @Override - public UpdateStart usingTtl(@NonNull BindMarker ttlInSeconds) { - return new DefaultUpdate( - keyspace, table, timestamp, ttlInSeconds, assignments, relations, ifExists, conditions); - } - - @NonNull - @Override - public UpdateWithAssignments set(@NonNull Assignment assignment) { - return withAssignments(ImmutableCollections.append(assignments, assignment)); - } - - @NonNull - @Override - public UpdateWithAssignments set(@NonNull Iterable additionalAssignments) { - return withAssignments(ImmutableCollections.concat(assignments, additionalAssignments)); - } - - @NonNull - public UpdateWithAssignments withAssignments(@NonNull 
ImmutableList newAssignments) { - return new DefaultUpdate( - keyspace, table, timestamp, ttlInSeconds, newAssignments, relations, ifExists, conditions); - } - - @NonNull - @Override - public Update where(@NonNull Relation relation) { - return withRelations(ImmutableCollections.append(relations, relation)); - } - - @NonNull - @Override - public Update where(@NonNull Iterable additionalRelations) { - return withRelations(ImmutableCollections.concat(relations, additionalRelations)); - } - - @NonNull - public Update withRelations(@NonNull ImmutableList newRelations) { - return new DefaultUpdate( - keyspace, table, timestamp, ttlInSeconds, assignments, newRelations, ifExists, conditions); - } - - @NonNull - @Override - public Update ifExists() { - return new DefaultUpdate( - keyspace, table, timestamp, ttlInSeconds, assignments, relations, true, conditions); - } - - @NonNull - @Override - public Update if_(@NonNull Condition condition) { - return withConditions(ImmutableCollections.append(conditions, condition)); - } - - @NonNull - @Override - public Update if_(@NonNull Iterable additionalConditions) { - return withConditions(ImmutableCollections.concat(conditions, additionalConditions)); - } - - @NonNull - public Update withConditions(@NonNull ImmutableList newConditions) { - return new DefaultUpdate( - keyspace, table, timestamp, ttlInSeconds, assignments, relations, false, newConditions); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("UPDATE "); - CqlHelper.qualify(keyspace, table, builder); - - if (timestamp != null) { - builder.append(" USING TIMESTAMP "); - if (timestamp instanceof BindMarker) { - ((BindMarker) timestamp).appendTo(builder); - } else { - builder.append(timestamp); - } - } - - if (ttlInSeconds != null) { - // choose the correct keyword based on whether or not we have a timestamp - builder.append((timestamp != null) ? 
" AND " : " USING ").append("TTL "); - if (ttlInSeconds instanceof BindMarker) { - ((BindMarker) ttlInSeconds).appendTo(builder); - } else { - builder.append(ttlInSeconds); - } - } - - CqlHelper.append(assignments, builder, " SET ", ", ", null); - CqlHelper.append(relations, builder, " WHERE ", " AND ", null); - - if (ifExists) { - builder.append(" IF EXISTS"); - } else { - CqlHelper.append(conditions, builder, " IF ", " AND ", null); - } - return builder.toString(); - } - - @NonNull - @Override - public SimpleStatement build() { - return builder().build(); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Object... values) { - return builder().addPositionalValues(values).build(); - } - - @NonNull - @Override - public SimpleStatement build(@NonNull Map namedValues) { - SimpleStatementBuilder builder = builder(); - for (Map.Entry entry : namedValues.entrySet()) { - builder.addNamedValue(entry.getKey(), entry.getValue()); - } - return builder.build(); - } - - @NonNull - @Override - public SimpleStatementBuilder builder() { - return SimpleStatement.builder(asCql()).setIdempotence(isIdempotent()); - } - - public boolean isIdempotent() { - // Conditional queries are never idempotent, see JAVA-819 - if (!conditions.isEmpty() || ifExists) { - return false; - } else { - for (Assignment assignment : assignments) { - if (!assignment.isIdempotent()) { - return false; - } - } - for (Relation relation : relations) { - if (!relation.isIdempotent()) { - return false; - } - } - return true; - } - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getTable() { - return table; - } - - @Nullable - public Object getTimestamp() { - return timestamp; - } - - @Nullable - public Object getTtl() { - return ttlInSeconds; - } - - @NonNull - public ImmutableList getAssignments() { - return assignments; - } - - @NonNull - public ImmutableList getRelations() { - return relations; - } - - public boolean 
isIfExists() { - return ifExists; - } - - @NonNull - public ImmutableList getConditions() { - return conditions; - } - - @Override - public String toString() { - return asCql(); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/IncrementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/IncrementAssignment.java deleted file mode 100644 index 4aba6b983f6..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/IncrementAssignment.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class IncrementAssignment extends CounterAssignment { - - public IncrementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term value) { - super(columnId, Operator.INCREMENT, value); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependAssignment.java deleted file mode 100644 index 4094f3272a8..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependAssignment.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class PrependAssignment extends CollectionAssignment { - - public PrependAssignment(@NonNull CqlIdentifier columnId, @NonNull Term prefix) { - super(columnId, Operator.PREPEND, prefix); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependListElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependListElementAssignment.java deleted file mode 100644 index de73a9d0840..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependListElementAssignment.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class PrependListElementAssignment extends CollectionElementAssignment { - - public PrependListElementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term element) { - super(columnId, Operator.PREPEND, null, element, '[', ']'); - } - - @Override - public boolean isIdempotent() { - return false; - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependMapEntryAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependMapEntryAssignment.java deleted file mode 100644 index 093d1e58613..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependMapEntryAssignment.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class PrependMapEntryAssignment extends CollectionElementAssignment { - - public PrependMapEntryAssignment( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - super(columnId, Operator.PREPEND, key, value, '{', '}'); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependSetElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependSetElementAssignment.java deleted file mode 100644 index 00083648aa4..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependSetElementAssignment.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class PrependSetElementAssignment extends CollectionElementAssignment { - - public PrependSetElementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term element) { - super(columnId, Operator.PREPEND, null, element, '{', '}'); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveAssignment.java deleted file mode 100644 index 618ccdbdc89..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveAssignment.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class RemoveAssignment extends CollectionAssignment { - - public RemoveAssignment(@NonNull CqlIdentifier columnId, @NonNull Term value) { - super(columnId, Operator.REMOVE, value); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveListElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveListElementAssignment.java deleted file mode 100644 index ce60c6e1d9a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveListElementAssignment.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class RemoveListElementAssignment extends CollectionElementAssignment { - - public RemoveListElementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term element) { - super(columnId, Operator.REMOVE, null, element, '[', ']'); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveMapEntryAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveMapEntryAssignment.java deleted file mode 100644 index 598dd215e5f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveMapEntryAssignment.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class RemoveMapEntryAssignment extends CollectionElementAssignment { - - public RemoveMapEntryAssignment( - @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { - super(columnId, Operator.REMOVE, key, value, '{', '}'); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveSetElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveSetElementAssignment.java deleted file mode 100644 index 7a8e73da1fd..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveSetElementAssignment.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.querybuilder.update; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class RemoveSetElementAssignment extends CollectionElementAssignment { - - public RemoveSetElementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term element) { - super(columnId, Operator.REMOVE, null, element, '{', '}'); - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/Assertions.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/Assertions.java deleted file mode 100644 index b1a463378e0..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/Assertions.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; - -public class Assertions extends org.assertj.core.api.Assertions { - - public static BuildableQueryAssert assertThat(BuildableQuery actual) { - return new BuildableQueryAssert(actual); - } - - public static CqlSnippetAssert assertThat(CqlSnippet actual) { - return new CqlSnippetAssert(actual); - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/BuildableQueryAssert.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/BuildableQueryAssert.java deleted file mode 100644 index 3173723353a..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/BuildableQueryAssert.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import org.assertj.core.api.AbstractAssert; - -public class BuildableQueryAssert extends AbstractAssert { - - public BuildableQueryAssert(BuildableQuery actual) { - super(actual, BuildableQueryAssert.class); - } - - public BuildableQueryAssert hasCql(String expected) { - assertThat(actual.asCql()).isEqualTo(expected); - return this; - } - - public BuildableQueryAssert isIdempotent() { - assertThat(actual.build().isIdempotent()).isTrue(); - return this; - } - - public BuildableQueryAssert isNotIdempotent() { - assertThat(actual.build().isIdempotent()).isFalse(); - return this; - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/CqlSnippetAssert.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/CqlSnippetAssert.java deleted file mode 100644 index 1aa165d1319..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/CqlSnippetAssert.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; -import org.assertj.core.api.AbstractAssert; - -public class CqlSnippetAssert extends AbstractAssert { - - public CqlSnippetAssert(CqlSnippet actual) { - super(actual, CqlSnippetAssert.class); - } - - public CqlSnippetAssert hasCql(String expected) { - StringBuilder builder = new StringBuilder(); - actual.appendTo(builder); - assertThat(builder.toString()).isEqualTo(expected); - return this; - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceTest.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceTest.java deleted file mode 100644 index a3e2c44cfac..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import static com.datastax.dse.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.alterDseKeyspace; - -import org.junit.Test; - -public class AlterDseKeyspaceTest { - - @Test - public void should_not_throw_on_toString_for_AlterKeyspaceStart() { - assertThat(alterDseKeyspace("foo").toString()).isEqualTo("ALTER KEYSPACE foo"); - } - - @Test - public void should_generate_alter_keyspace_with_replication() { - assertThat(alterDseKeyspace("foo").withSimpleStrategy(3)) - .hasCql( - "ALTER KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':3}"); - } - - @Test - public void should_generate_alter_keyspace_with_graph_engine() { - assertThat(alterDseKeyspace("foo").withSimpleStrategy(3).withGraphEngine("Core")) - .hasCql( - "ALTER KEYSPACE foo " - + "WITH replication={'class':'SimpleStrategy','replication_factor':3} " - + "AND graph_engine='Core'"); - } - - @Test - public void should_generate_alter_keyspace_with_durable_writes_and_options() { - assertThat(alterDseKeyspace("foo").withDurableWrites(true).withOption("hello", "world")) - .hasCql("ALTER KEYSPACE foo WITH durable_writes=true AND hello='world'"); - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableTest.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableTest.java deleted file mode 100644 index b2b5965fbc7..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableTest.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import static com.datastax.dse.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.alterDseTable; -import static com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide.table; - -import com.datastax.oss.driver.api.core.type.DataTypes; -import org.junit.Test; - -public class AlterDseTableTest { - - @Test - public void should_not_throw_on_toString_for_AlterTableStart() { - assertThat(alterDseTable("foo").toString()).isEqualTo("ALTER TABLE foo"); - } - - @Test - public void should_generate_alter_table_with_alter_column_type() { - assertThat(alterDseTable("foo", "bar").alterColumn("x", DataTypes.TEXT)) - .hasCql("ALTER TABLE foo.bar ALTER x TYPE text"); - } - - @Test - public void should_generate_alter_table_with_add_single_column() { - assertThat(alterDseTable("foo", "bar").addColumn("x", DataTypes.TEXT)) - .hasCql("ALTER TABLE foo.bar ADD x text"); - } - - @Test - public void should_generate_alter_table_with_add_three_columns() { - assertThat( - alterDseTable("foo", "bar") - .addColumn("x", DataTypes.TEXT) - .addStaticColumn("y", DataTypes.FLOAT) - .addColumn("z", DataTypes.DOUBLE)) - .hasCql("ALTER TABLE foo.bar ADD (x text,y float STATIC,z double)"); - } - - @Test - public void should_generate_alter_table_with_drop_single_column() { - 
assertThat(alterDseTable("foo", "bar").dropColumn("x")).hasCql("ALTER TABLE foo.bar DROP x"); - } - - @Test - public void should_generate_alter_table_with_drop_two_columns() { - assertThat(alterDseTable("foo", "bar").dropColumn("x").dropColumn("y")) - .hasCql("ALTER TABLE foo.bar DROP (x,y)"); - } - - @Test - public void should_generate_alter_table_with_drop_two_columns_at_once() { - assertThat(alterDseTable("foo", "bar").dropColumns("x", "y")) - .hasCql("ALTER TABLE foo.bar DROP (x,y)"); - } - - @Test - public void should_generate_alter_table_with_rename_single_column() { - assertThat(alterDseTable("foo", "bar").renameColumn("x", "y")) - .hasCql("ALTER TABLE foo.bar RENAME x TO y"); - } - - @Test - public void should_generate_alter_table_with_rename_three_columns() { - assertThat( - alterDseTable("foo", "bar") - .renameColumn("x", "y") - .renameColumn("u", "v") - .renameColumn("b", "a")) - .hasCql("ALTER TABLE foo.bar RENAME x TO y AND u TO v AND b TO a"); - } - - @Test - public void should_generate_alter_table_with_drop_compact_storage() { - assertThat(alterDseTable("bar").dropCompactStorage()) - .hasCql("ALTER TABLE bar DROP COMPACT STORAGE"); - } - - @Test - public void should_generate_alter_table_with_options() { - assertThat(alterDseTable("bar").withComment("Hello").withCDC(true)) - .hasCql("ALTER TABLE bar WITH comment='Hello' AND cdc=true"); - } - - @Test - public void should_generate_alter_table_with_no_compression() { - assertThat(alterDseTable("bar").withNoCompression()) - .hasCql("ALTER TABLE bar WITH compression={'sstable_compression':''}"); - } - - @Test - public void should_generate_alter_table_to_add_anonymous_vertex_label() { - assertThat(alterDseTable("bar").withVertexLabel()).hasCql("ALTER TABLE bar WITH VERTEX LABEL"); - } - - @Test - public void should_generate_alter_table_to_add_named_vertex_label() { - assertThat(alterDseTable("bar").withVertexLabel("baz")) - .hasCql("ALTER TABLE bar WITH VERTEX LABEL baz"); - } - - @Test - public void 
should_generate_alter_table_to_remove_anonymous_vertex_label() { - assertThat(alterDseTable("bar").withoutVertexLabel()) - .hasCql("ALTER TABLE bar WITHOUT VERTEX LABEL"); - } - - @Test - public void should_generate_alter_table_to_remove_named_vertex_label() { - assertThat(alterDseTable("bar").withoutVertexLabel("baz")) - .hasCql("ALTER TABLE bar WITHOUT VERTEX LABEL baz"); - } - - @Test - public void should_generate_alter_table_to_add_anonymous_edge_label() { - assertThat( - alterDseTable("bar") - .withEdgeLabel( - table("source").withPartitionKey("pk"), - table("dest") - .withPartitionKey("pk1") - .withPartitionKey("pk2") - .withClusteringColumn("cc"))) - .hasCql("ALTER TABLE bar WITH EDGE LABEL FROM source(pk) TO dest((pk1,pk2),cc)"); - } - - @Test - public void should_generate_alter_table_to_add_named_edge_label() { - assertThat( - alterDseTable("bar") - .withEdgeLabel( - "e", - table("source").withPartitionKey("pk"), - table("dest") - .withPartitionKey("pk1") - .withPartitionKey("pk2") - .withClusteringColumn("cc"))) - .hasCql("ALTER TABLE bar WITH EDGE LABEL e FROM source(pk) TO dest((pk1,pk2),cc)"); - } - - @Test - public void should_generate_alter_table_to_remove_anonymous_edge_label() { - assertThat(alterDseTable("bar").withoutEdgeLabel()) - .hasCql("ALTER TABLE bar WITHOUT EDGE LABEL"); - } - - @Test - public void should_generate_alter_table_to_remove_named_edge_label() { - assertThat(alterDseTable("bar").withoutEdgeLabel("baz")) - .hasCql("ALTER TABLE bar WITHOUT EDGE LABEL baz"); - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceTest.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceTest.java deleted file mode 100644 index d92659b2d1c..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import static com.datastax.dse.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseKeyspace; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import org.junit.Test; - -public class CreateDseKeyspaceTest { - - @Test - public void should_not_throw_on_toString_for_CreateKeyspaceStart() { - assertThat(createDseKeyspace("foo").toString()).isEqualTo("CREATE KEYSPACE foo"); - } - - @Test - public void should_generate_create_keyspace_simple_strategy() { - assertThat(createDseKeyspace("foo").withSimpleStrategy(5)) - .hasCql( - "CREATE KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':5}"); - } - - @Test - public void should_generate_create_keyspace_simple_strategy_and_durable_writes() { - assertThat(createDseKeyspace("foo").withSimpleStrategy(5).withDurableWrites(true)) - .hasCql( - "CREATE KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':5} AND durable_writes=true"); - } - - @Test - public void should_generate_create_keyspace_if_not_exists() { - 
assertThat(createDseKeyspace("foo").ifNotExists().withSimpleStrategy(2)) - .hasCql( - "CREATE KEYSPACE IF NOT EXISTS foo WITH replication={'class':'SimpleStrategy','replication_factor':2}"); - } - - @Test - public void should_generate_create_keyspace_network_topology_strategy() { - assertThat( - createDseKeyspace("foo") - .withNetworkTopologyStrategy(ImmutableMap.of("dc1", 3, "dc2", 4))) - .hasCql( - "CREATE KEYSPACE foo WITH replication={'class':'NetworkTopologyStrategy','dc1':3,'dc2':4}"); - } - - @Test - public void should_generate_create_keyspace_with_graph_engine() { - assertThat( - createDseKeyspace("foo") - .ifNotExists() - .withNetworkTopologyStrategy(ImmutableMap.of("dc1", 3, "dc2", 4)) - .withDurableWrites(true) - .withGraphEngine("Core")) - .hasCql( - "CREATE KEYSPACE IF NOT EXISTS foo " - + "WITH replication={'class':'NetworkTopologyStrategy','dc1':3,'dc2':4} " - + "AND durable_writes=true " - + "AND graph_engine='Core'"); - } - - @Test - public void should_generate_create_keyspace_with_custom_properties() { - assertThat( - createDseKeyspace("foo") - .withSimpleStrategy(3) - .withOption("awesome_feature", true) - .withOption("wow_factor", 11) - .withOption("random_string", "hi")) - .hasCql( - "CREATE KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':3} AND awesome_feature=true AND wow_factor=11 AND random_string='hi'"); - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableTest.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableTest.java deleted file mode 100644 index d8ee1c4e380..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableTest.java +++ /dev/null @@ -1,457 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import static com.datastax.dse.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseTable; -import static com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide.table; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.udt; - -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder.RowsPerPartition; -import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy.CompactionWindowUnit; -import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy.TimestampResolution; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import org.junit.Test; - -public class CreateDseTableTest { - - @Test - public void should_not_throw_on_toString_for_CreateTableStart() { - assertThat(createDseTable("foo").toString()).isEqualTo("CREATE TABLE foo"); - } - - @Test - public void should_generate_create_table_if_not_exists() { - 
assertThat(createDseTable("bar").ifNotExists().withPartitionKey("k", DataTypes.INT)) - .hasCql("CREATE TABLE IF NOT EXISTS bar (k int PRIMARY KEY)"); - } - - @Test - public void should_generate_create_table_with_single_partition_key() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT)) - .hasCql("CREATE TABLE bar (k int PRIMARY KEY,v text)"); - } - - @Test - public void should_generate_create_table_with_compound_partition_key() { - assertThat( - createDseTable("bar") - .withPartitionKey("kc", DataTypes.INT) - .withPartitionKey("ka", DataTypes.TIMESTAMP) - .withColumn("v", DataTypes.TEXT)) - .hasCql("CREATE TABLE bar (kc int,ka timestamp,v text,PRIMARY KEY((kc,ka)))"); - } - - @Test - public void should_generate_create_table_with_single_partition_key_and_clustering_column() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withClusteringColumn("c", DataTypes.TEXT) - .withColumn("v", udt("val", true))) - .hasCql("CREATE TABLE bar (k int,c text,v frozen,PRIMARY KEY(k,c))"); - } - - @Test - public void should_generate_create_table_with_static_column() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withClusteringColumn("c", DataTypes.TEXT) - .withStaticColumn("s", DataTypes.TIMEUUID) - .withColumn("v", udt("val", true))) - .hasCql("CREATE TABLE bar (k int,c text,s timeuuid STATIC,v frozen,PRIMARY KEY(k,c))"); - } - - @Test - public void should_generate_create_table_with_compound_partition_key_and_clustering_columns() { - assertThat( - createDseTable("bar") - .withPartitionKey("kc", DataTypes.INT) - .withPartitionKey("ka", DataTypes.TIMESTAMP) - .withClusteringColumn("c", DataTypes.FLOAT) - .withClusteringColumn("a", DataTypes.UUID) - .withColumn("v", DataTypes.TEXT)) - .hasCql( - "CREATE TABLE bar (kc int,ka timestamp,c float,a uuid,v text,PRIMARY KEY((kc,ka),c,a))"); - } - - @Test - public void 
should_generate_create_table_with_compact_storage() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompactStorage()) - .hasCql("CREATE TABLE bar (k int PRIMARY KEY,v text) WITH COMPACT STORAGE"); - } - - @Test - public void should_generate_create_table_with_clustering_single() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withClusteringColumn("c", DataTypes.TEXT) - .withColumn("v", DataTypes.TEXT) - .withClusteringOrder("c", ClusteringOrder.ASC)) - .hasCql( - "CREATE TABLE bar (k int,c text,v text,PRIMARY KEY(k,c)) WITH CLUSTERING ORDER BY (c ASC)"); - } - - @Test - public void should_generate_create_table_with_clustering_three() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withClusteringColumn("c0", DataTypes.TEXT) - .withClusteringColumn("c1", DataTypes.TEXT) - .withClusteringColumn("c2", DataTypes.TEXT) - .withColumn("v", DataTypes.TEXT) - .withClusteringOrder("c0", ClusteringOrder.DESC) - .withClusteringOrder( - ImmutableMap.of("c1", ClusteringOrder.ASC, "c2", ClusteringOrder.DESC))) - .hasCql( - "CREATE TABLE bar (k int,c0 text,c1 text,c2 text,v text,PRIMARY KEY(k,c0,c1,c2)) WITH CLUSTERING ORDER BY (c0 DESC,c1 ASC,c2 DESC)"); - } - - @Test - public void should_generate_create_table_with_compact_storage_and_default_ttl() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompactStorage() - .withDefaultTimeToLiveSeconds(86400)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH COMPACT STORAGE AND default_time_to_live=86400"); - } - - @Test - public void should_generate_create_table_with_clustering_compact_storage_and_default_ttl() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withClusteringColumn("c", DataTypes.TEXT) - .withColumn("v", DataTypes.TEXT) - .withCompactStorage() - 
.withClusteringOrder("c", ClusteringOrder.DESC) - .withDefaultTimeToLiveSeconds(86400)) - .hasCql( - "CREATE TABLE bar (k int,c text,v text,PRIMARY KEY(k,c)) WITH COMPACT STORAGE AND CLUSTERING ORDER BY (c DESC) AND default_time_to_live=86400"); - } - - @Test - public void should_generate_create_table_with_options() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withBloomFilterFpChance(0.42) - .withCDC(false) - .withComment("Hello world") - .withDcLocalReadRepairChance(0.54) - .withDefaultTimeToLiveSeconds(86400) - .withGcGraceSeconds(864000) - .withMemtableFlushPeriodInMs(10000) - .withMinIndexInterval(1024) - .withMaxIndexInterval(4096) - .withReadRepairChance(0.55) - .withSpeculativeRetry("99percentile")) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH bloom_filter_fp_chance=0.42 AND cdc=false AND comment='Hello world' AND dclocal_read_repair_chance=0.54 AND default_time_to_live=86400 AND gc_grace_seconds=864000 AND memtable_flush_period_in_ms=10000 AND min_index_interval=1024 AND max_index_interval=4096 AND read_repair_chance=0.55 AND speculative_retry='99percentile'"); - } - - @Test - public void should_generate_create_table_lz4_compression() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withLZ4Compression()) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor'}"); - } - - @Test - public void should_generate_create_table_lz4_compression_options() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withLZ4Compression(1024)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_in_kb':1024}"); - } - - @Test - public void should_generate_create_table_lz4_compression_options_crc() { - assertThat( - createDseTable("bar") - 
.withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withLZ4Compression(1024, .5)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_kb':1024,'crc_check_chance':0.5}"); - } - - @Test - public void should_generate_create_table_zstd_compression() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withZstdCompression()) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'ZstdCompressor'}"); - } - - @Test - public void should_generate_create_table_zstd_compression_options() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withZstdCompression(1024)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'ZstdCompressor','chunk_length_in_kb':1024}"); - } - - @Test - public void should_generate_create_table_snappy_compression() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withSnappyCompression()) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor'}"); - } - - @Test - public void should_generate_create_table_snappy_compression_options() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withSnappyCompression(2048)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_in_kb':2048}"); - } - - @Test - public void should_generate_create_table_snappy_compression_options_crc() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withSnappyCompression(2048, .25)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH 
compression={'class':'SnappyCompressor','chunk_length_kb':2048,'crc_check_chance':0.25}"); - } - - @Test - public void should_generate_create_table_deflate_compression() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withDeflateCompression()) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor'}"); - } - - @Test - public void should_generate_create_table_deflate_compression_options() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withDeflateCompression(4096)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_in_kb':4096}"); - } - - @Test - public void should_generate_create_table_deflate_compression_options_crc() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withDeflateCompression(4096, .1)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_kb':4096,'crc_check_chance':0.1}"); - } - - @Test - public void should_generate_create_table_caching_options() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCaching(true, RowsPerPartition.rows(10))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH caching={'keys':'ALL','rows_per_partition':'10'}"); - } - - @Test - public void should_generate_create_table_size_tiered_compaction() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompaction( - SchemaBuilder.sizeTieredCompactionStrategy() - .withBucketHigh(1.6) - .withBucketLow(0.6) - .withColdReadsToOmit(0.1) - .withMaxThreshold(33) - .withMinThreshold(5) - .withMinSSTableSizeInBytes(50000) - 
.withOnlyPurgeRepairedTombstones(true) - .withEnabled(false) - .withTombstoneCompactionIntervalInSeconds(86400) - .withTombstoneThreshold(0.22) - .withUncheckedTombstoneCompaction(true))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'SizeTieredCompactionStrategy','bucket_high':1.6,'bucket_low':0.6,'cold_reads_to_omit':0.1,'max_threshold':33,'min_threshold':5,'min_sstable_size':50000,'only_purge_repaired_tombstones':true,'enabled':false,'tombstone_compaction_interval':86400,'tombstone_threshold':0.22,'unchecked_tombstone_compaction':true}"); - } - - @Test - public void should_generate_create_table_leveled_compaction() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompaction( - SchemaBuilder.leveledCompactionStrategy() - .withSSTableSizeInMB(110) - .withTombstoneCompactionIntervalInSeconds(3600))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'LeveledCompactionStrategy','sstable_size_in_mb':110,'tombstone_compaction_interval':3600}"); - } - - @Test - public void should_generate_create_table_time_window_compaction() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompaction( - SchemaBuilder.timeWindowCompactionStrategy() - .withCompactionWindow(10, CompactionWindowUnit.DAYS) - .withTimestampResolution(TimestampResolution.MICROSECONDS) - .withUnsafeAggressiveSSTableExpiration(false))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'TimeWindowCompactionStrategy','compaction_window_size':10,'compaction_window_unit':'DAYS','timestamp_resolution':'MICROSECONDS','unsafe_aggressive_sstable_expiration':false}"); - } - - @Test - public void should_generate_create_table_with_anonymous_vertex() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - 
.withComment("test") - .withVertexLabel() - .withCaching(true, RowsPerPartition.rows(10))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) " - + "WITH VERTEX LABEL " - + "AND comment='test' " - + "AND caching={'keys':'ALL','rows_per_partition':'10'}"); - } - - @Test - public void should_generate_create_table_with_named_vertex() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withComment("test") - .withVertexLabel("b") - .withCaching(true, RowsPerPartition.rows(10))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) " - + "WITH VERTEX LABEL b " - + "AND comment='test' " - + "AND caching={'keys':'ALL','rows_per_partition':'10'}"); - } - - @Test - public void should_generate_create_table_with_anonymous_edge() { - assertThat( - createDseTable("contributors") - .withPartitionKey("contributor", DataTypes.TEXT) - .withClusteringColumn("company_name", DataTypes.TEXT) - .withClusteringColumn("software_name", DataTypes.TEXT) - .withClusteringColumn("software_version", DataTypes.INT) - .withEdgeLabel( - table("person").withPartitionKey("contributor"), - table("soft") - .withPartitionKey("company_name") - .withPartitionKey("software_name") - .withClusteringColumn("software_version"))) - .hasCql( - "CREATE TABLE contributors (contributor text,company_name text,software_name text,software_version int," - + "PRIMARY KEY(contributor,company_name,software_name,software_version)) " - + "WITH EDGE LABEL " - + "FROM person(contributor) " - + "TO soft((company_name,software_name),software_version)"); - } - - @Test - public void should_generate_create_table_with_named_edge() { - assertThat( - createDseTable("contributors") - .withPartitionKey("contributor", DataTypes.TEXT) - .withClusteringColumn("company_name", DataTypes.TEXT) - .withClusteringColumn("software_name", DataTypes.TEXT) - .withClusteringColumn("software_version", DataTypes.INT) - .withClusteringOrder("company_name", ClusteringOrder.ASC) 
- .withEdgeLabel( - "contrib", - table("person").withPartitionKey("contributor"), - table("soft") - .withPartitionKey("company_name") - .withPartitionKey("software_name") - .withClusteringColumn("software_version"))) - .hasCql( - "CREATE TABLE contributors (contributor text,company_name text,software_name text,software_version int," - + "PRIMARY KEY(contributor,company_name,software_name,software_version)) " - + "WITH CLUSTERING ORDER BY (company_name ASC) " - + "AND EDGE LABEL contrib " - + "FROM person(contributor) " - + "TO soft((company_name,software_name),software_version)"); - } - - @Test - public void should_generate_create_table_crc_check_chance() { - assertThat( - createDseTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCRCCheckChance(0.8)) - .hasCql("CREATE TABLE bar (k int PRIMARY KEY,v text) WITH crc_check_chance=0.8"); - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/DependencyCheckTest.java b/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/DependencyCheckTest.java deleted file mode 100644 index ff5dd1e66a4..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/DependencyCheckTest.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.querybuilder; - -import com.datastax.dse.driver.internal.DependencyCheckTestBase; -import java.nio.file.Path; -import java.nio.file.Paths; - -public class DependencyCheckTest extends DependencyCheckTestBase { - - @Override - protected Path getDepsTxtPath() { - return Paths.get( - getBaseResourcePathString(), - "target", - "classes", - "com", - "datastax", - "dse", - "driver", - "internal", - "querybuilder", - "deps.txt"); - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseAggregateTest.java b/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseAggregateTest.java deleted file mode 100644 index 22ba0d8814d..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseAggregateTest.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseAggregate; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.tuple; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.DataTypes; -import org.junit.Test; - -/** - * Tests for creating DSE extended aggregates. Most of these tests are copied from the OSS {@code - * com.datastax.oss.driver.internal.querybuilder.schema.CreateAggregateTest} class to ensure DSE - * extended behavior does not break OSS functionality, with additional tests to verify the DSE - * specific functionality (i.e. the DETERMINISTIC keyword). 
- */ -public class CreateDseAggregateTest { - - @Test - public void should_create_aggreate_with_simple_param() { - - assertThat( - createDseAggregate("keyspace1", "agg1") - .withParameter(DataTypes.INT) - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0))) - .asCql()) - .isEqualTo( - "CREATE AGGREGATE keyspace1.agg1 (int) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_many_params() { - - assertThat( - createDseAggregate("keyspace1", "agg2") - .withParameter(DataTypes.INT) - .withParameter(DataTypes.TEXT) - .withParameter(DataTypes.BOOLEAN) - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0))) - .asCql()) - .isEqualTo( - "CREATE AGGREGATE keyspace1.agg2 (int,text,boolean) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_param_without_frozen() { - - assertThat( - createDseAggregate("keyspace1", "agg9") - .withParameter(DataTypes.tupleOf(DataTypes.TEXT)) - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0))) - .asCql()) - .isEqualTo( - "CREATE AGGREGATE keyspace1.agg9 (tuple) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_no_params() { - - assertThat( - createDseAggregate("keyspace1", "agg3") - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0))) - .asCql()) - .isEqualTo( - "CREATE AGGREGATE keyspace1.agg3 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_no_keyspace() { - - assertThat( - createDseAggregate("agg4") - 
.withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0))) - .asCql()) - .isEqualTo( - "CREATE AGGREGATE agg4 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_if_not_exists() { - - assertThat( - createDseAggregate("agg6") - .ifNotExists() - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0))) - .asCql()) - .isEqualTo( - "CREATE AGGREGATE IF NOT EXISTS agg6 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_no_final_func() { - - assertThat( - createDseAggregate("cycling", "sum") - .withParameter(DataTypes.INT) - .withSFunc("dsum") - .withSType(DataTypes.INT) - .asCql()) - .isEqualTo("CREATE AGGREGATE cycling.sum (int) SFUNC dsum STYPE int"); - } - - @Test - public void should_create_or_replace() { - assertThat( - createDseAggregate("keyspace1", "agg7") - .orReplace() - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0))) - .asCql()) - .isEqualTo( - "CREATE OR REPLACE AGGREGATE keyspace1.agg7 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_not_throw_on_toString_for_CreateAggregateStart() { - assertThat(createDseAggregate("agg1").toString()).isEqualTo("CREATE AGGREGATE agg1 ()"); - } - - @Test - public void should_not_throw_on_toString_for_CreateAggregateWithParam() { - assertThat(createDseAggregate("func1").withParameter(DataTypes.INT).toString()) - .isEqualTo("CREATE AGGREGATE func1 (int)"); - } - - @Test - public void should_not_throw_on_toString_for_NotExists_OrReplace() { - assertThat(createDseAggregate("func1").ifNotExists().orReplace().toString()) - .isEqualTo("CREATE OR REPLACE AGGREGATE IF NOT EXISTS func1 
()"); - } - - @Test - public void should_create_aggregate_with_deterministic() { - - assertThat( - createDseAggregate("keyspace1", "agg1") - .withParameter(DataTypes.INT) - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0))) - .deterministic() - .asCql()) - .isEqualTo( - "CREATE AGGREGATE keyspace1.agg1 (int) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0) DETERMINISTIC"); - } -} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseFunctionTest.java b/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseFunctionTest.java deleted file mode 100644 index b795b4c5251..00000000000 --- a/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseFunctionTest.java +++ /dev/null @@ -1,454 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseFunction; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.udt; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.DataTypes; -import org.junit.Test; - -/** - * Tests for creating DSE extended functions. Most of these tests are copied from the OSS {@code - * com.datastax.oss.driver.internal.querybuilder.schema.CreateFunctionTest} class to ensure DSE - * extended behavior does not break OSS functionality, with additional tests to verify the DSE - * specific functionality (i.e. the DETERMINISTIC and MONOTONIC keywords). - */ -public class CreateDseFunctionTest { - - @Test - public void should_not_throw_on_toString_for_CreateFunctionStart() { - String funcStr = createDseFunction("func1").toString(); - assertThat(funcStr).isEqualTo("CREATE FUNCTION func1 () CALLED ON NULL INPUT"); - } - - @Test - public void should_not_throw_on_toString_for_CreateFunctionWithType() { - assertThat( - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .toString()) - .isEqualTo("CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int"); - } - - @Test - public void should_not_throw_on_toString_for_CreateFunctionWithLanguage() { - assertThat( - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .withJavaLanguage() - .toString()) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java"); - } - - @Test - public void should_create_function_with_simple_params() { - assertThat( - createDseFunction("keyspace1", "func1") - .withParameter("param1", DataTypes.INT) - .calledOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return 
Integer.toString(param1);") - .asCql()) - .isEqualTo( - "CREATE FUNCTION keyspace1.func1 (param1 int) CALLED ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_create_function_with_param_and_return_type_not_frozen() { - assertThat( - createDseFunction("keyspace1", "func6") - .withParameter("param1", DataTypes.tupleOf(DataTypes.INT, DataTypes.INT)) - .returnsNullOnNull() - .returnsType(udt("person", true)) - .withJavaLanguage() - .as("'return Integer.toString(param1);'") - .asCql()) - .isEqualTo( - "CREATE FUNCTION keyspace1.func6 (param1 tuple) RETURNS NULL ON NULL INPUT RETURNS person LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_honor_returns_null() { - assertThat( - createDseFunction("keyspace1", "func2") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return Integer.toString(param1);") - .asCql()) - .isEqualTo( - "CREATE FUNCTION keyspace1.func2 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_create_function_with_many_params() { - assertThat( - createDseFunction("keyspace1", "func3") - .withParameter("param1", DataTypes.INT) - .withParameter("param2", DataTypes.TEXT) - .withParameter("param3", DataTypes.BOOLEAN) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return Integer.toString(param1);") - .asCql()) - .isEqualTo( - "CREATE FUNCTION keyspace1.func3 (param1 int,param2 text,param3 boolean) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_create_function_with_no_params() { - - assertThat( - createDseFunction("keyspace1", "func4") - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withLanguage("java") - .asQuoted("return \"hello world\";") - .asCql()) 
- .isEqualTo( - "CREATE FUNCTION keyspace1.func4 () RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return \"hello world\";'"); - } - - @Test - public void should_create_function_with_no_keyspace() { - assertThat( - createDseFunction("func5") - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return \"hello world\";") - .asCql()) - .isEqualTo( - "CREATE FUNCTION func5 () RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return \"hello world\";'"); - } - - @Test - public void should_create_function_with_if_not_exists() { - assertThat( - createDseFunction("keyspace1", "func6") - .ifNotExists() - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return \"hello world\";") - .asCql()) - .isEqualTo( - "CREATE FUNCTION IF NOT EXISTS keyspace1.func6 () RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return \"hello world\";'"); - } - - @Test - public void should_create_or_replace() { - assertThat( - createDseFunction("keyspace1", "func6") - .orReplace() - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return Integer.toString(param1);") - .asCql()) - .isEqualTo( - "CREATE OR REPLACE FUNCTION keyspace1.func6 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_not_quote_body_using_as() { - assertThat( - createDseFunction("keyspace1", "func6") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .as("'return Integer.toString(param1);'") - .asCql()) - .isEqualTo( - "CREATE FUNCTION keyspace1.func6 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote() { - assertThat( - 
createDseFunction("keyspace1", "func6") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaScriptLanguage() - .asQuoted("'hello ' + param1;") - .asCql()) - .isEqualTo( - "CREATE FUNCTION keyspace1.func6 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE javascript AS $$ 'hello ' + param1; $$"); - } - - @Test - public void should_not_throw_on_toString_for_create_function_with_deterministic() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .deterministic() - .toString(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC"); - } - - @Test - public void should_not_quote_body_using_as_with_deterministic() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .deterministic() - .withJavaLanguage() - .as("'return Integer.toString(param1);'") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void - should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_deterministic() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .deterministic() - .withJavaScriptLanguage() - .asQuoted("'hello ' + param1;") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC LANGUAGE javascript AS $$ 'hello ' + param1; $$"); - } - - @Test - public void should_not_throw_on_toString_for_create_function_with_monotonic() { - final String funcStr = - createDseFunction("func1") - 
.withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .monotonic() - .toString(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int MONOTONIC"); - } - - @Test - public void should_not_quote_body_using_as_with_monotonic() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .monotonic() - .withJavaLanguage() - .as("'return Integer.toString(param1);'") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text MONOTONIC LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void - should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_monotonic() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .monotonic() - .withJavaScriptLanguage() - .asQuoted("'hello ' + param1;") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text MONOTONIC LANGUAGE javascript AS $$ 'hello ' + param1; $$"); - } - - @Test - public void should_not_throw_on_toString_for_create_function_with_monotonic_on() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .withParameter("param2", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .monotonicOn("param2") - .toString(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS int MONOTONIC ON param2"); - } - - @Test - public void should_not_quote_body_using_as_with_monotonic_on() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .withParameter("param2", DataTypes.INT) - 
.returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .monotonicOn("param2") - .withJavaLanguage() - .as("'return Integer.toString(param1);'") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS text MONOTONIC ON param2 LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void - should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_monotonic_on() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .withParameter("param2", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .monotonicOn("param2") - .withJavaScriptLanguage() - .asQuoted("'hello ' + param1;") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS text MONOTONIC ON param2 LANGUAGE javascript AS $$ 'hello ' + param1; $$"); - } - - @Test - public void should_not_throw_on_toString_for_create_function_with_deterministic_and_monotonic() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .deterministic() - .monotonic() - .toString(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC"); - } - - @Test - public void should_not_quote_body_using_as_with_deterministic_and_monotonic() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .deterministic() - .monotonic() - .withJavaLanguage() - .as("'return Integer.toString(param1);'") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC MONOTONIC LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void - 
should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_deterministic_and_monotonic() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .deterministic() - .monotonic() - .withJavaScriptLanguage() - .asQuoted("'hello ' + param1;") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC MONOTONIC LANGUAGE javascript AS $$ 'hello ' + param1; $$"); - } - - @Test - public void - should_not_throw_on_toString_for_create_function_with_deterministic_and_monotonic_on() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .withParameter("param2", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .deterministic() - .monotonicOn("param2") - .toString(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC ON param2"); - } - - @Test - public void should_not_quote_body_using_as_with_deterministic_and_monotonic_on() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .withParameter("param2", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .deterministic() - .monotonicOn("param2") - .withJavaLanguage() - .as("'return Integer.toString(param1);'") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC MONOTONIC ON param2 LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void - should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_deterministic_and_monotonic_on() { - final String funcStr = - createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .withParameter("param2", DataTypes.INT) - 
.returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .deterministic() - .monotonicOn("param2") - .withJavaScriptLanguage() - .asQuoted("'hello ' + param1;") - .asCql(); - assertThat(funcStr) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC MONOTONIC ON param2 LANGUAGE javascript AS $$ 'hello ' + param1; $$"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/Assertions.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/Assertions.java deleted file mode 100644 index 21f1922f6ca..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/Assertions.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder; - -public class Assertions extends org.assertj.core.api.Assertions { - - public static BuildableQueryAssert assertThat(BuildableQuery actual) { - return new BuildableQueryAssert(actual); - } - - public static CqlSnippetAssert assertThat(CqlSnippet actual) { - return new CqlSnippetAssert(actual); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryAssert.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryAssert.java deleted file mode 100644 index 8652fc7e3c8..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryAssert.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.assertj.core.api.AbstractAssert; - -public class BuildableQueryAssert extends AbstractAssert { - - public BuildableQueryAssert(BuildableQuery actual) { - super(actual, BuildableQueryAssert.class); - } - - public BuildableQueryAssert hasCql(String expected) { - assertThat(actual.asCql()).isEqualTo(expected); - return this; - } - - public BuildableQueryAssert isIdempotent() { - assertThat(actual.build().isIdempotent()).isTrue(); - return this; - } - - public BuildableQueryAssert isNotIdempotent() { - assertThat(actual.build().isIdempotent()).isFalse(); - return this; - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryTest.java deleted file mode 100644 index 875f957b2fb..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryTest.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.function; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.tuple; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.update; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class BuildableQueryTest { - - @DataProvider - public static Object[][] sampleQueries() { - // query | values | expected CQL | expected idempotence - return new Object[][] { - { - selectFrom("foo").all().whereColumn("k").isEqualTo(bindMarker("k")), - ImmutableMap.of("k", 1), - "SELECT * FROM foo WHERE k=:k", - true - }, - { - deleteFrom("foo").whereColumn("k").isEqualTo(bindMarker("k")), - ImmutableMap.of("k", 1), - "DELETE FROM foo WHERE k=:k", - true - }, - { - deleteFrom("foo").whereColumn("k").isEqualTo(bindMarker("k")).ifExists(), - ImmutableMap.of("k", 1), - "DELETE FROM foo WHERE k=:k IF EXISTS", - false - }, - { - insertInto("foo").value("a", bindMarker("a")).value("b", bindMarker("b")), - ImmutableMap.of("a", 1, "b", "b"), - "INSERT INTO foo (a,b) VALUES (:a,:b)", - true - }, - { - 
insertInto("foo").value("k", tuple(bindMarker("field1"), function("generate_id"))), - ImmutableMap.of("field1", 1), - "INSERT INTO foo (k) VALUES ((:field1,generate_id()))", - false - }, - { - update("foo").setColumn("v", bindMarker("v")).whereColumn("k").isEqualTo(bindMarker("k")), - ImmutableMap.of("v", 3, "k", 1), - "UPDATE foo SET v=:v WHERE k=:k", - true - }, - { - update("foo") - .setColumn("v", function("non_idempotent_func")) - .whereColumn("k") - .isEqualTo(bindMarker("k")), - ImmutableMap.of("k", 1), - "UPDATE foo SET v=non_idempotent_func() WHERE k=:k", - false - }, - }; - } - - @Test - @UseDataProvider("sampleQueries") - public void should_build_statement_without_values( - BuildableQuery query, - @SuppressWarnings("unused") Map boundValues, - String expectedQueryString, - boolean expectedIdempotence) { - SimpleStatement statement = query.build(); - assertThat(statement.getQuery()).isEqualTo(expectedQueryString); - assertThat(statement.isIdempotent()).isEqualTo(expectedIdempotence); - assertThat(statement.getPositionalValues()).isEmpty(); - assertThat(statement.getNamedValues()).isEmpty(); - } - - @Test - @UseDataProvider("sampleQueries") - public void should_build_statement_with_positional_values( - BuildableQuery query, - Map boundValues, - String expectedQueryString, - boolean expectedIdempotence) { - Object[] positionalValues = boundValues.values().toArray(); - SimpleStatement statement = query.build(positionalValues); - assertThat(statement.getQuery()).isEqualTo(expectedQueryString); - assertThat(statement.isIdempotent()).isEqualTo(expectedIdempotence); - assertThat(statement.getPositionalValues()).containsExactly(positionalValues); - assertThat(statement.getNamedValues()).isEmpty(); - } - - @Test - @UseDataProvider("sampleQueries") - public void should_build_statement_with_named_values( - BuildableQuery query, - Map boundValues, - String expectedQueryString, - boolean expectedIdempotence) { - SimpleStatement statement = query.build(boundValues); - 
assertThat(statement.getQuery()).isEqualTo(expectedQueryString); - assertThat(statement.isIdempotent()).isEqualTo(expectedIdempotence); - assertThat(statement.getPositionalValues()).isEmpty(); - assertThat(statement.getNamedValues()).hasSize(boundValues.size()); - for (Map.Entry entry : boundValues.entrySet()) { - assertThat(statement.getNamedValues().get(CqlIdentifier.fromCql(entry.getKey()))) - .isEqualTo(entry.getValue()); - } - } - - @Test - @UseDataProvider("sampleQueries") - public void should_convert_to_statement_builder( - BuildableQuery query, - Map boundValues, - String expectedQueryString, - boolean expectedIdempotence) { - Object[] positionalValues = boundValues.values().toArray(); - SimpleStatement statement = query.builder().addPositionalValues(positionalValues).build(); - assertThat(statement.getQuery()).isEqualTo(expectedQueryString); - assertThat(statement.isIdempotent()).isEqualTo(expectedIdempotence); - assertThat(statement.getPositionalValues()).containsExactly(positionalValues); - assertThat(statement.getNamedValues()).isEmpty(); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CharsetCodec.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CharsetCodec.java deleted file mode 100644 index 5620bcc2fe9..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CharsetCodec.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; -import com.datastax.oss.driver.internal.querybuilder.DefaultLiteral; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.nio.charset.Charset; - -/** Example codec implementation used for {@link DefaultLiteral} tests. */ -public class CharsetCodec implements TypeCodec { - - /** A registry that contains an instance of this codec. 
*/ - public static final CodecRegistry TEST_REGISTRY; - - static { - MutableCodecRegistry registry = new DefaultCodecRegistry("test"); - registry.register(new CharsetCodec()); - TEST_REGISTRY = registry; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.of(Charset.class); - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TEXT; - } - - @NonNull - @Override - public String format(Charset value) { - return "'" + value.name() + "'"; - } - - @Override - public ByteBuffer encode(Charset value, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("Not used in this test"); - } - - @Override - public Charset decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("Not used in this test"); - } - - @Override - public Charset parse(String value) { - throw new UnsupportedOperationException("Not used in this test"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CqlSnippetAssert.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CqlSnippetAssert.java deleted file mode 100644 index 0d76bbea3c6..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CqlSnippetAssert.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.assertj.core.api.AbstractAssert; - -public class CqlSnippetAssert extends AbstractAssert { - - public CqlSnippetAssert(CqlSnippet actual) { - super(actual, CqlSnippetAssert.class); - } - - public CqlSnippetAssert hasCql(String expected) { - StringBuilder builder = new StringBuilder(); - actual.appendTo(builder); - assertThat(builder.toString()).isEqualTo(expected); - return this; - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/TokenLiteralTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/TokenLiteralTest.java deleted file mode 100644 index 49584ea0c18..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/TokenLiteralTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedTokenFactory; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenFactory; -import com.datastax.oss.driver.internal.core.metadata.token.RandomTokenFactory; -import org.junit.Test; - -public class TokenLiteralTest { - - @Test - public void should_inline_murmur3_token_literal() { - assertThat( - selectFrom("test") - .all() - .whereToken("pk") - .isEqualTo(literal(Murmur3TokenFactory.MIN_TOKEN))) - .hasCql("SELECT * FROM test WHERE token(pk)=-9223372036854775808"); - } - - @Test - public void should_inline_byte_ordered_token_literal() { - assertThat( - selectFrom("test") - .all() - .whereToken("pk") - .isEqualTo(literal(ByteOrderedTokenFactory.MIN_TOKEN))) - .hasCql("SELECT * FROM test WHERE token(pk)=0x"); - } - - @Test - public void should_inline_random_token_literal() { - assertThat( - selectFrom("test") - .all() - .whereToken("pk") - .isEqualTo(literal(RandomTokenFactory.MIN_TOKEN))) - .hasCql("SELECT * FROM test WHERE token(pk)=-1"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionTest.java deleted file mode 
100644 index 08e4a42a568..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.condition; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; - -import org.junit.Test; - -public class ConditionTest { - - @Test - public void should_generate_simple_column_condition() { - deleteFrom("foo").whereColumn("k").isEqualTo(bindMarker()).ifColumn("v").isEqualTo(literal(1)); - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .if_(Condition.column("v").isEqualTo(literal(1)))) - .hasCql("DELETE FROM foo WHERE k=? IF v=1"); - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .if_( - Condition.column("v1").isEqualTo(literal(1)), - Condition.column("v2").isEqualTo(literal(2)))) - .hasCql("DELETE FROM foo WHERE k=? 
IF v1=1 AND v2=2"); - } - - @Test - public void should_generate_field_condition() { - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .if_(Condition.field("v", "f").isEqualTo(literal(1)))) - .hasCql("DELETE FROM foo WHERE k=? IF v.f=1"); - } - - @Test - public void should_generate_element_condition() { - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .if_(Condition.element("v", literal(1)).isEqualTo(literal(1)))) - .hasCql("DELETE FROM foo WHERE k=? IF v[1]=1"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentConditionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentConditionTest.java deleted file mode 100644 index 6d967f87a33..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentConditionTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.delete; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; - -import org.junit.Test; - -public class DeleteFluentConditionTest { - - @Test - public void should_generate_simple_column_condition() { - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifColumn("v") - .isEqualTo(literal(1))) - .hasCql("DELETE FROM foo WHERE k=? IF v=1"); - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifColumn("v1") - .isEqualTo(literal(1)) - .ifColumn("v2") - .isEqualTo(literal(2))) - .hasCql("DELETE FROM foo WHERE k=? IF v1=1 AND v2=2"); - } - - @Test - public void should_generate_field_condition() { - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifField("v", "f") - .isEqualTo(literal(1))) - .hasCql("DELETE FROM foo WHERE k=? IF v.f=1"); - } - - @Test - public void should_generate_element_condition() { - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifElement("v", literal(1)) - .isEqualTo(literal(1))) - .hasCql("DELETE FROM foo WHERE k=? IF v[1]=1"); - } - - @Test - public void should_generate_if_exists_condition() { - assertThat(deleteFrom("foo").whereColumn("k").isEqualTo(bindMarker()).ifExists()) - .hasCql("DELETE FROM foo WHERE k=? IF EXISTS"); - } - - @Test - public void should_cancel_if_exists_if_other_condition_added() { - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifExists() - .ifColumn("v") - .isEqualTo(literal(1))) - .hasCql("DELETE FROM foo WHERE k=? 
IF v=1"); - } - - @Test - public void should_cancel_other_conditions_if_if_exists_added() { - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifColumn("v1") - .isEqualTo(literal(1)) - .ifColumn("v2") - .isEqualTo(literal(2)) - .ifExists()) - .hasCql("DELETE FROM foo WHERE k=? IF EXISTS"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentRelationTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentRelationTest.java deleted file mode 100644 index e42038029a3..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentRelationTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.delete; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; - -import com.datastax.oss.driver.api.querybuilder.relation.RelationTest; -import com.datastax.oss.driver.api.querybuilder.select.SelectFluentRelationTest; -import org.junit.Test; - -/** - * Mostly covered by other tests already. - * - * @see SelectFluentRelationTest - * @see RelationTest - */ -public class DeleteFluentRelationTest { - - @Test - public void should_generate_delete_with_column_relation() { - assertThat(deleteFrom("foo").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("DELETE FROM foo WHERE k=?"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteIdempotenceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteIdempotenceTest.java deleted file mode 100644 index 9b0dead3845..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteIdempotenceTest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.delete; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.function; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.raw; - -import org.junit.Test; - -public class DeleteIdempotenceTest { - - @Test - public void should_not_be_idempotent_if_conditional() { - assertThat(deleteFrom("foo").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("DELETE FROM foo WHERE k=?") - .isIdempotent(); - assertThat(deleteFrom("foo").whereColumn("k").isEqualTo(bindMarker()).ifExists()) - .hasCql("DELETE FROM foo WHERE k=? IF EXISTS") - .isNotIdempotent(); - assertThat( - deleteFrom("foo") - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifColumn("k") - .isEqualTo(literal(1))) - .hasCql("DELETE FROM foo WHERE k=? 
IF k=1") - .isNotIdempotent(); - } - - @Test - public void should_not_be_idempotent_if_deleting_collection_element() { - assertThat(deleteFrom("foo").element("l", literal(0)).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("DELETE l[0] FROM foo WHERE k=?") - .isNotIdempotent(); - } - - @Test - public void should_not_be_idempotent_if_using_non_idempotent_term_in_relation() { - assertThat(deleteFrom("foo").whereColumn("k").isEqualTo(function("non_idempotent_func"))) - .hasCql("DELETE FROM foo WHERE k=non_idempotent_func()") - .isNotIdempotent(); - assertThat(deleteFrom("foo").whereColumn("k").isEqualTo(raw("1"))) - .hasCql("DELETE FROM foo WHERE k=1") - .isNotIdempotent(); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelectorTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelectorTest.java deleted file mode 100644 index cce4cf51a10..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelectorTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.delete; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; - -import com.datastax.oss.driver.api.core.data.CqlVector; -import org.junit.Test; - -public class DeleteSelectorTest { - - @Test - public void should_generate_column_deletion() { - assertThat(deleteFrom("foo").column("v").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("DELETE v FROM foo WHERE k=?"); - assertThat(deleteFrom("ks", "foo").column("v").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("DELETE v FROM ks.foo WHERE k=?"); - } - - @Test - public void should_generate_vector_deletion() { - assertThat( - deleteFrom("foo") - .column("v") - .whereColumn("k") - .isEqualTo(literal(CqlVector.newInstance(0.1, 0.2)))) - .hasCql("DELETE v FROM foo WHERE k=[0.1, 0.2]"); - } - - @Test - public void should_generate_field_deletion() { - assertThat( - deleteFrom("foo").field("address", "street").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("DELETE address.street FROM foo WHERE k=?"); - } - - @Test - public void should_generate_element_deletion() { - assertThat(deleteFrom("foo").element("m", literal(1)).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("DELETE m[1] FROM foo WHERE k=?"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteTimestampTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteTimestampTest.java deleted file mode 100644 index daa4ece66e6..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteTimestampTest.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.delete; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; - -import org.junit.Test; - -public class DeleteTimestampTest { - - @Test - public void should_generate_using_timestamp_clause() { - assertThat(deleteFrom("foo").usingTimestamp(1).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("DELETE FROM foo USING TIMESTAMP 1 WHERE k=?"); - assertThat( - deleteFrom("foo").usingTimestamp(bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("DELETE FROM foo USING TIMESTAMP ? WHERE k=?"); - assertThat( - deleteFrom("foo") - .column("v") - .usingTimestamp(1) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("DELETE v FROM foo USING TIMESTAMP 1 WHERE k=?"); - assertThat( - deleteFrom("foo") - .column("v") - .usingTimestamp(bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("DELETE v FROM foo USING TIMESTAMP ? 
WHERE k=?"); - } - - @Test - public void should_use_last_timestamp_if_called_multiple_times() { - assertThat( - deleteFrom("foo") - .usingTimestamp(1) - .usingTimestamp(2) - .usingTimestamp(3) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("DELETE FROM foo USING TIMESTAMP 3 WHERE k=?"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/InsertIdempotenceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/InsertIdempotenceTest.java deleted file mode 100644 index 37baf2f91ed..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/InsertIdempotenceTest.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.insert; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.add; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.function; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.raw; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.tuple; - -import org.junit.Test; - -public class InsertIdempotenceTest { - - @Test - public void should_not_be_idempotent_if_conditional() { - assertThat(insertInto("foo").value("k", literal(1))) - .hasCql("INSERT INTO foo (k) VALUES (1)") - .isIdempotent(); - assertThat(insertInto("foo").value("k", literal(1)).ifNotExists()) - .hasCql("INSERT INTO foo (k) VALUES (1) IF NOT EXISTS") - .isNotIdempotent(); - } - - @Test - public void should_not_be_idempotent_if_inserting_non_idempotent_term() { - assertThat(insertInto("foo").value("k", literal(1))) - .hasCql("INSERT INTO foo (k) VALUES (1)") - .isIdempotent(); - assertThat(insertInto("foo").value("k", function("generate_id"))) - .hasCql("INSERT INTO foo (k) VALUES (generate_id())") - .isNotIdempotent(); - assertThat(insertInto("foo").value("k", raw("generate_id()"))) - .hasCql("INSERT INTO foo (k) VALUES (generate_id())") - .isNotIdempotent(); - - assertThat(insertInto("foo").value("k", add(literal(1), literal(1)))) - .hasCql("INSERT INTO foo (k) VALUES (1+1)") - .isIdempotent(); - assertThat(insertInto("foo").value("k", add(literal(1), function("generate_id")))) - .hasCql("INSERT INTO foo (k) VALUES (1+generate_id())") - .isNotIdempotent(); - - assertThat(insertInto("foo").value("k", tuple(literal(1), literal(1)))) - .hasCql("INSERT INTO foo (k) VALUES ((1,1))") - .isIdempotent(); - assertThat(insertInto("foo").value("k", 
tuple(literal(1), function("generate_id")))) - .hasCql("INSERT INTO foo (k) VALUES ((1,generate_id()))") - .isNotIdempotent(); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsertTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsertTest.java deleted file mode 100644 index 8fa9dcddc33..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsertTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.insert; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; - -import org.junit.Test; - -public class JsonInsertTest { - - @Test - public void should_generate_insert_json() { - assertThat(insertInto("foo").json("{\"bar\": 1}")) - .hasCql("INSERT INTO foo JSON '{\"bar\": 1}'"); - assertThat(insertInto("foo").json(bindMarker())).hasCql("INSERT INTO foo JSON ?"); - assertThat(insertInto("foo").json(bindMarker()).defaultNull()) - .hasCql("INSERT INTO foo JSON ? DEFAULT NULL"); - assertThat(insertInto("foo").json(bindMarker()).defaultUnset()) - .hasCql("INSERT INTO foo JSON ? DEFAULT UNSET"); - } - - @Test - public void should_keep_last_missing_json_behavior() { - assertThat(insertInto("foo").json(bindMarker()).defaultNull().defaultUnset()) - .hasCql("INSERT INTO foo JSON ? DEFAULT UNSET"); - } - - @Test - public void should_generate_if_not_exists_and_timestamp_clauses() { - assertThat(insertInto("foo").json(bindMarker()).ifNotExists().usingTimestamp(1)) - .hasCql("INSERT INTO foo JSON ? IF NOT EXISTS USING TIMESTAMP 1"); - assertThat(insertInto("foo").json(bindMarker()).defaultUnset().ifNotExists().usingTimestamp(1)) - .hasCql("INSERT INTO foo JSON ? DEFAULT UNSET IF NOT EXISTS USING TIMESTAMP 1"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsertTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsertTest.java deleted file mode 100644 index 89c833ff1c6..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsertTest.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.insert; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.insert.DefaultInsert; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Map; -import org.junit.Test; - -public class RegularInsertTest { - - @Test - public void should_generate_column_assignments() { - assertThat(insertInto("foo").value("a", literal(1)).value("b", literal(2))) - .hasCql("INSERT INTO foo (a,b) VALUES (1,2)"); - assertThat(insertInto("ks", "foo").value("a", literal(1)).value("b", literal(2))) - .hasCql("INSERT INTO ks.foo (a,b) VALUES (1,2)"); - assertThat(insertInto("foo").value("a", bindMarker()).value("b", bindMarker())) - .hasCql("INSERT INTO foo (a,b) VALUES (?,?)"); - } - - @Test - public void 
should_generate_vector_literals() { - assertThat(insertInto("foo").value("a", literal(CqlVector.newInstance(0.1, 0.2, 0.3)))) - .hasCql("INSERT INTO foo (a) VALUES ([0.1, 0.2, 0.3])"); - } - - @Test - public void should_keep_last_assignment_if_column_listed_twice() { - assertThat( - insertInto("foo") - .value("a", bindMarker()) - .value("b", bindMarker()) - .value("a", literal(1))) - .hasCql("INSERT INTO foo (b,a) VALUES (?,1)"); - } - - @Test - public void should_generate_bulk_column_assignments() { - Map assignments = ImmutableMap.of("a", literal(1), "b", literal(2)); - assertThat(insertInto("ks", "foo").values(assignments)) - .hasCql("INSERT INTO ks.foo (a,b) VALUES (1,2)"); - - assertThat( - insertInto("ks", "foo") - .value("a", literal(2)) - .value("c", literal(3)) - .values(assignments)) - .hasCql("INSERT INTO ks.foo (c,a,b) VALUES (3,1,2)"); - } - - @Test - public void should_generate_if_not_exists_clause() { - assertThat(insertInto("foo").value("a", bindMarker()).ifNotExists()) - .hasCql("INSERT INTO foo (a) VALUES (?) IF NOT EXISTS"); - } - - @Test - public void should_generate_using_timestamp_clause() { - assertThat(insertInto("foo").value("a", bindMarker()).usingTimestamp(1)) - .hasCql("INSERT INTO foo (a) VALUES (?) USING TIMESTAMP 1"); - assertThat(insertInto("foo").value("a", bindMarker()).usingTimestamp(bindMarker())) - .hasCql("INSERT INTO foo (a) VALUES (?) USING TIMESTAMP ?"); - } - - @Test - public void should_use_last_timestamp_if_called_multiple_times() { - assertThat( - insertInto("foo") - .value("a", bindMarker()) - .usingTimestamp(1) - .usingTimestamp(2) - .usingTimestamp(3)) - .hasCql("INSERT INTO foo (a) VALUES (?) USING TIMESTAMP 3"); - } - - @Test - public void should_generate_if_not_exists_and_timestamp_clauses() { - assertThat(insertInto("foo").value("a", bindMarker()).ifNotExists().usingTimestamp(1)) - .hasCql("INSERT INTO foo (a) VALUES (?) 
IF NOT EXISTS USING TIMESTAMP 1"); - } - - @Test - public void should_generate_ttl_clause() { - assertThat(insertInto("foo").value("a", bindMarker()).usingTtl(10)) - .hasCql("INSERT INTO foo (a) VALUES (?) USING TTL 10"); - } - - @Test - public void should_use_last_ttl_if_called_multiple_times() { - assertThat(insertInto("foo").value("a", bindMarker()).usingTtl(10).usingTtl(20).usingTtl(30)) - .hasCql("INSERT INTO foo (a) VALUES (?) USING TTL 30"); - } - - @Test - public void should_generate_using_timestamp_and_ttl_clauses() { - assertThat(insertInto("foo").value("a", bindMarker()).usingTtl(10).usingTimestamp(30l)) - .hasCql("INSERT INTO foo (a) VALUES (?) USING TIMESTAMP 30 AND TTL 10"); - // order of TTL and TIMESTAMP method calls should not change the order of the generated clauses - assertThat(insertInto("foo").value("a", bindMarker()).usingTimestamp(30l).usingTtl(10)) - .hasCql("INSERT INTO foo (a) VALUES (?) USING TIMESTAMP 30 AND TTL 10"); - } - - @Test - public void should_throw_exception_with_invalid_ttl() { - DefaultInsert defaultInsert = - (DefaultInsert) insertInto("foo").value("a", bindMarker()).usingTtl(10); - - Throwable t = - catchThrowable( - () -> - new DefaultInsert( - defaultInsert.getKeyspace(), - defaultInsert.getTable(), - (Term) defaultInsert.getJson(), - defaultInsert.getMissingJsonBehavior(), - defaultInsert.getAssignments(), - defaultInsert.getTimestamp(), - new Object(), // invalid TTL object - defaultInsert.isIfNotExists())); - - assertThat(t) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("TTL value must be a BindMarker or an Integer"); - } - - @Test - public void should_throw_exception_with_invalid_timestamp() { - DefaultInsert defaultInsert = - (DefaultInsert) insertInto("foo").value("a", bindMarker()).usingTimestamp(1); - - Throwable t = - catchThrowable( - () -> - new DefaultInsert( - defaultInsert.getKeyspace(), - defaultInsert.getTable(), - (Term) defaultInsert.getJson(), - defaultInsert.getMissingJsonBehavior(), - 
defaultInsert.getAssignments(), - new Object(), // invalid timestamp object) - defaultInsert.getTtlInSeconds(), - defaultInsert.isIfNotExists())); - assertThat(t) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("TIMESTAMP value must be a BindMarker or a Long"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/RelationTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/RelationTest.java deleted file mode 100644 index ec121eaa050..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/RelationTest.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.relation; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.raw; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.tuple; - -import org.assertj.core.util.Lists; -import org.junit.Test; - -public class RelationTest { - - @Test - public void should_generate_comparison_relation() { - assertThat(selectFrom("foo").all().where(Relation.column("k").isEqualTo(bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k=?"); - assertThat(selectFrom("foo").all().where(Relation.column("k").isEqualTo(bindMarker("value")))) - .hasCql("SELECT * FROM foo WHERE k=:value"); - } - - @Test - public void should_generate_is_not_null_relation() { - assertThat(selectFrom("foo").all().where(Relation.column("k").isNotNull())) - .hasCql("SELECT * FROM foo WHERE k IS NOT NULL"); - } - - @Test - public void should_generate_contains_relation() { - assertThat(selectFrom("foo").all().where(Relation.column("k").contains(literal(1)))) - .hasCql("SELECT * FROM foo WHERE k CONTAINS 1"); - } - - @Test - public void should_generate_contains_key_relation() { - assertThat(selectFrom("foo").all().where(Relation.column("k").containsKey(literal(1)))) - .hasCql("SELECT * FROM foo WHERE k CONTAINS KEY 1"); - } - - @Test - public void should_generate_not_contains_relation() { - assertThat(selectFrom("foo").all().where(Relation.column("k").notContains(literal(1)))) - .hasCql("SELECT * FROM foo WHERE k NOT CONTAINS 1"); - } - - @Test - public void should_generate_not_contains_key_relation() { - assertThat(selectFrom("foo").all().where(Relation.column("k").notContainsKey(literal(1)))) - .hasCql("SELECT * FROM foo 
WHERE k NOT CONTAINS KEY 1"); - } - - @Test - public void should_generate_in_relation_bind_markers() { - assertThat(selectFrom("foo").all().where(Relation.column("k").in(bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k IN ?"); - assertThat(selectFrom("foo").all().where(Relation.column("k").in(bindMarker(), bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k IN (?,?)"); - } - - @Test - public void should_generate_in_relation_terms() { - assertThat( - selectFrom("foo") - .all() - .where( - Relation.column("k") - .in(Lists.newArrayList(literal(1), literal(2), literal(3))))) - .hasCql("SELECT * FROM foo WHERE k IN (1,2,3)"); - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").in(literal(1), literal(2), literal(3)))) - .hasCql("SELECT * FROM foo WHERE k IN (1,2,3)"); - } - - @Test - public void should_generate_not_in_relation_bind_markers() { - assertThat(selectFrom("foo").all().where(Relation.column("k").notIn(bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k NOT IN ?"); - assertThat( - selectFrom("foo").all().where(Relation.column("k").notIn(bindMarker(), bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k NOT IN (?,?)"); - } - - @Test - public void should_generate_not_in_relation_terms() { - assertThat( - selectFrom("foo") - .all() - .where( - Relation.column("k") - .notIn(Lists.newArrayList(literal(1), literal(2), literal(3))))) - .hasCql("SELECT * FROM foo WHERE k NOT IN (1,2,3)"); - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").notIn(literal(1), literal(2), literal(3)))) - .hasCql("SELECT * FROM foo WHERE k NOT IN (1,2,3)"); - } - - @Test - public void should_generate_token_relation() { - assertThat(selectFrom("foo").all().where(Relation.token("k1", "k2").isEqualTo(bindMarker("t")))) - .hasCql("SELECT * FROM foo WHERE token(k1,k2)=:t"); - } - - @Test - public void should_generate_column_component_relation() { - assertThat( - selectFrom("foo") - .all() - .where( - Relation.column("id").isEqualTo(bindMarker()), 
- Relation.mapValue("user", raw("'name'")).isEqualTo(bindMarker()))) - .hasCql("SELECT * FROM foo WHERE id=? AND user['name']=?"); - } - - @Test - public void should_generate_tuple_relation() { - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(bindMarker())) - .where(Relation.columns("c1", "c2", "c3").in(bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3) IN ?"); - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(bindMarker())) - .where(Relation.columns("c1", "c2", "c3").in(bindMarker(), bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3) IN (?,?)"); - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(bindMarker())) - .where(Relation.columns("c1", "c2", "c3").in(bindMarker(), raw("(4,5,6)")))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3) IN (?,(4,5,6))"); - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(bindMarker())) - .where( - Relation.columns("c1", "c2", "c3") - .in( - tuple(bindMarker(), bindMarker(), bindMarker()), - tuple(bindMarker(), bindMarker(), bindMarker())))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3) IN ((?,?,?),(?,?,?))"); - - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(bindMarker())) - .where(Relation.columns("c1", "c2", "c3").isEqualTo(bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3)=?"); - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(bindMarker())) - .where( - Relation.columns("c1", "c2", "c3") - .isLessThan(tuple(bindMarker(), bindMarker(), bindMarker())))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3)<(?,?,?)"); - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(bindMarker())) - .where(Relation.columns("c1", "c2", "c3").isGreaterThanOrEqualTo(raw("(1,2,3)")))) - .hasCql("SELECT * FROM foo WHERE k=? 
AND (c1,c2,c3)>=(1,2,3)"); - } - - @Test - public void should_generate_custom_index_relation() { - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(bindMarker())) - .where(Relation.customIndex("my_index", raw("'custom expression'")))) - .hasCql("SELECT * FROM foo WHERE k=? AND expr(my_index,'custom expression')"); - } - - @Test - public void should_generate_raw_relation() { - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(bindMarker())) - .where(raw("c = 'test'"))) - .hasCql("SELECT * FROM foo WHERE k=? AND c = 'test'"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/TermTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/TermTest.java deleted file mode 100644 index 61d09ecea7b..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/TermTest.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.relation; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.add; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.currentDate; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.currentTime; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.currentTimeUuid; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.currentTimestamp; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.function; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.maxTimeUuid; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.minTimeUuid; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.multiply; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.negate; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.now; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.raw; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.remainder; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.subtract; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.toDate; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.toTimestamp; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.toUnixTimestamp; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.typeHint; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataTypes; -import 
com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.querybuilder.CharsetCodec; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Date; -import org.junit.Test; - -public class TermTest { - - @Test - public void should_generate_arithmetic_terms() { - assertThat(add(raw("a"), raw("b"))).hasCql("a+b"); - assertThat(add(add(raw("a"), raw("b")), add(raw("c"), raw("d")))).hasCql("a+b+c+d"); - assertThat(subtract(add(raw("a"), raw("b")), add(raw("c"), raw("d")))).hasCql("a+b-(c+d)"); - assertThat(subtract(add(raw("a"), raw("b")), subtract(raw("c"), raw("d")))).hasCql("a+b-(c-d)"); - assertThat(negate(add(raw("a"), raw("b")))).hasCql("-(a+b)"); - assertThat(negate(subtract(raw("a"), raw("b")))).hasCql("-(a-b)"); - assertThat(multiply(add(raw("a"), raw("b")), add(raw("c"), raw("d")))).hasCql("(a+b)*(c+d)"); - assertThat(remainder(multiply(raw("a"), raw("b")), multiply(raw("c"), raw("d")))) - .hasCql("a*b%(c*d)"); - assertThat(remainder(multiply(raw("a"), raw("b")), remainder(raw("c"), raw("d")))) - .hasCql("a*b%(c%d)"); - } - - @Test - public void should_generate_function_terms() { - assertThat(function("f")).hasCql("f()"); - assertThat(function("f", raw("a"), raw("b"))).hasCql("f(a,b)"); - assertThat(function("ks", "f", raw("a"), raw("b"))).hasCql("ks.f(a,b)"); - assertThat(now()).hasCql("now()"); - assertThat(currentTimestamp()).hasCql("currenttimestamp()"); - assertThat(currentDate()).hasCql("currentdate()"); - assertThat(currentTime()).hasCql("currenttime()"); - assertThat(currentTimeUuid()).hasCql("currenttimeuuid()"); - assertThat(minTimeUuid(raw("a"))).hasCql("mintimeuuid(a)"); - 
assertThat(maxTimeUuid(raw("a"))).hasCql("maxtimeuuid(a)"); - assertThat(toDate(raw("a"))).hasCql("todate(a)"); - assertThat(toTimestamp(raw("a"))).hasCql("totimestamp(a)"); - assertThat(toUnixTimestamp(raw("a"))).hasCql("tounixtimestamp(a)"); - } - - @Test - public void should_generate_type_hint_terms() { - assertThat(typeHint(raw("1"), DataTypes.BIGINT)).hasCql("(bigint)1"); - } - - @Test - public void should_generate_literal_terms() { - assertThat(literal(1)).hasCql("1"); - assertThat(literal("foo")).hasCql("'foo'"); - assertThat(literal(ImmutableList.of(1, 2, 3))).hasCql("[1,2,3]"); - - TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT); - TupleValue tupleValue = tupleType.newValue().setInt(0, 1).setString(1, "foo"); - assertThat(literal(tupleValue)).hasCql("(1,'foo')"); - - UserDefinedType udtType = - new UserDefinedTypeBuilder(CqlIdentifier.fromCql("ks"), CqlIdentifier.fromCql("user")) - .withField(CqlIdentifier.fromCql("first_name"), DataTypes.TEXT) - .withField(CqlIdentifier.fromCql("last_name"), DataTypes.TEXT) - .build(); - UdtValue udtValue = - udtType.newValue().setString("first_name", "Jane").setString("last_name", "Doe"); - assertThat(literal(udtValue)).hasCql("{first_name:'Jane',last_name:'Doe'}"); - assertThat(literal(null)).hasCql("NULL"); - - assertThat(literal(Charsets.UTF_8, new CharsetCodec())).hasCql("'UTF-8'"); - assertThat(literal(Charsets.UTF_8, CharsetCodec.TEST_REGISTRY)).hasCql("'UTF-8'"); - } - - @Test - public void should_fail_when_no_codec_for_literal() { - assertThatThrownBy(() -> literal(new Date(1234))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Could not inline literal of type java.util.Date. " - + "This happens because the driver doesn't know how to map it to a CQL type. 
" - + "Try passing a TypeCodec or CodecRegistry to literal().") - .hasCauseInstanceOf(CodecNotFoundException.class); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceTest.java deleted file mode 100644 index 3c1b8ca7af1..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceTest.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.alterKeyspace; - -import org.junit.Test; - -public class AlterKeyspaceTest { - - @Test - public void should_not_throw_on_toString_for_AlterKeyspaceStart() { - assertThat(alterKeyspace("foo").toString()).isEqualTo("ALTER KEYSPACE foo"); - } - - @Test - public void should_generate_alter_keyspace_with_replication() { - assertThat(alterKeyspace("foo").withSimpleStrategy(3)) - .hasCql( - "ALTER KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':3}"); - } - - @Test - public void should_generate_alter_keyspace_with_durable_writes_and_options() { - assertThat(alterKeyspace("foo").withDurableWrites(true).withOption("hello", "world")) - .hasCql("ALTER KEYSPACE foo WITH durable_writes=true AND hello='world'"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewTest.java deleted file mode 100644 index ef131a255a7..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewTest.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.alterMaterializedView; - -import org.junit.Test; - -public class AlterMaterializedViewTest { - - @Test - public void should_not_throw_on_toString_for_AlterMaterializedViewStart() { - assertThat(alterMaterializedView("foo").toString()).isEqualTo("ALTER MATERIALIZED VIEW foo"); - } - - @Test - public void should_generate_alter_view_with_options() { - assertThat( - alterMaterializedView("baz").withLZ4Compression().withDefaultTimeToLiveSeconds(86400)) - .hasCql( - "ALTER MATERIALIZED VIEW baz WITH compression={'class':'LZ4Compressor'} AND default_time_to_live=86400"); - } - - @Test - public void should_generate_alter_view_with_keyspace_options() { - assertThat(alterMaterializedView("foo", "baz").withCDC(true)) - .hasCql("ALTER MATERIALIZED VIEW foo.baz WITH cdc=true"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableTest.java deleted file mode 100644 index 2c99b154b38..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableTest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.alterTable; - -import com.datastax.oss.driver.api.core.type.DataTypes; -import org.junit.Test; - -public class AlterTableTest { - - @Test - public void should_not_throw_on_toString_for_AlterTableStart() { - assertThat(alterTable("foo").toString()).isEqualTo("ALTER TABLE foo"); - } - - @Test - public void should_generate_alter_table_with_alter_column_type() { - assertThat(alterTable("foo", "bar").alterColumn("x", DataTypes.TEXT)) - .hasCql("ALTER TABLE foo.bar ALTER x TYPE text"); - } - - @Test - public void should_generate_alter_table_with_add_single_column() { - assertThat(alterTable("foo", "bar").addColumn("x", DataTypes.TEXT)) - .hasCql("ALTER TABLE foo.bar ADD x text"); - } - - @Test - public void should_generate_alter_table_with_add_single_column_static() { - assertThat(alterTable("foo", "bar").addStaticColumn("x", DataTypes.TEXT)) - .hasCql("ALTER TABLE foo.bar ADD x text STATIC"); - } - - @Test - public void should_generate_alter_table_with_add_three_columns() { - assertThat( - alterTable("foo", "bar") - .addColumn("x", DataTypes.TEXT) - .addStaticColumn("y", 
DataTypes.FLOAT) - .addColumn("z", DataTypes.DOUBLE)) - .hasCql("ALTER TABLE foo.bar ADD (x text,y float STATIC,z double)"); - } - - @Test - public void should_generate_alter_table_with_drop_single_column() { - assertThat(alterTable("foo", "bar").dropColumn("x")).hasCql("ALTER TABLE foo.bar DROP x"); - } - - @Test - public void should_generate_alter_table_with_drop_two_columns() { - assertThat(alterTable("foo", "bar").dropColumn("x").dropColumn("y")) - .hasCql("ALTER TABLE foo.bar DROP (x,y)"); - } - - @Test - public void should_generate_alter_table_with_drop_two_columns_at_once() { - assertThat(alterTable("foo", "bar").dropColumns("x", "y")) - .hasCql("ALTER TABLE foo.bar DROP (x,y)"); - } - - @Test - public void should_generate_alter_table_with_rename_single_column() { - assertThat(alterTable("foo", "bar").renameColumn("x", "y")) - .hasCql("ALTER TABLE foo.bar RENAME x TO y"); - } - - @Test - public void should_generate_alter_table_with_rename_three_columns() { - assertThat( - alterTable("foo", "bar") - .renameColumn("x", "y") - .renameColumn("u", "v") - .renameColumn("b", "a")) - .hasCql("ALTER TABLE foo.bar RENAME x TO y AND u TO v AND b TO a"); - } - - @Test - public void should_generate_alter_table_with_drop_compact_storage() { - assertThat(alterTable("bar").dropCompactStorage()) - .hasCql("ALTER TABLE bar DROP COMPACT STORAGE"); - } - - @Test - public void should_generate_alter_table_with_options() { - assertThat(alterTable("bar").withComment("Hello").withCDC(true)) - .hasCql("ALTER TABLE bar WITH comment='Hello' AND cdc=true"); - } - - @Test - public void should_generate_alter_table_with_no_compression() { - assertThat(alterTable("bar").withNoCompression()) - .hasCql("ALTER TABLE bar WITH compression={'sstable_compression':''}"); - } - - @Test - public void should_generate_alter_table_with_vector() { - assertThat(alterTable("bar").alterColumn("v", DataTypes.vectorOf(DataTypes.FLOAT, 3))) - .hasCql("ALTER TABLE bar ALTER v TYPE vector"); - } -} diff --git 
a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeTest.java deleted file mode 100644 index 14bec0a6ce3..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeTest.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.alterType; - -import com.datastax.oss.driver.api.core.type.DataTypes; -import org.junit.Test; - -public class AlterTypeTest { - - @Test - public void should_not_throw_on_toString_for_AlterTypeStart() { - assertThat(alterType("foo").toString()).isEqualTo("ALTER TYPE foo"); - } - - @Test - public void should_generate_alter_type_with_alter_field_type() { - assertThat(alterType("foo", "bar").alterField("x", DataTypes.TEXT)) - .hasCql("ALTER TYPE foo.bar ALTER x TYPE text"); - } - - @Test - public void should_generate_alter_table_with_add_field() { - assertThat(alterType("foo", "bar").addField("x", DataTypes.TEXT)) - .hasCql("ALTER TYPE foo.bar ADD x text"); - } - - @Test - public void should_generate_alter_table_with_rename_single_column() { - assertThat(alterType("foo", "bar").renameField("x", "y")) - .hasCql("ALTER TYPE foo.bar RENAME x TO y"); - } - - @Test - public void should_generate_alter_table_with_rename_three_columns() { - assertThat(alterType("bar").renameField("x", "y").renameField("u", "v").renameField("b", "a")) - .hasCql("ALTER TYPE bar RENAME x TO y AND u TO v AND b TO a"); - } - - @Test - public void should_generate_alter_type_with_vector() { - assertThat(alterType("foo", "bar").alterField("vec", DataTypes.vectorOf(DataTypes.FLOAT, 3))) - .hasCql("ALTER TYPE foo.bar ALTER vec TYPE vector"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateTest.java deleted file mode 100644 index 00e41dd87c0..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateTest.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.tuple; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.createAggregate; - -import com.datastax.oss.driver.api.core.type.DataTypes; -import org.junit.Test; - -public class CreateAggregateTest { - @Test - public void should_create_aggreate_with_simple_param() { - - assertThat( - createAggregate("keyspace1", "agg1") - .withParameter(DataTypes.INT) - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0)))) - .hasCql( - "CREATE AGGREGATE keyspace1.agg1 (int) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_many_params() { - - assertThat( - createAggregate("keyspace1", "agg2") - .withParameter(DataTypes.INT) - .withParameter(DataTypes.TEXT) - .withParameter(DataTypes.BOOLEAN) - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - 
.withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0)))) - .hasCql( - "CREATE AGGREGATE keyspace1.agg2 (int,text,boolean) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_param_without_frozen() { - - assertThat( - createAggregate("keyspace1", "agg9") - .withParameter(DataTypes.tupleOf(DataTypes.TEXT)) - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0)))) - .hasCql( - "CREATE AGGREGATE keyspace1.agg9 (tuple) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_no_params() { - - assertThat( - createAggregate("keyspace1", "agg3") - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0)))) - .hasCql( - "CREATE AGGREGATE keyspace1.agg3 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_no_keyspace() { - - assertThat( - createAggregate("agg4") - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0)))) - .hasCql( - "CREATE AGGREGATE agg4 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_if_not_exists() { - - assertThat( - createAggregate("agg6") - .ifNotExists() - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0)))) - .hasCql( - "CREATE AGGREGATE IF NOT EXISTS agg6 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_create_aggregate_with_no_final_func() { - - assertThat( - createAggregate("cycling", "sum") - .withParameter(DataTypes.INT) - .withSFunc("dsum") - 
.withSType(DataTypes.INT)) - .hasCql("CREATE AGGREGATE cycling.sum (int) SFUNC dsum STYPE int"); - } - - @Test - public void should_create_or_replace() { - assertThat( - createAggregate("keyspace1", "agg7") - .orReplace() - .withSFunc("sfunction") - .withSType(DataTypes.ASCII) - .withFinalFunc("finalfunction") - .withInitCond(tuple(literal(0), literal(0)))) - .hasCql( - "CREATE OR REPLACE AGGREGATE keyspace1.agg7 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); - } - - @Test - public void should_not_throw_on_toString_for_CreateAggregateStart() { - assertThat(createAggregate("agg1").toString()).isEqualTo("CREATE AGGREGATE agg1 ()"); - } - - @Test - public void should_not_throw_on_toString_for_CreateAggregateWithParam() { - assertThat(createAggregate("func1").withParameter(DataTypes.INT).toString()) - .isEqualTo("CREATE AGGREGATE func1 (int)"); - } - - @Test - public void should_not_throw_on_toString_for_NotExists_OrReplace() { - assertThat(createAggregate("func1").ifNotExists().orReplace().toString()) - .isEqualTo("CREATE OR REPLACE AGGREGATE IF NOT EXISTS func1 ()"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionTest.java deleted file mode 100644 index 18c9813a5cf..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionTest.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.createFunction; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.udt; - -import com.datastax.oss.driver.api.core.type.DataTypes; -import org.junit.Test; - -public class CreateFunctionTest { - - @Test - public void should_not_throw_on_toString_for_CreateFunctionStart() { - assertThat(createFunction("func1").toString()) - .isEqualTo("CREATE FUNCTION func1 () CALLED ON NULL INPUT"); - } - - @Test - public void should_not_throw_on_toString_for_CreateFunctionWithType() { - assertThat( - createFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .toString()) - .isEqualTo("CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int"); - } - - @Test - public void should_not_throw_on_toString_for_CreateFunctionWithLanguage() { - assertThat( - createFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .withJavaLanguage() - .toString()) - .isEqualTo( - "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java"); - } - - @Test - public void should_create_function_with_simple_params() { - assertThat( - createFunction("keyspace1", "func1") - .withParameter("param1", DataTypes.INT) - .calledOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return 
Integer.toString(param1);")) - .hasCql( - "CREATE FUNCTION keyspace1.func1 (param1 int) CALLED ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_create_function_with_param_and_return_type_not_frozen() { - assertThat( - createFunction("keyspace1", "func6") - .withParameter("param1", DataTypes.tupleOf(DataTypes.INT, DataTypes.INT)) - .returnsNullOnNull() - .returnsType(udt("person", true)) - .withJavaLanguage() - .as("'return Integer.toString(param1);'")) - .hasCql( - "CREATE FUNCTION keyspace1.func6 (param1 tuple) RETURNS NULL ON NULL INPUT RETURNS person LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_honor_returns_null() { - assertThat( - createFunction("keyspace1", "func2") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return Integer.toString(param1);")) - .hasCql( - "CREATE FUNCTION keyspace1.func2 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_create_function_with_many_params() { - assertThat( - createFunction("keyspace1", "func3") - .withParameter("param1", DataTypes.INT) - .withParameter("param2", DataTypes.TEXT) - .withParameter("param3", DataTypes.BOOLEAN) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return Integer.toString(param1);")) - .hasCql( - "CREATE FUNCTION keyspace1.func3 (param1 int,param2 text,param3 boolean) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_create_function_with_no_params() { - - assertThat( - createFunction("keyspace1", "func4") - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withLanguage("java") - .asQuoted("return \"hello world\";")) - .hasCql( - "CREATE FUNCTION keyspace1.func4 () RETURNS NULL ON NULL INPUT 
RETURNS text LANGUAGE java AS 'return \"hello world\";'"); - } - - @Test - public void should_create_function_with_no_keyspace() { - assertThat( - createFunction("func5") - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return \"hello world\";")) - .hasCql( - "CREATE FUNCTION func5 () RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return \"hello world\";'"); - } - - @Test - public void should_create_function_with_if_not_exists() { - assertThat( - createFunction("keyspace1", "func6") - .ifNotExists() - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return \"hello world\";")) - .hasCql( - "CREATE FUNCTION IF NOT EXISTS keyspace1.func6 () RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return \"hello world\";'"); - } - - @Test - public void should_create_or_replace() { - assertThat( - createFunction("keyspace1", "func6") - .orReplace() - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .asQuoted("return Integer.toString(param1);")) - .hasCql( - "CREATE OR REPLACE FUNCTION keyspace1.func6 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_not_quote_body_using_as() { - assertThat( - createFunction("keyspace1", "func6") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaLanguage() - .as("'return Integer.toString(param1);'")) - .hasCql( - "CREATE FUNCTION keyspace1.func6 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); - } - - @Test - public void should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote() { - assertThat( - createFunction("keyspace1", "func6") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaScriptLanguage() - 
.asQuoted("'hello ' + param1;")) - .hasCql( - "CREATE FUNCTION keyspace1.func6 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE javascript AS $$ 'hello ' + param1; $$"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexTest.java deleted file mode 100644 index 03d3bfd4223..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.createIndex; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import org.junit.Test; - -public class CreateIndexTest { - - @Test - public void should_not_throw_on_toString_for_CreateIndexStart() { - assertThat(createIndex().toString()).isEqualTo("CREATE INDEX"); - } - - @Test - public void should_not_throw_on_toString_for_CreateIndexOnTable() { - assertThat(createIndex().onTable("x").toString()).isEqualTo("CREATE INDEX ON x"); - } - - @Test - public void should_generate_create_index_with_no_name() { - assertThat(createIndex().onTable("x").andColumn("y")).hasCql("CREATE INDEX ON x (y)"); - } - - @Test - public void should_generate_create_custom_index_with_no_name() { - assertThat(createIndex().custom("MyClass").onTable("x").andColumn("y")) - .hasCql("CREATE CUSTOM INDEX ON x (y) USING 'MyClass'"); - } - - @Test - public void should_generate_create_custom_index_if_not_exists_with_no_name() { - assertThat(createIndex().custom("MyClass").ifNotExists().onTable("x").andColumn("y")) - .hasCql("CREATE CUSTOM INDEX IF NOT EXISTS ON x (y) USING 'MyClass'"); - } - - @Test - public void should_generate_create_index_with_no_name_if_not_exists() { - assertThat(createIndex().ifNotExists().onTable("x").andColumn("y")) - .hasCql("CREATE INDEX IF NOT EXISTS ON x (y)"); - } - - @Test - public void should_generate_custom_index_with_name() { - assertThat(createIndex("bar").custom("MyClass").onTable("x").andColumn("y")) - .hasCql("CREATE CUSTOM INDEX bar ON x (y) USING 'MyClass'"); - } - - @Test - public void should_generate_create_custom_index_if_not_exists_with_name() { - assertThat(createIndex("bar").custom("MyClass").ifNotExists().onTable("x").andColumn("y")) - .hasCql("CREATE CUSTOM INDEX IF NOT EXISTS bar ON x (y) USING 'MyClass'"); - } - - 
@Test - public void should_generate_index_with_keyspace() { - assertThat(createIndex("bar").onTable("foo", "x").andColumn("y")) - .hasCql("CREATE INDEX bar ON foo.x (y)"); - } - - @Test - public void should_generate_create_index_with_name_if_not_exists() { - assertThat(createIndex("bar").ifNotExists().onTable("x").andColumn("y")) - .hasCql("CREATE INDEX IF NOT EXISTS bar ON x (y)"); - } - - @Test - public void should_generate_create_index_values() { - assertThat(createIndex().onTable("x").andColumnValues("m")) - .hasCql("CREATE INDEX ON x (VALUES(m))"); - } - - @Test - public void should_generate_create_index_keys() { - assertThat(createIndex().onTable("x").andColumnKeys("m")).hasCql("CREATE INDEX ON x (KEYS(m))"); - } - - @Test - public void should_generate_create_index_entries() { - assertThat(createIndex().onTable("x").andColumnEntries("m")) - .hasCql("CREATE INDEX ON x (ENTRIES(m))"); - } - - @Test - public void should_generate_create_index_full() { - assertThat(createIndex().onTable("x").andColumnFull("l")).hasCql("CREATE INDEX ON x (FULL(l))"); - } - - @Test - public void should_generate_create_index_custom_index_type() { - assertThat(createIndex().onTable("x").andColumn("m", "CUST")) - .hasCql("CREATE INDEX ON x (CUST(m))"); - } - - @Test - public void should_generate_create_index_with_options() { - assertThat( - createIndex() - .custom("MyClass") - .onTable("x") - .andColumn("y") - .withOption("opt1", 1) - .withOption("opt2", "data")) - .hasCql("CREATE CUSTOM INDEX ON x (y) USING 'MyClass' WITH opt1=1 AND opt2='data'"); - } - - @Test - public void should_generate_create_custom_index_with_options() { - assertThat( - createIndex() - .onTable("x") - .andColumn("y") - .withOption("opt1", 1) - .withOption("opt2", "data")) - .hasCql("CREATE INDEX ON x (y) WITH opt1=1 AND opt2='data'"); - } - - @Test - public void should_generate_create_index_sasi_with_options() { - assertThat( - createIndex() - .usingSASI() - .onTable("x") - .andColumn("y") - 
.withSASIOptions(ImmutableMap.of("mode", "CONTAINS", "tokenization_locale", "en"))) - .hasCql( - "CREATE CUSTOM INDEX ON x (y) USING 'org.apache.cassandra.index.sasi.SASIIndex' WITH OPTIONS={'mode':'CONTAINS','tokenization_locale':'en'}"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceTest.java deleted file mode 100644 index a11f9df94a1..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceTest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.createKeyspace; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import org.junit.Test; - -public class CreateKeyspaceTest { - - @Test - public void should_not_throw_on_toString_for_CreateKeyspaceStart() { - assertThat(createKeyspace("foo").toString()).isEqualTo("CREATE KEYSPACE foo"); - } - - @Test - public void should_generate_create_keyspace_simple_strategy() { - assertThat(createKeyspace("foo").withSimpleStrategy(5)) - .hasCql( - "CREATE KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':5}"); - } - - @Test - public void should_generate_create_keyspace_simple_strategy_and_durable_writes() { - assertThat(createKeyspace("foo").withSimpleStrategy(5).withDurableWrites(true)) - .hasCql( - "CREATE KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':5} AND durable_writes=true"); - } - - @Test - public void should_generate_create_keyspace_if_not_exists() { - assertThat(createKeyspace("foo").ifNotExists().withSimpleStrategy(2)) - .hasCql( - "CREATE KEYSPACE IF NOT EXISTS foo WITH replication={'class':'SimpleStrategy','replication_factor':2}"); - } - - @Test - public void should_generate_create_keyspace_network_topology_strategy() { - assertThat( - createKeyspace("foo").withNetworkTopologyStrategy(ImmutableMap.of("dc1", 3, "dc2", 4))) - .hasCql( - "CREATE KEYSPACE foo WITH replication={'class':'NetworkTopologyStrategy','dc1':3,'dc2':4}"); - } - - @Test - public void should_generate_create_keyspace_with_custom_properties() { - assertThat( - createKeyspace("foo") - .withSimpleStrategy(3) - .withOption("awesome_feature", true) - .withOption("wow_factor", 11) - .withOption("random_string", "hi")) - .hasCql( - "CREATE KEYSPACE foo WITH 
replication={'class':'SimpleStrategy','replication_factor':3} AND awesome_feature=true AND wow_factor=11 AND random_string='hi'"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewTest.java deleted file mode 100644 index 9c5180429b3..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewTest.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.createMaterializedView; - -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import org.junit.Test; - -public class CreateMaterializedViewTest { - - @Test - public void should_not_throw_on_toString_for_CreateMaterializedViewStart() { - assertThat(createMaterializedView("foo").toString()).isEqualTo("CREATE MATERIALIZED VIEW foo"); - } - - @Test - public void should_not_throw_on_toString_for_CreateMaterializedViewSelection() { - assertThat(createMaterializedView("foo").asSelectFrom("bar").toString()) - .isEqualTo("CREATE MATERIALIZED VIEW foo"); - } - - @Test - public void should_not_throw_on_toString_for_CreateMaterializedViewWhereStart() { - assertThat(createMaterializedView("foo").asSelectFrom("bar").all().toString()) - .isEqualTo("CREATE MATERIALIZED VIEW foo AS SELECT * FROM bar"); - } - - @Test - public void should_generate_create_view_if_not_exists_with_select_all() { - assertThat( - createMaterializedView("baz") - .ifNotExists() - .asSelectFrom("foo", "bar") - .all() - .whereColumn("x") - .isNotNull() - .withPartitionKey("x")) - .hasCql( - "CREATE MATERIALIZED VIEW IF NOT EXISTS baz AS SELECT * FROM foo.bar WHERE x IS NOT NULL PRIMARY KEY(x)"); - } - - @Test - public void should_generate_create_view_with_select_columns() { - assertThat( - createMaterializedView("baz") - .asSelectFrom("bar") - .columns("x", "y") - .whereColumn("x") - .isNotNull() - .whereColumn("y") - .isLessThan(literal(5)) - .withPartitionKey("x")) - .hasCql( - "CREATE MATERIALIZED VIEW baz AS SELECT x,y FROM bar WHERE x IS NOT NULL AND y<5 PRIMARY KEY(x)"); - } - - @Test - public void should_generate_create_view_with_compound_partition_key_and_clustering_columns() { - assertThat( 
- createMaterializedView("baz") - .asSelectFrom("bar") - .all() - .whereColumn("x") - .isNotNull() - .whereColumn("y") - .isNotNull() - .withPartitionKey("x") - .withPartitionKey("y") - .withClusteringColumn("a") - .withClusteringColumn("b")) - .hasCql( - "CREATE MATERIALIZED VIEW baz AS SELECT * FROM bar WHERE x IS NOT NULL AND y IS NOT NULL PRIMARY KEY((x,y),a,b)"); - } - - @Test - public void should_generate_create_view_with_clustering_single() { - assertThat( - createMaterializedView("baz") - .asSelectFrom("bar") - .all() - .whereColumn("x") - .isNotNull() - .withPartitionKey("x") - .withClusteringColumn("a") - .withClusteringOrder("a", ClusteringOrder.DESC)) - .hasCql( - "CREATE MATERIALIZED VIEW baz AS SELECT * FROM bar WHERE x IS NOT NULL PRIMARY KEY(x,a) WITH CLUSTERING ORDER BY (a DESC)"); - } - - @Test - public void should_generate_create_view_with_clustering_and_options() { - assertThat( - createMaterializedView("baz") - .asSelectFrom("bar") - .all() - .whereColumn("x") - .isNotNull() - .withPartitionKey("x") - .withClusteringColumn("a") - .withClusteringColumn("b") - .withClusteringOrder("a", ClusteringOrder.DESC) - .withClusteringOrder("b", ClusteringOrder.ASC) - .withCDC(true) - .withComment("Hello")) - .hasCql( - "CREATE MATERIALIZED VIEW baz AS SELECT * FROM bar WHERE x IS NOT NULL PRIMARY KEY(x,a,b) WITH CLUSTERING ORDER BY (a DESC,b ASC) AND cdc=true AND comment='Hello'"); - } - - @Test - public void should_generate_create_view_with_options() { - assertThat( - createMaterializedView("baz") - .asSelectFrom("bar") - .all() - .whereColumn("x") - .isNotNull() - .withPartitionKey("x") - .withCDC(true) - .withComment("Hello")) - .hasCql( - "CREATE MATERIALIZED VIEW baz AS SELECT * FROM bar WHERE x IS NOT NULL PRIMARY KEY(x) WITH cdc=true AND comment='Hello'"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableTest.java 
b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableTest.java deleted file mode 100644 index 31efc278472..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableTest.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.createTable; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.udt; - -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder.RowsPerPartition; -import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy.CompactionWindowUnit; -import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy.TimestampResolution; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.nio.charset.StandardCharsets; -import org.junit.Test; - -public class CreateTableTest { - - @Test - public void should_not_throw_on_toString_for_CreateTableStart() { - assertThat(createTable("foo").toString()).isEqualTo("CREATE TABLE foo"); - } - - @Test - public void should_generate_create_table_if_not_exists() { - assertThat(createTable("bar").ifNotExists().withPartitionKey("k", DataTypes.INT)) - .hasCql("CREATE TABLE IF NOT EXISTS bar (k int PRIMARY KEY)"); - } - - @Test - public void should_generate_create_table_with_single_partition_key() { - assertThat( - createTable("bar").withPartitionKey("k", DataTypes.INT).withColumn("v", DataTypes.TEXT)) - .hasCql("CREATE TABLE bar (k int PRIMARY KEY,v text)"); - } - - @Test - public void should_generate_create_table_with_compound_partition_key() { - assertThat( - createTable("bar") - .withPartitionKey("kc", DataTypes.INT) - .withPartitionKey("ka", DataTypes.TIMESTAMP) - .withColumn("v", DataTypes.TEXT)) - .hasCql("CREATE TABLE bar (kc int,ka timestamp,v text,PRIMARY KEY((kc,ka)))"); - } - - @Test - public void 
should_generate_create_table_with_single_partition_key_and_clustering_column() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withClusteringColumn("c", DataTypes.TEXT) - .withColumn("v", udt("val", true))) - .hasCql("CREATE TABLE bar (k int,c text,v frozen,PRIMARY KEY(k,c))"); - } - - @Test - public void should_generate_create_table_with_static_column() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withClusteringColumn("c", DataTypes.TEXT) - .withStaticColumn("s", DataTypes.TIMEUUID) - .withColumn("v", udt("val", true))) - .hasCql("CREATE TABLE bar (k int,c text,s timeuuid STATIC,v frozen,PRIMARY KEY(k,c))"); - } - - @Test - public void should_generate_create_table_with_compound_partition_key_and_clustering_columns() { - assertThat( - createTable("bar") - .withPartitionKey("kc", DataTypes.INT) - .withPartitionKey("ka", DataTypes.TIMESTAMP) - .withClusteringColumn("c", DataTypes.FLOAT) - .withClusteringColumn("a", DataTypes.UUID) - .withColumn("v", DataTypes.TEXT)) - .hasCql( - "CREATE TABLE bar (kc int,ka timestamp,c float,a uuid,v text,PRIMARY KEY((kc,ka),c,a))"); - } - - @Test - public void should_generate_create_table_with_compact_storage() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompactStorage()) - .hasCql("CREATE TABLE bar (k int PRIMARY KEY,v text) WITH COMPACT STORAGE"); - } - - @Test - public void should_generate_create_table_with_clustering_single() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withClusteringColumn("c", DataTypes.TEXT) - .withColumn("v", DataTypes.TEXT) - .withClusteringOrder("c", ClusteringOrder.ASC)) - .hasCql( - "CREATE TABLE bar (k int,c text,v text,PRIMARY KEY(k,c)) WITH CLUSTERING ORDER BY (c ASC)"); - } - - @Test - public void should_generate_create_table_with_clustering_three() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - 
.withClusteringColumn("c0", DataTypes.TEXT) - .withClusteringColumn("c1", DataTypes.TEXT) - .withClusteringColumn("c2", DataTypes.TEXT) - .withColumn("v", DataTypes.TEXT) - .withClusteringOrder("c0", ClusteringOrder.DESC) - .withClusteringOrder( - ImmutableMap.of("c1", ClusteringOrder.ASC, "c2", ClusteringOrder.DESC))) - .hasCql( - "CREATE TABLE bar (k int,c0 text,c1 text,c2 text,v text,PRIMARY KEY(k,c0,c1,c2)) WITH CLUSTERING ORDER BY (c0 DESC,c1 ASC,c2 DESC)"); - } - - @Test - public void should_generate_create_table_with_compact_storage_and_default_ttl() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompactStorage() - .withDefaultTimeToLiveSeconds(86400)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH COMPACT STORAGE AND default_time_to_live=86400"); - } - - @Test - public void should_generate_create_table_with_clustering_compact_storage_and_default_ttl() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withClusteringColumn("c", DataTypes.TEXT) - .withColumn("v", DataTypes.TEXT) - .withCompactStorage() - .withClusteringOrder("c", ClusteringOrder.DESC) - .withDefaultTimeToLiveSeconds(86400)) - .hasCql( - "CREATE TABLE bar (k int,c text,v text,PRIMARY KEY(k,c)) WITH COMPACT STORAGE AND CLUSTERING ORDER BY (c DESC) AND default_time_to_live=86400"); - } - - @Test - public void should_generate_create_table_with_options() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withBloomFilterFpChance(0.42) - .withCDC(false) - .withComment("Hello world") - .withDcLocalReadRepairChance(0.54) - .withDefaultTimeToLiveSeconds(86400) - .withExtensions( - ImmutableMap.of( - "key1", - "apache".getBytes(StandardCharsets.UTF_8), - "key2", - "cassandra".getBytes(StandardCharsets.UTF_8))) - .withGcGraceSeconds(864000) - .withMemtableFlushPeriodInMs(10000) - .withMinIndexInterval(1024) - 
.withMaxIndexInterval(4096) - .withReadRepairChance(0.55) - .withSpeculativeRetry("99percentile")) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH bloom_filter_fp_chance=0.42 AND cdc=false AND comment='Hello world' AND dclocal_read_repair_chance=0.54 AND default_time_to_live=86400 AND extensions={'key1':0x617061636865,'key2':0x63617373616e647261} AND gc_grace_seconds=864000 AND memtable_flush_period_in_ms=10000 AND min_index_interval=1024 AND max_index_interval=4096 AND read_repair_chance=0.55 AND speculative_retry='99percentile'"); - } - - @Test - public void should_generate_create_table_lz4_compression() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withLZ4Compression()) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor'}"); - } - - @Test - public void should_generate_create_table_lz4_compression_options() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withLZ4Compression(1024)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_in_kb':1024}"); - } - - @Test - public void should_generate_create_table_lz4_compression_options_crc() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withLZ4Compression(1024, .5)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_kb':1024,'crc_check_chance':0.5}"); - } - - @Test - public void should_generate_create_table_zstd_compression() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withZstdCompression()) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'ZstdCompressor'}"); - } - - @Test - public void 
should_generate_create_table_zstd_compression_options() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withZstdCompression(1024)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'ZstdCompressor','chunk_length_in_kb':1024}"); - } - - @Test - public void should_generate_create_table_snappy_compression() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withSnappyCompression()) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor'}"); - } - - @Test - public void should_generate_create_table_snappy_compression_options() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withSnappyCompression(2048)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_in_kb':2048}"); - } - - @Test - public void should_generate_create_table_snappy_compression_options_crc() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withSnappyCompression(2048, .25)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_kb':2048,'crc_check_chance':0.25}"); - } - - @Test - public void should_generate_create_table_deflate_compression() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withDeflateCompression()) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor'}"); - } - - @Test - public void should_generate_create_table_deflate_compression_options() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withDeflateCompression(4096)) - .hasCql( - "CREATE TABLE 
bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_in_kb':4096}"); - } - - @Test - public void should_generate_create_table_deflate_compression_options_crc() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withDeflateCompression(4096, .1)) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_kb':4096,'crc_check_chance':0.1}"); - } - - @Test - public void should_generate_create_table_caching_options() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCaching(true, RowsPerPartition.rows(10))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH caching={'keys':'ALL','rows_per_partition':'10'}"); - } - - @Test - public void should_generate_create_table_size_tiered_compaction() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompaction( - SchemaBuilder.sizeTieredCompactionStrategy() - .withBucketHigh(1.6) - .withBucketLow(0.6) - .withColdReadsToOmit(0.1) - .withMaxThreshold(33) - .withMinThreshold(5) - .withMinSSTableSizeInBytes(50000) - .withOnlyPurgeRepairedTombstones(true) - .withEnabled(false) - .withTombstoneCompactionIntervalInSeconds(86400) - .withTombstoneThreshold(0.22) - .withUncheckedTombstoneCompaction(true))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'SizeTieredCompactionStrategy','bucket_high':1.6,'bucket_low':0.6,'cold_reads_to_omit':0.1,'max_threshold':33,'min_threshold':5,'min_sstable_size':50000,'only_purge_repaired_tombstones':true,'enabled':false,'tombstone_compaction_interval':86400,'tombstone_threshold':0.22,'unchecked_tombstone_compaction':true}"); - } - - @Test - public void should_generate_create_table_leveled_compaction() { - assertThat( - createTable("bar") - .withPartitionKey("k", 
DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompaction( - SchemaBuilder.leveledCompactionStrategy() - .withSSTableSizeInMB(110) - .withTombstoneCompactionIntervalInSeconds(3600))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'LeveledCompactionStrategy','sstable_size_in_mb':110,'tombstone_compaction_interval':3600}"); - } - - @Test - public void should_generate_create_table_time_window_compaction() { - assertThat( - createTable("bar") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.TEXT) - .withCompaction( - SchemaBuilder.timeWindowCompactionStrategy() - .withCompactionWindow(10, CompactionWindowUnit.DAYS) - .withTimestampResolution(TimestampResolution.MICROSECONDS) - .withUnsafeAggressiveSSTableExpiration(false))) - .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'TimeWindowCompactionStrategy','compaction_window_size':10,'compaction_window_unit':'DAYS','timestamp_resolution':'MICROSECONDS','unsafe_aggressive_sstable_expiration':false}"); - } - - @Test - public void should_generate_vector_column() { - assertThat( - createTable("foo") - .withPartitionKey("k", DataTypes.INT) - .withColumn("v", DataTypes.vectorOf(DataTypes.FLOAT, 3))) - .hasCql("CREATE TABLE foo (k int PRIMARY KEY,v vector)"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeTest.java deleted file mode 100644 index f7c15788a0f..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeTest.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.createType; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.udt; - -import com.datastax.oss.driver.api.core.type.DataTypes; -import org.junit.Test; - -public class CreateTypeTest { - - @Test - public void should_not_throw_on_toString_for_CreateTypeStart() { - assertThat(createType("foo").toString()).isEqualTo("CREATE TYPE foo"); - } - - @Test - public void should_create_type_with_single_field() { - - assertThat(createType("keyspace1", "type").withField("single", DataTypes.TEXT)) - .hasCql("CREATE TYPE keyspace1.type (single text)"); - assertThat(createType("type").withField("single", DataTypes.TEXT)) - .hasCql("CREATE TYPE type (single text)"); - - assertThat(createType("type").ifNotExists().withField("single", DataTypes.TEXT)) - .hasCql("CREATE TYPE IF NOT EXISTS type (single text)"); - } - - @Test - public void should_create_type_with_many_fields() { - - assertThat( - createType("keyspace1", "type") - .withField("first", DataTypes.TEXT) - .withField("second", DataTypes.INT) - .withField("third", DataTypes.BLOB) - .withField("fourth", DataTypes.BOOLEAN)) - .hasCql("CREATE TYPE keyspace1.type (first text,second int,third blob,fourth boolean)"); - assertThat( 
- createType("type") - .withField("first", DataTypes.TEXT) - .withField("second", DataTypes.INT) - .withField("third", DataTypes.BLOB) - .withField("fourth", DataTypes.BOOLEAN)) - .hasCql("CREATE TYPE type (first text,second int,third blob,fourth boolean)"); - } - - @Test - public void should_create_type_with_nested_UDT() { - assertThat(createType("keyspace1", "type").withField("nested", udt("val", true))) - .hasCql("CREATE TYPE keyspace1.type (nested frozen)"); - assertThat(createType("keyspace1", "type").withField("nested", udt("val", false))) - .hasCql("CREATE TYPE keyspace1.type (nested val)"); - } - - @Test - public void should_create_type_with_collections() { - assertThat(createType("ks1", "type").withField("names", DataTypes.listOf(DataTypes.TEXT))) - .hasCql("CREATE TYPE ks1.type (names list)"); - - assertThat(createType("ks1", "type").withField("names", DataTypes.tupleOf(DataTypes.TEXT))) - .hasCql("CREATE TYPE ks1.type (names frozen>)"); - - assertThat( - createType("ks1", "type") - .withField("map", DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT))) - .hasCql("CREATE TYPE ks1.type (map map)"); - } - - @Test - public void should_create_type_with_vector() { - assertThat( - createType("ks1", "type") - .withField("c1", DataTypes.INT) - .withField("vec", DataTypes.vectorOf(DataTypes.FLOAT, 3))) - .hasCql("CREATE TYPE ks1.type (c1 int,vec vector)"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropAggregateTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropAggregateTest.java deleted file mode 100644 index 875ed7d7432..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropAggregateTest.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.dropAggregate; - -import org.junit.Test; - -public class DropAggregateTest { - @Test - public void should_generate_drop_aggregate() { - assertThat(dropAggregate("bar")).hasCql("DROP AGGREGATE bar"); - } - - @Test - public void should_generate_drop_aggregate_with_keyspace() { - assertThat(dropAggregate("foo", "bar")).hasCql("DROP AGGREGATE foo.bar"); - } - - @Test - public void should_generate_drop_aggregate_if_exists() { - assertThat(dropAggregate("bar").ifExists()).hasCql("DROP AGGREGATE IF EXISTS bar"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropFunctionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropFunctionTest.java deleted file mode 100644 index 3157212a271..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropFunctionTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.dropFunction; - -import org.junit.Test; - -public class DropFunctionTest { - - @Test - public void should_generate_drop_function() { - assertThat(dropFunction("bar")).hasCql("DROP FUNCTION bar"); - } - - @Test - public void should_generate_drop_function_with_keyspace() { - assertThat(dropFunction("foo", "bar")).hasCql("DROP FUNCTION foo.bar"); - } - - @Test - public void should_generate_drop_function_if_exists() { - assertThat(dropFunction("bar").ifExists()).hasCql("DROP FUNCTION IF EXISTS bar"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropIndexTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropIndexTest.java deleted file mode 100644 index 150b52c86e3..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropIndexTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.dropIndex; - -import org.junit.Test; - -public class DropIndexTest { - - @Test - public void should_generate_drop_index() { - assertThat(dropIndex("bar")).hasCql("DROP INDEX bar"); - } - - @Test - public void should_generate_drop_index_with_keyspace() { - assertThat(dropIndex("foo", "bar")).hasCql("DROP INDEX foo.bar"); - } - - @Test - public void should_generate_drop_index_if_exists() { - assertThat(dropIndex("bar").ifExists()).hasCql("DROP INDEX IF EXISTS bar"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropKeyspaceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropKeyspaceTest.java deleted file mode 100644 index 4f124f0bc04..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropKeyspaceTest.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.dropKeyspace; - -import org.junit.Test; - -public class DropKeyspaceTest { - @Test - public void should_generate_drop_keyspace() { - assertThat(dropKeyspace("foo")).hasCql("DROP KEYSPACE foo"); - } - - @Test - public void should_generate_drop_keyspace_if_exists() { - assertThat(dropKeyspace("foo").ifExists()).hasCql("DROP KEYSPACE IF EXISTS foo"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropMaterializedViewTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropMaterializedViewTest.java deleted file mode 100644 index 054b0b2e3f7..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropMaterializedViewTest.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.dropMaterializedView; - -import org.junit.Test; - -public class DropMaterializedViewTest { - - @Test - public void should_generate_drop_view() { - assertThat(dropMaterializedView("bar")).hasCql("DROP MATERIALIZED VIEW bar"); - } - - @Test - public void should_generate_drop_view_with_keyspace() { - assertThat(dropMaterializedView("foo", "bar")).hasCql("DROP MATERIALIZED VIEW foo.bar"); - } - - @Test - public void should_generate_drop_view_if_exists() { - assertThat(dropMaterializedView("bar").ifExists()) - .hasCql("DROP MATERIALIZED VIEW IF EXISTS bar"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTableTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTableTest.java deleted file mode 100644 index 79f655346c5..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTableTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.dropTable; - -import org.junit.Test; - -public class DropTableTest { - - @Test - public void should_generate_drop_table() { - assertThat(dropTable("bar")).hasCql("DROP TABLE bar"); - } - - @Test - public void should_generate_drop_table_with_keyspace() { - assertThat(dropTable("foo", "bar")).hasCql("DROP TABLE foo.bar"); - } - - @Test - public void should_generate_drop_table_if_exists() { - assertThat(dropTable("bar").ifExists()).hasCql("DROP TABLE IF EXISTS bar"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTypeTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTypeTest.java deleted file mode 100644 index a2c5e35054a..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTypeTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.dropType; - -import org.junit.Test; - -public class DropTypeTest { - - @Test - public void should_generate_drop_type() { - assertThat(dropType("bar")).hasCql("DROP TYPE bar"); - } - - @Test - public void should_generate_drop_type_with_keyspace() { - assertThat(dropType("foo", "bar")).hasCql("DROP TYPE foo.bar"); - } - - @Test - public void should_generate_drop_type_if_exists() { - assertThat(dropType("bar").ifExists()).hasCql("DROP TYPE IF EXISTS bar"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectAllowFilteringTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectAllowFilteringTest.java deleted file mode 100644 index 73220cfe0d7..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectAllowFilteringTest.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.select; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import org.junit.Test; - -public class SelectAllowFilteringTest { - @Test - public void should_generate_allow_filtering() { - assertThat(selectFrom("foo").all().allowFiltering()) - .hasCql("SELECT * FROM foo ALLOW FILTERING"); - } - - @Test - public void should_use_single_allow_filtering_if_called_multiple_times() { - assertThat(selectFrom("foo").all().allowFiltering().allowFiltering()) - .hasCql("SELECT * FROM foo ALLOW FILTERING"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectFluentRelationTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectFluentRelationTest.java deleted file mode 100644 index 497cd7876c0..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectFluentRelationTest.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.select; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.raw; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.tuple; - -import com.datastax.oss.driver.api.querybuilder.relation.RelationTest; -import org.junit.Test; - -/** Same as {@link RelationTest}, but using {@code whereXxx()} instead of {@code where(isXxx())}. 
*/ -public class SelectFluentRelationTest { - - @Test - public void should_generate_comparison_relation() { - assertThat(selectFrom("foo").all().whereColumn("k").isEqualTo(bindMarker())) - .hasCql("SELECT * FROM foo WHERE k=?"); - assertThat(selectFrom("foo").all().whereColumn("k").isEqualTo(bindMarker("value"))) - .hasCql("SELECT * FROM foo WHERE k=:value"); - } - - @Test - public void should_generate_is_not_null_relation() { - assertThat(selectFrom("foo").all().whereColumn("k").isNotNull()) - .hasCql("SELECT * FROM foo WHERE k IS NOT NULL"); - } - - @Test - public void should_generate_in_relation() { - assertThat(selectFrom("foo").all().whereColumn("k").in(bindMarker())) - .hasCql("SELECT * FROM foo WHERE k IN ?"); - assertThat(selectFrom("foo").all().whereColumn("k").in(bindMarker(), bindMarker())) - .hasCql("SELECT * FROM foo WHERE k IN (?,?)"); - } - - @Test - public void should_generate_token_relation() { - assertThat(selectFrom("foo").all().whereToken("k1", "k2").isEqualTo(bindMarker("t"))) - .hasCql("SELECT * FROM foo WHERE token(k1,k2)=:t"); - } - - @Test - public void should_generate_column_component_relation() { - assertThat( - selectFrom("foo") - .all() - .whereColumn("id") - .isEqualTo(bindMarker()) - .whereMapValue("user", raw("'name'")) - .isEqualTo(bindMarker())) - .hasCql("SELECT * FROM foo WHERE id=? AND user['name']=?"); - } - - @Test - public void should_generate_tuple_relation() { - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(bindMarker()) - .whereColumns("c1", "c2", "c3") - .in(bindMarker())) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3) IN ?"); - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(bindMarker()) - .whereColumns("c1", "c2", "c3") - .in(bindMarker(), bindMarker())) - .hasCql("SELECT * FROM foo WHERE k=? 
AND (c1,c2,c3) IN (?,?)"); - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(bindMarker()) - .whereColumns("c1", "c2", "c3") - .in(bindMarker(), raw("(4,5,6)"))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3) IN (?,(4,5,6))"); - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(bindMarker()) - .whereColumns("c1", "c2", "c3") - .in( - tuple(bindMarker(), bindMarker(), bindMarker()), - tuple(bindMarker(), bindMarker(), bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3) IN ((?,?,?),(?,?,?))"); - - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(bindMarker()) - .whereColumns("c1", "c2", "c3") - .isEqualTo(bindMarker())) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3)=?"); - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(bindMarker()) - .whereColumns("c1", "c2", "c3") - .isLessThan(tuple(bindMarker(), bindMarker(), bindMarker()))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3)<(?,?,?)"); - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(bindMarker()) - .whereColumns("c1", "c2", "c3") - .isGreaterThanOrEqualTo(raw("(1,2,3)"))) - .hasCql("SELECT * FROM foo WHERE k=? AND (c1,c2,c3)>=(1,2,3)"); - } - - @Test - public void should_generate_custom_index_relation() { - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(bindMarker()) - .whereCustomIndex("my_index", raw("'custom expression'"))) - .hasCql("SELECT * FROM foo WHERE k=? AND expr(my_index,'custom expression')"); - } - - @Test - public void should_generate_raw_relation() { - assertThat( - selectFrom("foo").all().whereColumn("k").isEqualTo(bindMarker()).whereRaw("c = 'test'")) - .hasCql("SELECT * FROM foo WHERE k=? 
AND c = 'test'"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectGroupByTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectGroupByTest.java deleted file mode 100644 index 368b9dfc480..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectGroupByTest.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.select; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import org.junit.Test; - -public class SelectGroupByTest { - - @Test - public void should_generate_group_by_clauses() { - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(literal(1)) - .groupBy("foo") - .groupBy("bar")) - .hasCql("SELECT * FROM foo WHERE k=1 GROUP BY foo,bar"); - - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(literal(1)) - .groupByColumns("foo", "bar")) - .hasCql("SELECT * FROM foo WHERE k=1 GROUP BY foo,bar"); - - assertThat( - selectFrom("foo") - .all() - .whereColumn("k") - .isEqualTo(literal(1)) - .groupBy( - Selector.function("ks", "f", Selector.column("foo")), Selector.column("bar"))) - .hasCql("SELECT * FROM foo WHERE k=1 GROUP BY ks.f(foo),bar"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectLimitTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectLimitTest.java deleted file mode 100644 index d617aa5673f..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectLimitTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.select; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import org.junit.Test; - -public class SelectLimitTest { - - @Test - public void should_generate_limit() { - assertThat(selectFrom("foo").all().limit(1)).hasCql("SELECT * FROM foo LIMIT 1"); - assertThat(selectFrom("foo").all().limit(bindMarker("l"))).hasCql("SELECT * FROM foo LIMIT :l"); - } - - @Test - public void should_use_last_limit_if_called_multiple_times() { - assertThat(selectFrom("foo").all().limit(1).limit(2)).hasCql("SELECT * FROM foo LIMIT 2"); - } - - @Test - public void should_generate_per_partition_limit() { - assertThat(selectFrom("foo").all().perPartitionLimit(1)) - .hasCql("SELECT * FROM foo PER PARTITION LIMIT 1"); - assertThat(selectFrom("foo").all().perPartitionLimit(bindMarker("l"))) - .hasCql("SELECT * FROM foo PER PARTITION LIMIT :l"); - } - - @Test - public void should_use_last_per_partition_limit_if_called_multiple_times() { - assertThat(selectFrom("foo").all().perPartitionLimit(1).perPartitionLimit(2)) - .hasCql("SELECT * FROM foo PER PARTITION LIMIT 2"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectOrderingTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectOrderingTest.java deleted file mode 100644 index 
a9c618e9559..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectOrderingTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.select; - -import static com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder.ASC; -import static com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder.DESC; -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import org.junit.Test; - -public class SelectOrderingTest { - - @Test - public void should_generate_ordering_clauses() { - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(literal(1))) - .orderBy("c1", ASC) - .orderBy("c2", DESC)) - .hasCql("SELECT * FROM foo WHERE k=1 ORDER BY c1 ASC,c2 DESC"); - assertThat( - 
selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(literal(1))) - .orderBy(ImmutableMap.of("c1", ASC, "c2", DESC))) - .hasCql("SELECT * FROM foo WHERE k=1 ORDER BY c1 ASC,c2 DESC"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_when_provided_names_resolve_to_the_same_id() { - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(literal(1))) - .orderBy(ImmutableMap.of("c1", ASC, "C1", DESC)); - } - - @Test - public void should_replace_previous_ordering() { - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(literal(1))) - .orderBy("c1", ASC) - .orderBy("c2", DESC) - .orderBy("c1", DESC)) - .hasCql("SELECT * FROM foo WHERE k=1 ORDER BY c2 DESC,c1 DESC"); - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(literal(1))) - .orderBy("c1", ASC) - .orderBy("c2", DESC) - .orderBy("c3", ASC) - .orderBy(ImmutableMap.of("c1", DESC, "c2", ASC))) - .hasCql("SELECT * FROM foo WHERE k=1 ORDER BY c3 ASC,c1 DESC,c2 ASC"); - } - - @Test - public void should_generate_ann_clause() { - assertThat( - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(literal(1))) - .orderByAnnOf("c1", CqlVector.newInstance(0.1, 0.2, 0.3))) - .hasCql("SELECT * FROM foo WHERE k=1 ORDER BY c1 ANN OF [0.1, 0.2, 0.3]"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_when_provided_ann_with_other_orderings() { - selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(literal(1))) - .orderBy("c1", ASC) - .orderByAnnOf("c2", CqlVector.newInstance(0.1, 0.2, 0.3)); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectSelectorTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectSelectorTest.java deleted file mode 100644 index 7e03627d4b7..00000000000 --- 
a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectSelectorTest.java +++ /dev/null @@ -1,373 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.select; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.raw; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.querybuilder.CharsetCodec; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import org.junit.Test; - -public class SelectSelectorTest { - - @Test - public void should_generate_star_selector() { - assertThat(selectFrom("foo").all()).hasCql("SELECT * FROM foo"); - assertThat(selectFrom("ks", "foo").all()).hasCql("SELECT * FROM ks.foo"); - } - - @Test - 
public void should_remove_star_selector_if_other_selector_added() { - assertThat(selectFrom("foo").all().column("bar")).hasCql("SELECT bar FROM foo"); - } - - @Test - public void should_remove_other_selectors_if_star_selector_added() { - assertThat(selectFrom("foo").column("bar").column("baz").all()).hasCql("SELECT * FROM foo"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_selector_list_contains_star_selector() { - selectFrom("foo").selectors(Selector.column("bar"), Selector.all(), raw("baz")); - } - - @Test - public void should_generate_count_all_selector() { - assertThat(selectFrom("foo").countAll()).hasCql("SELECT count(*) FROM foo"); - } - - @Test - public void should_generate_column_selectors() { - assertThat(selectFrom("foo").column("bar")).hasCql("SELECT bar FROM foo"); - assertThat(selectFrom("foo").column("bar").column("baz")).hasCql("SELECT bar,baz FROM foo"); - assertThat(selectFrom("foo").selectors(Selector.column("bar"), Selector.column("baz"))) - .hasCql("SELECT bar,baz FROM foo"); - assertThat(selectFrom("foo").columns("a", "b", "c")).hasCql("SELECT a,b,c FROM foo"); - } - - @Test - public void should_generate_arithmetic_selectors() { - assertThat(selectFrom("foo").add(Selector.column("bar"), Selector.column("baz"))) - .hasCql("SELECT bar+baz FROM foo"); - assertThat( - selectFrom("foo") - .subtract(raw("1"), Selector.add(Selector.column("bar"), Selector.column("baz")))) - .hasCql("SELECT 1-(bar+baz) FROM foo"); - assertThat( - selectFrom("foo").negate(Selector.add(Selector.column("bar"), Selector.column("baz")))) - .hasCql("SELECT -(bar+baz) FROM foo"); - assertThat( - selectFrom("foo") - .multiply( - Selector.negate(Selector.column("bar")), - Selector.add(Selector.column("baz"), literal(1)))) - .hasCql("SELECT -bar*(baz+1) FROM foo"); - assertThat( - selectFrom("foo") - .divide(literal(1), Selector.add(Selector.column("bar"), Selector.column("baz")))) - .hasCql("SELECT 1/(bar+baz) FROM foo"); - assertThat( - 
selectFrom("foo") - .divide( - literal(1), Selector.multiply(Selector.column("bar"), Selector.column("baz")))) - .hasCql("SELECT 1/(bar*baz) FROM foo"); - } - - @Test - public void should_generate_field_selectors() { - assertThat(selectFrom("foo").field("user", "name")).hasCql("SELECT user.name FROM foo"); - assertThat(selectFrom("foo").field(Selector.field("user", "address"), "city")) - .hasCql("SELECT user.address.city FROM foo"); - } - - @Test - public void should_generate_element_selectors() { - assertThat(selectFrom("foo").element("m", literal(1))).hasCql("SELECT m[1] FROM foo"); - assertThat(selectFrom("foo").element(Selector.element("m", literal("bar")), literal(1))) - .hasCql("SELECT m['bar'][1] FROM foo"); - } - - @Test - public void should_generate_range_selectors() { - assertThat(selectFrom("foo").range("s", literal(1), literal(5))) - .hasCql("SELECT s[1..5] FROM foo"); - assertThat(selectFrom("foo").range("s", literal(1), null)).hasCql("SELECT s[1..] FROM foo"); - assertThat(selectFrom("foo").range("s", null, literal(5))).hasCql("SELECT s[..5] FROM foo"); - } - - @Test - public void should_generate_collection_and_tuple_selectors() { - assertThat( - selectFrom("foo") - .listOf(Selector.column("a"), Selector.column("b"), Selector.column("c"))) - .hasCql("SELECT [a,b,c] FROM foo"); - assertThat( - selectFrom("foo") - .setOf(Selector.column("a"), Selector.column("b"), Selector.column("c"))) - .hasCql("SELECT {a,b,c} FROM foo"); - assertThat( - selectFrom("foo") - .tupleOf(Selector.column("a"), Selector.column("b"), Selector.column("c"))) - .hasCql("SELECT (a,b,c) FROM foo"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_collection_selector_contains_aliases() { - selectFrom("foo") - .listOf( - Selector.column("a"), Selector.column("b").as("FORBIDDEN_HERE"), Selector.column("c")); - } - - @Test - public void should_generate_map_selectors() { - assertThat( - selectFrom("foo") - .mapOf( - ImmutableMap.of( - 
Selector.column("k1"), - Selector.column("v1"), - Selector.column("k2"), - Selector.column("v2")))) - .hasCql("SELECT {k1:v1,k2:v2} FROM foo"); - assertThat( - selectFrom("foo") - .mapOf( - ImmutableMap.of( - Selector.column("k1"), - Selector.column("v1"), - Selector.column("k2"), - Selector.column("v2")), - DataTypes.TEXT, - DataTypes.INT)) - .hasCql("SELECT (map){k1:v1,k2:v2} FROM foo"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_map_selector_contains_aliases() { - selectFrom("foo") - .mapOf( - ImmutableMap.of( - Selector.column("k1"), - Selector.column("v1").as("FORBIDDEN_HERE"), - Selector.column("k2"), - Selector.column("v2"))); - } - - @Test - public void should_generate_type_hint_selector() { - assertThat(selectFrom("foo").typeHint(Selector.column("k"), DataTypes.INT)) - .hasCql("SELECT (int)k FROM foo"); - } - - @Test - public void should_generate_function_selectors() { - assertThat( - selectFrom("foo") - .function( - "f", Selector.column("c1"), Selector.add(Selector.column("c2"), raw("1")))) - .hasCql("SELECT f(c1,c2+1) FROM foo"); - assertThat( - selectFrom("foo") - .function( - "ks", - "f", - Selector.column("c1"), - Selector.add(Selector.column("c2"), raw("1")))) - .hasCql("SELECT ks.f(c1,c2+1) FROM foo"); - assertThat(selectFrom("foo").writeTime("c1").ttl("c2")) - .hasCql("SELECT writetime(c1),ttl(c2) FROM foo"); - assertThat(selectFrom("foo").toDate("a").toTimestamp("b").toUnixTimestamp("c")) - .hasCql("SELECT todate(a),totimestamp(b),tounixtimestamp(c) FROM foo"); - } - - @Test - public void should_generate_cast_selector() { - assertThat(selectFrom("foo").cast(Selector.column("k"), DataTypes.DOUBLE)) - .hasCql("SELECT CAST(k AS double) FROM foo"); - } - - @Test - public void should_generate_literal_selectors() { - assertThat(selectFrom("foo").literal(1)).hasCql("SELECT 1 FROM foo"); - assertThat(selectFrom("foo").literal(Charsets.UTF_8, new CharsetCodec())) - .hasCql("SELECT 'UTF-8' FROM foo"); - 
assertThat(selectFrom("foo").literal(Charsets.UTF_8, CharsetCodec.TEST_REGISTRY)) - .hasCql("SELECT 'UTF-8' FROM foo"); - assertThat(selectFrom("foo").literal(null)).hasCql("SELECT NULL FROM foo"); - } - - @Test(expected = CodecNotFoundException.class) - public void should_fail_when_no_codec_for_literal() { - selectFrom("foo").literal(Charsets.UTF_8); - } - - @Test - public void should_generate_raw_selector() { - assertThat(selectFrom("foo").raw("a,b,c")).hasCql("SELECT a,b,c FROM foo"); - - assertThat(selectFrom("foo").selectors(Selector.column("bar"), raw("baz"))) - .hasCql("SELECT bar,baz FROM foo"); - } - - @Test - public void should_generate_similarity_functions() { - Select similarity_cosine_clause = - selectFrom("cycling", "comments_vs") - .column("comment") - .function( - "similarity_cosine", - Selector.column("comment_vector"), - literal(CqlVector.newInstance(0.2, 0.15, 0.3, 0.2, 0.05))) - .orderByAnnOf("comment_vector", CqlVector.newInstance(0.1, 0.15, 0.3, 0.12, 0.05)) - .limit(1); - assertThat(similarity_cosine_clause) - .hasCql( - "SELECT comment,similarity_cosine(comment_vector,[0.2, 0.15, 0.3, 0.2, 0.05]) FROM cycling.comments_vs ORDER BY comment_vector ANN OF [0.1, 0.15, 0.3, 0.12, 0.05] LIMIT 1"); - - Select similarity_euclidean_clause = - selectFrom("cycling", "comments_vs") - .column("comment") - .function( - "similarity_euclidean", - Selector.column("comment_vector"), - literal(CqlVector.newInstance(0.2, 0.15, 0.3, 0.2, 0.05))) - .orderByAnnOf("comment_vector", CqlVector.newInstance(0.1, 0.15, 0.3, 0.12, 0.05)) - .limit(1); - assertThat(similarity_euclidean_clause) - .hasCql( - "SELECT comment,similarity_euclidean(comment_vector,[0.2, 0.15, 0.3, 0.2, 0.05]) FROM cycling.comments_vs ORDER BY comment_vector ANN OF [0.1, 0.15, 0.3, 0.12, 0.05] LIMIT 1"); - - Select similarity_dot_product_clause = - selectFrom("cycling", "comments_vs") - .column("comment") - .function( - "similarity_dot_product", - Selector.column("comment_vector"), - 
literal(CqlVector.newInstance(0.2, 0.15, 0.3, 0.2, 0.05))) - .orderByAnnOf("comment_vector", CqlVector.newInstance(0.1, 0.15, 0.3, 0.12, 0.05)) - .limit(1); - assertThat(similarity_dot_product_clause) - .hasCql( - "SELECT comment,similarity_dot_product(comment_vector,[0.2, 0.15, 0.3, 0.2, 0.05]) FROM cycling.comments_vs ORDER BY comment_vector ANN OF [0.1, 0.15, 0.3, 0.12, 0.05] LIMIT 1"); - } - - @Test - public void should_alias_selectors() { - assertThat(selectFrom("foo").column("bar").as("baz")).hasCql("SELECT bar AS baz FROM foo"); - assertThat( - selectFrom("foo") - .selectors(Selector.column("bar").as("c1"), Selector.column("baz").as("c2"))) - .hasCql("SELECT bar AS c1,baz AS c2 FROM foo"); - } - - @Test(expected = IllegalStateException.class) - public void should_fail_to_alias_star_selector() { - selectFrom("foo").all().as("allthethings"); - } - - @Test(expected = IllegalStateException.class) - public void should_fail_to_alias_if_no_selector_yet() { - selectFrom("foo").as("bar"); - } - - @Test - public void should_keep_last_alias_if_aliased_twice() { - assertThat(selectFrom("foo").countAll().as("allthethings").as("total")) - .hasCql("SELECT count(*) AS total FROM foo"); - } - - @Test - public void should_alias_function_selector() { - assertThat(selectFrom("foo").function("bar", Selector.column("col")).as("alias_1")) - .hasCql("SELECT bar(col) AS alias_1 FROM foo"); - - assertThat( - selectFrom("foo") - .function("bar", Selector.column("col")) - .as("alias_1") - .function("baz", Selector.column("col")) - .as("alias_2")) - .hasCql("SELECT bar(col) AS alias_1,baz(col) AS alias_2 FROM foo"); - } - - @Test - public void should_alias_list_selector() { - assertThat(selectFrom("foo").listOf(Selector.column("col")).as("alias_1")) - .hasCql("SELECT [col] AS alias_1 FROM foo"); - - assertThat( - selectFrom("foo") - .listOf(Selector.column("col")) - .as("alias_1") - .listOf(Selector.column("col2")) - .as("alias_2")) - .hasCql("SELECT [col] AS alias_1,[col2] AS alias_2 
FROM foo"); - } - - @Test - public void should_alias_set_selector() { - assertThat(selectFrom("foo").setOf(Selector.column("col")).as("alias_1")) - .hasCql("SELECT {col} AS alias_1 FROM foo"); - - assertThat( - selectFrom("foo") - .setOf(Selector.column("col")) - .as("alias_1") - .setOf(Selector.column("col2")) - .as("alias_2")) - .hasCql("SELECT {col} AS alias_1,{col2} AS alias_2 FROM foo"); - } - - @Test - public void should_alias_tuple_selector() { - assertThat(selectFrom("foo").tupleOf(Selector.column("col")).as("alias_1")) - .hasCql("SELECT (col) AS alias_1 FROM foo"); - - assertThat( - selectFrom("foo") - .tupleOf(Selector.column("col")) - .as("alias_1") - .tupleOf(Selector.column("col2")) - .as("alias_2")) - .hasCql("SELECT (col) AS alias_1,(col2) AS alias_2 FROM foo"); - } - - @Test - public void should_alias_map_selector() { - assertThat( - selectFrom("foo") - .mapOf(ImmutableMap.of(Selector.column("a"), Selector.column("b"))) - .as("alias_1")) - .hasCql("SELECT {a:b} AS alias_1 FROM foo"); - - assertThat( - selectFrom("foo") - .mapOf(ImmutableMap.of(Selector.column("a"), Selector.column("b"))) - .as("alias_1") - .mapOf(ImmutableMap.of(Selector.column("c"), Selector.column("d"))) - .as("alias_2")) - .hasCql("SELECT {a:b} AS alias_1,{c:d} AS alias_2 FROM foo"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/truncate/TruncateTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/truncate/TruncateTest.java deleted file mode 100644 index f4c8d22a294..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/truncate/TruncateTest.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.truncate; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.truncate; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import org.junit.Test; - -public class TruncateTest { - - @Test - public void should_create_truncate_for_table_string() { - assertThat(truncate("foo")).hasCql("TRUNCATE foo").isIdempotent(); - } - - @Test - public void should_create_truncate_for_table_cql_identifier() { - assertThat(truncate(CqlIdentifier.fromCql("foo"))).hasCql("TRUNCATE foo").isIdempotent(); - } - - @Test - public void should_create_truncate_for_keyspace_and_table_string() { - assertThat(truncate("ks", "foo")).hasCql("TRUNCATE ks.foo").isIdempotent(); - } - - @Test - public void should_create_truncate_for_keyspace_and_table_cql_identifier() { - assertThat(truncate(CqlIdentifier.fromCql("ks"), CqlIdentifier.fromCql("foo"))) - .hasCql("TRUNCATE ks.foo") - .isIdempotent(); - } - - @Test - public void should_create_truncate_if_call_build_without_arguments() { - assertThat( - truncate(CqlIdentifier.fromCql("ks"), CqlIdentifier.fromCql("foo")).build().getQuery()) - .isEqualTo("TRUNCATE ks.foo"); - } - - @Test 
- public void should_throw_if_call_build_with_values() { - assertThatThrownBy( - () -> truncate(CqlIdentifier.fromCql("ks"), CqlIdentifier.fromCql("foo")).build("arg1")) - .isExactlyInstanceOf(UnsupportedOperationException.class) - .hasMessage("TRUNCATE doesn't take values as parameters. Use build() method instead."); - } - - @Test - public void should_throw_if_call_build_with_named_values() { - assertThatThrownBy( - () -> - truncate(CqlIdentifier.fromCql("ks"), CqlIdentifier.fromCql("foo")) - .build(ImmutableMap.of("k", "v"))) - .isExactlyInstanceOf(UnsupportedOperationException.class) - .hasMessage("TRUNCATE doesn't take namedValues as parameters. Use build() method instead."); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentAssignmentTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentAssignmentTest.java deleted file mode 100644 index 34f2538587e..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentAssignmentTest.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.update; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.update; - -import com.datastax.oss.driver.api.querybuilder.Literal; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import org.junit.Test; - -public class UpdateFluentAssignmentTest { - - @Test - public void should_generate_simple_column_assignment() { - assertThat(update("foo").setColumn("v", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET v=? WHERE k=?"); - assertThat( - update("ks", "foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE ks.foo SET v=? WHERE k=?"); - } - - @Test - public void should_generate_field_assignment() { - assertThat( - update("foo") - .setField("address", "street", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET address.street=? WHERE k=?"); - } - - @Test - public void should_generate_map_value_assignment() { - assertThat( - update("foo") - .setMapValue("features", literal("color"), bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET features['color']=? WHERE k=?"); - } - - @Test - public void should_generate_list_value_assignment() { - assertThat( - update("foo") - .setListValue("features", literal(1), bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET features[1]=? 
WHERE k=?"); - } - - @Test - public void should_generate_counter_operations() { - assertThat(update("foo").increment("c").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c=c+1 WHERE k=?"); - assertThat(update("foo").increment("c", literal(2)).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c=c+2 WHERE k=?"); - assertThat(update("foo").increment("c", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c=c+? WHERE k=?"); - - assertThat(update("foo").decrement("c").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c=c-1 WHERE k=?"); - assertThat(update("foo").decrement("c", literal(2)).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c=c-2 WHERE k=?"); - assertThat(update("foo").decrement("c", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c=c-? WHERE k=?"); - } - - @Test - public void should_generate_list_operations() { - Literal listLiteral = literal(ImmutableList.of(1, 2, 3)); - - assertThat(update("foo").append("l", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l+? WHERE k=?"); - assertThat(update("foo").append("l", listLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l+[1,2,3] WHERE k=?"); - assertThat( - update("foo") - .appendListElement("l", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l+[?] 
WHERE k=?"); - - assertThat(update("foo").prepend("l", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=?+l WHERE k=?"); - assertThat(update("foo").prepend("l", listLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=[1,2,3]+l WHERE k=?"); - assertThat( - update("foo") - .prependListElement("l", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=[?]+l WHERE k=?"); - - assertThat(update("foo").remove("l", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l-? WHERE k=?"); - assertThat(update("foo").remove("l", listLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l-[1,2,3] WHERE k=?"); - assertThat( - update("foo") - .removeListElement("l", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l-[?] WHERE k=?"); - } - - @Test - public void should_generate_set_operations() { - Literal setLiteral = literal(ImmutableSet.of(1, 2, 3)); - - assertThat(update("foo").append("s", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s=s+? 
WHERE k=?"); - assertThat(update("foo").append("s", setLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s=s+{1,2,3} WHERE k=?"); - assertThat( - update("foo") - .appendSetElement("s", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s=s+{?} WHERE k=?"); - - assertThat(update("foo").prepend("s", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s=?+s WHERE k=?"); - assertThat(update("foo").prepend("s", setLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s={1,2,3}+s WHERE k=?"); - assertThat( - update("foo") - .prependSetElement("s", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s={?}+s WHERE k=?"); - - assertThat(update("foo").remove("s", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s=s-? WHERE k=?"); - assertThat(update("foo").remove("s", setLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s=s-{1,2,3} WHERE k=?"); - assertThat( - update("foo") - .removeSetElement("s", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s=s-{?} WHERE k=?"); - } - - @Test - public void should_generate_map_operations() { - Literal mapLiteral = literal(ImmutableMap.of(1, "foo", 2, "bar")); - - assertThat(update("foo").append("m", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m=m+? 
WHERE k=?"); - assertThat(update("foo").append("m", mapLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m=m+{1:'foo',2:'bar'} WHERE k=?"); - assertThat( - update("foo") - .appendMapEntry("m", literal(1), literal("foo")) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m=m+{1:'foo'} WHERE k=?"); - - assertThat(update("foo").prepend("m", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m=?+m WHERE k=?"); - assertThat(update("foo").prepend("m", mapLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m={1:'foo',2:'bar'}+m WHERE k=?"); - assertThat( - update("foo") - .prependMapEntry("m", literal(1), literal("foo")) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m={1:'foo'}+m WHERE k=?"); - - assertThat(update("foo").remove("m", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m=m-? WHERE k=?"); - assertThat(update("foo").remove("m", mapLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m=m-{1:'foo',2:'bar'} WHERE k=?"); - assertThat( - update("foo") - .removeMapEntry("m", literal(1), literal("foo")) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m=m-{1:'foo'} WHERE k=?"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentConditionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentConditionTest.java deleted file mode 100644 index 3f333e0ef86..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentConditionTest.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.update; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.update; - -import org.junit.Test; - -public class UpdateFluentConditionTest { - - @Test - public void should_generate_simple_column_condition() { - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifColumn("v") - .isEqualTo(literal(1))) - .hasCql("UPDATE foo SET v=? WHERE k=? IF v=1"); - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifColumn("v1") - .isEqualTo(literal(1)) - .ifColumn("v2") - .isEqualTo(literal(2))) - .hasCql("UPDATE foo SET v=? WHERE k=? IF v1=1 AND v2=2"); - } - - @Test - public void should_generate_field_condition() { - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifField("v", "f") - .isEqualTo(literal(1))) - .hasCql("UPDATE foo SET v=? WHERE k=? 
IF v.f=1"); - } - - @Test - public void should_generate_element_condition() { - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifElement("v", literal(1)) - .isEqualTo(literal(1))) - .hasCql("UPDATE foo SET v=? WHERE k=? IF v[1]=1"); - } - - @Test - public void should_generate_if_exists_condition() { - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifExists()) - .hasCql("UPDATE foo SET v=? WHERE k=? IF EXISTS"); - } - - @Test - public void should_cancel_if_exists_if_other_condition_added() { - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifExists() - .ifColumn("v") - .isEqualTo(literal(1))) - .hasCql("UPDATE foo SET v=? WHERE k=? IF v=1"); - } - - @Test - public void should_cancel_other_conditions_if_if_exists_added() { - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifColumn("v1") - .isEqualTo(literal(1)) - .ifColumn("v2") - .isEqualTo(literal(2)) - .ifExists()) - .hasCql("UPDATE foo SET v=? WHERE k=? IF EXISTS"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentRelationTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentRelationTest.java deleted file mode 100644 index 9d67d0b9819..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentRelationTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.update; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.update; - -import com.datastax.oss.driver.api.querybuilder.relation.RelationTest; -import com.datastax.oss.driver.api.querybuilder.select.SelectFluentRelationTest; -import org.junit.Test; - -/** - * Mostly covered by other tests already. - * - * @see SelectFluentRelationTest - * @see RelationTest - */ -public class UpdateFluentRelationTest { - - @Test - public void should_generate_update_with_column_relation() { - assertThat(update("foo").setColumn("v", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET v=? WHERE k=?"); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateIdempotenceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateIdempotenceTest.java deleted file mode 100644 index 6727e5856ef..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateIdempotenceTest.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.update; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.function; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.raw; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.update; - -import java.util.Arrays; -import org.junit.Test; - -public class UpdateIdempotenceTest { - - @Test - public void should_not_be_idempotent_if_conditional() { - assertThat(update("foo").setColumn("v", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET v=? WHERE k=?") - .isIdempotent(); - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifExists()) - .hasCql("UPDATE foo SET v=? WHERE k=? IF EXISTS") - .isNotIdempotent(); - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()) - .ifColumn("v") - .isEqualTo(literal(1))) - .hasCql("UPDATE foo SET v=? WHERE k=? 
IF v=1") - .isNotIdempotent(); - } - - @Test - public void should_not_be_idempotent_if_assigning_non_idempotent_term() { - assertThat( - update("foo") - .setColumn("v", function("non_idempotent_func")) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET v=non_idempotent_func() WHERE k=?") - .isNotIdempotent(); - assertThat( - update("foo") - .setColumn("v", raw("non_idempotent_func()")) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET v=non_idempotent_func() WHERE k=?") - .isNotIdempotent(); - } - - @Test - public void should_not_be_idempotent_if_using_non_idempotent_term_in_relation() { - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(function("non_idempotent_func"))) - .hasCql("UPDATE foo SET v=? WHERE k=non_idempotent_func()") - .isNotIdempotent(); - assertThat( - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(raw("non_idempotent_func()"))) - .hasCql("UPDATE foo SET v=? 
WHERE k=non_idempotent_func()") - .isNotIdempotent(); - } - - @Test - public void should_not_be_idempotent_if_updating_counter() { - assertThat(update("foo").increment("c").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c=c+1 WHERE k=?") - .isNotIdempotent(); - assertThat(update("foo").decrement("c").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c=c-1 WHERE k=?") - .isNotIdempotent(); - } - - @Test - public void should_not_be_idempotent_if_adding_element_to_list() { - assertThat( - update("foo") - .appendListElement("l", literal(1)) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l+[1] WHERE k=?") - .isNotIdempotent(); - assertThat( - update("foo") - .prependListElement("l", literal(1)) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=[1]+l WHERE k=?") - .isNotIdempotent(); - - // On the other hand, other collections are safe: - assertThat( - update("foo") - .appendSetElement("s", literal(1)) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s=s+{1} WHERE k=?") - .isIdempotent(); - assertThat( - update("foo") - .appendMapEntry("m", literal(1), literal("bar")) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m=m+{1:'bar'} WHERE k=?") - .isIdempotent(); - - // Also, removals are always safe: - assertThat( - update("foo") - .removeListElement("l", literal(1)) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l-[1] WHERE k=?") - .isIdempotent(); - assertThat( - update("foo") - .removeSetElement("s", literal(1)) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s=s-{1} WHERE k=?") - .isIdempotent(); - assertThat( - update("foo") - .removeMapEntry("m", literal(1), literal("bar")) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m=m-{1:'bar'} WHERE k=?") - .isIdempotent(); - } - - @Test - public void 
should_not_be_idempotent_if_concatenating_to_collection() { - assertThat( - update("foo") - .append("l", literal(Arrays.asList(1, 2, 3))) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l+[1,2,3] WHERE k=?") - .isNotIdempotent(); - assertThat( - update("foo") - .prepend("l", literal(Arrays.asList(1, 2, 3))) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=[1,2,3]+l WHERE k=?") - .isNotIdempotent(); - // However, removals are always safe: - assertThat( - update("foo") - .remove("l", literal(Arrays.asList(1, 2, 3))) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l=l-[1,2,3] WHERE k=?") - .isIdempotent(); - } - - @Test - public void should_be_idempotent_if_relation_does_not_have_right_operand() { - assertThat(update("foo").setColumn("col1", literal(42)).whereColumn("col2").isNotNull()) - .hasCql("UPDATE foo SET col1=42 WHERE col2 IS NOT NULL") - .isIdempotent(); - } -} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateUsingTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateUsingTest.java deleted file mode 100644 index 00006370f97..00000000000 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateUsingTest.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.update; - -import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.update; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.internal.querybuilder.update.DefaultUpdate; -import org.junit.Test; - -public class UpdateUsingTest { - - @Test - public void should_generate_using_timestamp_clause() { - assertThat( - update("foo") - .usingTimestamp(1) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo USING TIMESTAMP 1 SET v=? WHERE k=?"); - assertThat( - update("foo") - .usingTimestamp(bindMarker()) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo USING TIMESTAMP ? SET v=? WHERE k=?"); - } - - @Test - public void should_use_last_timestamp_if_called_multiple_times() { - assertThat( - update("foo") - .usingTimestamp(1) - .usingTimestamp(2) - .usingTimestamp(3) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo USING TIMESTAMP 3 SET v=? WHERE k=?"); - } - - @Test - public void should_generate_using_ttl_clause() { - assertThat( - update("foo") - .usingTtl(10) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo USING TTL 10 SET v=? 
WHERE k=?"); - assertThat( - update("foo") - .usingTtl(bindMarker()) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo USING TTL ? SET v=? WHERE k=?"); - } - - @Test - public void should_use_last_ttl_if_called_multiple_times() { - assertThat( - update("foo") - .usingTtl(10) - .usingTtl(20) - .usingTtl(30) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo USING TTL 30 SET v=? WHERE k=?"); - } - - @Test - public void should_generate_using_ttl_and_timestamp_clauses() { - assertThat( - update("foo") - .usingTtl(10) - .usingTimestamp(1) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo USING TIMESTAMP 1 AND TTL 10 SET v=? WHERE k=?"); - // order of TTL and TIMESTAMP method calls should not change the order of the generated clauses - assertThat( - update("foo") - .usingTimestamp(1) - .usingTtl(10) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo USING TIMESTAMP 1 AND TTL 10 SET v=? WHERE k=?"); - assertThat( - update("foo") - .usingTtl(bindMarker()) - .usingTimestamp(1) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker())) - .hasCql("UPDATE foo USING TIMESTAMP 1 AND TTL ? SET v=? 
WHERE k=?"); - } - - @Test - public void should_throw_exception_with_invalid_ttl() { - DefaultUpdate defaultUpdate = - (DefaultUpdate) - update("foo") - .usingTtl(10) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()); - - Throwable t = - catchThrowable( - () -> - new DefaultUpdate( - defaultUpdate.getKeyspace(), - defaultUpdate.getTable(), - defaultUpdate.getTimestamp(), - new Object(), // invalid TTL object - defaultUpdate.getAssignments(), - defaultUpdate.getRelations(), - defaultUpdate.isIfExists(), - defaultUpdate.getConditions())); - - assertThat(t) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("TTL value must be a BindMarker or an Integer"); - } - - @Test - public void should_throw_exception_with_invalid_timestamp() { - DefaultUpdate defaultUpdate = - (DefaultUpdate) - update("foo") - .usingTtl(10) - .setColumn("v", bindMarker()) - .whereColumn("k") - .isEqualTo(bindMarker()); - - Throwable t = - catchThrowable( - () -> - new DefaultUpdate( - defaultUpdate.getKeyspace(), - defaultUpdate.getTable(), - new Object(), // invalid timestamp object - defaultUpdate.getTtl(), - defaultUpdate.getAssignments(), - defaultUpdate.getRelations(), - defaultUpdate.isIfExists(), - defaultUpdate.getConditions())); - assertThat(t) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("TIMESTAMP value must be a BindMarker or a Long"); - } -} diff --git a/query-builder/src/test/resources/project.properties b/query-builder/src/test/resources/project.properties deleted file mode 100644 index 66eab90b6e4..00000000000 --- a/query-builder/src/test/resources/project.properties +++ /dev/null @@ -1,19 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -project.basedir=${basedir} \ No newline at end of file diff --git a/src/license/header.txt b/src/license/header.txt new file mode 100644 index 00000000000..c4e34ff415e --- /dev/null +++ b/src/license/header.txt @@ -0,0 +1,13 @@ + Copyright (C) ${project.inceptionYear}-${currentYear} DataStax Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/src/main/config/ide/eclipse-formatter.xml b/src/main/config/ide/eclipse-formatter.xml new file mode 100644 index 00000000000..025d1bd1c73 --- /dev/null +++ b/src/main/config/ide/eclipse-formatter.xml @@ -0,0 +1,295 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/main/config/ide/eclipse.importorder b/src/main/config/ide/eclipse.importorder new file mode 100644 index 00000000000..97adde6f5d6 --- /dev/null +++ b/src/main/config/ide/eclipse.importorder @@ -0,0 +1,8 @@ +#Organize Import Order +#Sun Oct 19 21:16:33 CEST 2014 +5=\#com.datastax.driver.core +4=com.datastax.driver.core +3=\# +2= +1=\#java +0=java diff --git a/src/main/config/ide/intellij-code-style.jar b/src/main/config/ide/intellij-code-style.jar new file mode 100644 index 00000000000..153394580d8 Binary files /dev/null and b/src/main/config/ide/intellij-code-style.jar differ diff --git a/test-infra/pom.xml b/test-infra/pom.xml deleted file mode 100644 index 5bf2d07f652..00000000000 --- a/test-infra/pom.xml +++ /dev/null @@ -1,118 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-test-infra - bundle - Apache Cassandra Java Driver - test infrastructure tools - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - org.apache.cassandra - java-driver-core - ${project.parent.version} - - - com.github.spotbugs - spotbugs-annotations - provided - - - junit - 
junit - - - org.assertj - assertj-core - - - - com.datastax.oss.simulacron - simulacron-native-server - true - - - - org.apache.commons - commons-exec - true - - - org.awaitility - awaitility - - - - - - src/main/resources - - - ${project.basedir}/.. - - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - maven-jar-plugin - - - - com.datastax.oss.driver.tests.infrastructure - - - - - - org.apache.felix - maven-bundle-plugin - - - com.datastax.oss.driver.testinfra - - * - com.datastax.oss.driver.*.testinfra.*, com.datastax.oss.driver.assertions, com.datastax.oss.driver.categories - - - - - - diff --git a/test-infra/revapi.json b/test-infra/revapi.json deleted file mode 100644 index 293d9f4d142..00000000000 --- a/test-infra/revapi.json +++ /dev/null @@ -1,196 +0,0 @@ -{ - "revapi": { - "java": { - "filter": { - "packages": { - "regex": true, - "exclude": [ - "com\\.datastax\\.(oss|dse)\\.protocol\\.internal(\\..+)?", - "com\\.datastax\\.(oss|dse)\\.driver\\.internal(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.shaded(\\..+)?", - "com\\.datastax\\.oss\\.simulacron(\\..+)?", - "org\\.assertj(\\..+)?", - "// Don't re-check sibling modules that this module depends on", - "com\\.datastax\\.(oss|dse)\\.driver\\.api\\.core(\\..+)?" 
- ] - } - } - }, - "ignore": [ - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Set com.datastax.oss.driver.api.testinfra.CassandraResourceRule::getContactPoints()", - "new": "method java.util.Set com.datastax.oss.driver.api.testinfra.CassandraResourceRule::getContactPoints()", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.numberOfParametersChanged", - "old": "method void com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::init(java.util.Map, com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy.DistanceReporter, java.util.Set)", - "new": "method void com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::init(java.util.Map, com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy.DistanceReporter)", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.returnTypeTypeParametersChanged", - "old": "method java.util.Set com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule::getContactPoints()", - "new": "method java.util.Set com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule::getContactPoints()", - "justification": "JAVA-2165: Abstract node connection information" - }, - { - "code": "java.method.returnTypeChanged", - "old": "method com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoaderBuilder com.datastax.oss.driver.api.testinfra.session.SessionUtils::configLoaderBuilder()", - "new": "method com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder com.datastax.oss.driver.api.testinfra.session.SessionUtils::configLoaderBuilder()", - "justification": "JAVA-2201: Expose a public API for programmatic config" - }, - { - "code": "java.annotation.removed", - "old": "parameter java.util.Queue 
com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(===com.datastax.oss.driver.api.core.session.Request===, com.datastax.oss.driver.api.core.session.Session)", - "new": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(===com.datastax.oss.driver.api.core.session.Request===, com.datastax.oss.driver.api.core.session.Session)", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Method arguments were mistakenly annotated with @NonNull" - }, - { - "code": "java.annotation.added", - "old": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(===com.datastax.oss.driver.api.core.session.Request===, com.datastax.oss.driver.api.core.session.Session)", - "new": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(===com.datastax.oss.driver.api.core.session.Request===, com.datastax.oss.driver.api.core.session.Session)", - "annotation": "@edu.umd.cs.findbugs.annotations.Nullable", - "justification": "Method arguments were mistakenly annotated with @NonNull" - }, - { - "code": "java.annotation.removed", - "old": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(com.datastax.oss.driver.api.core.session.Request, ===com.datastax.oss.driver.api.core.session.Session===)", - "new": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(com.datastax.oss.driver.api.core.session.Request, ===com.datastax.oss.driver.api.core.session.Session===)", - "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", - "justification": "Method arguments were mistakenly annotated with @NonNull" - }, - { - "code": "java.annotation.added", - "old": "parameter java.util.Queue 
com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(com.datastax.oss.driver.api.core.session.Request, ===com.datastax.oss.driver.api.core.session.Session===)", - "new": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(com.datastax.oss.driver.api.core.session.Request, ===com.datastax.oss.driver.api.core.session.Session===)", - "annotation": "@edu.umd.cs.findbugs.annotations.Nullable", - "justification": "Method arguments were mistakenly annotated with @NonNull" - }, - { - "code": "java.method.parameterTypeParameterChanged", - "old": "parameter com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter.QueryCounterBuilder com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter::builder(===com.datastax.oss.simulacron.server.BoundTopic===)", - "new": "parameter com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter.QueryCounterBuilder com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter::builder(===com.datastax.oss.simulacron.server.BoundTopic===)", - "justification": "Fix usage of raw type BoundTopic" - }, - { - "code": "java.field.constantValueChanged", - "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD", - "new": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD", - "justification": "JAVA-2620: Use clearly dummy passwords in tests" - }, - { - "code": "java.field.constantValueChanged", - "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD", - "new": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD", - "justification": "JAVA-2620: Use clearly dummy passwords in tests" - }, - { - "code": "java.field.constantValueChanged", - "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_SERVER_KEYSTORE_PASSWORD", - "new": "field 
com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_SERVER_KEYSTORE_PASSWORD", - "justification": "JAVA-2620: Use clearly dummy passwords in tests" - }, - { - "code": "java.field.constantValueChanged", - "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_SERVER_TRUSTSTORE_PASSWORD", - "new": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_SERVER_TRUSTSTORE_PASSWORD", - "justification": "JAVA-2620: Use clearly dummy passwords in tests" - }, - { - "code": "java.missing.newClass", - "new": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec", - "justification":"Dependency was made optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec.Builder", - "justification":"Dependency was made optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class com.datastax.oss.simulacron.common.cluster.QueryLog", - "justification":"Dependency was made optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class com.datastax.oss.simulacron.server.BoundCluster", - "justification":"Dependency was made optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class com.datastax.oss.simulacron.server.BoundTopic", - "justification":"Dependency was made optional" - }, - { - "code": "java.missing.newClass", - "new": "missing-class com.datastax.oss.simulacron.server.Server", - "justification":"Dependency was made optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec", - "new": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec", - "justification": "Dependency was made optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec.Builder", - "new": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec.Builder", - 
"justification": "Dependency was made optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class com.datastax.oss.simulacron.common.cluster.QueryLog", - "new": "missing-class com.datastax.oss.simulacron.common.cluster.QueryLog", - "justification": "Dependency was made optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class com.datastax.oss.simulacron.server.BoundCluster", - "new": "missing-class com.datastax.oss.simulacron.server.BoundCluster", - "justification": "Dependency was made optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class com.datastax.oss.simulacron.server.BoundTopic", - "new": "missing-class com.datastax.oss.simulacron.server.BoundTopic", - "justification": "Dependency was made optional" - }, - { - "code": "java.missing.oldClass", - "old": "missing-class com.datastax.oss.simulacron.server.Server", - "new": "missing-class com.datastax.oss.simulacron.server.Server", - "justification": "Dependency was made optional" - }, - { - "code": "java.method.removed", - "old": "method void com.datastax.oss.driver.api.testinfra.ccm.CcmRule::reloadCore(int, java.lang.String, java.lang.String, boolean)", - "justification": "Modifying the state of a globally shared CCM instance is dangerous" - }, - { - "code": "java.method.removed", - "old": "method java.util.Optional com.datastax.oss.driver.api.testinfra.ccm.BaseCcmRule::getDseVersion()", - "justification": "Method has been replaced with more generic isDistributionOf(BackendType) and getDistributionVersion()" - }, - { - "code": "java.field.removed", - "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DSE_ENABLEMENT", - "justification": "Method has been replaced with more generic isDistributionOf(BackendType) and getDistributionVersion()" - }, - { - "code": "java.method.nowStatic", - "old": "method com.datastax.oss.driver.api.core.Version com.datastax.oss.driver.api.testinfra.ccm.CcmBridge::getCassandraVersion()", - "new": "method 
com.datastax.oss.driver.api.core.Version com.datastax.oss.driver.api.testinfra.ccm.CcmBridge::getCassandraVersion()", - "justification": "Previous and current implemntation do not relay on non-static fields" - }, - { - "code": "java.method.removed", - "old": "method java.util.Optional com.datastax.oss.driver.api.testinfra.ccm.CcmBridge::getDseVersion()", - "justification": "Method has been replaced with more generic isDistributionOf(BackendType) and getDistributionVersion()" - } - ] - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraRequirement.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraRequirement.java deleted file mode 100644 index acbee82f3b3..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraRequirement.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra; - -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; - -/** - * Annotation for a Class or Method that defines a Cassandra Version requirement. 
If the cassandra - * version in use does not meet the version requirement, the test is skipped. - * - * @deprecated Replaced by {@link - * com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement} - */ -@Deprecated -@Retention(RetentionPolicy.RUNTIME) -public @interface CassandraRequirement { - - /** @return The minimum version required to execute this test, i.e. "2.0.13" */ - String min() default ""; - - /** - * @return the maximum exclusive version allowed to execute this test, i.e. "2.2" means only tests - * < "2.2" may execute this test. - */ - String max() default ""; - - /** @return The description returned if this version requirement is not met. */ - String description() default "Does not meet version requirement."; -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraResourceRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraResourceRule.java deleted file mode 100644 index 83c27b45e3b..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraResourceRule.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.Set; -import org.junit.rules.ExternalResource; -import org.junit.rules.RuleChain; - -/** - * An {@link ExternalResource} which provides a {@link #getContactPoints()} for accessing the - * contact points of the cassandra cluster. - */ -public abstract class CassandraResourceRule extends ExternalResource { - - /** - * @deprecated this method is preserved for backward compatibility only. The correct way to ensure - * that a {@code CassandraResourceRule} gets initialized before a {@link SessionRule} is to - * wrap them into a {@link RuleChain}. Therefore there is no need to force the initialization - * of a {@code CassandraResourceRule} explicitly anymore. - */ - @Deprecated - public synchronized void setUp() { - try { - this.before(); - } catch (Throwable t) { - throw new RuntimeException(t); - } - } - - /** - * @return Default contact points associated with this cassandra resource. By default returns - * 127.0.0.1 - */ - public Set getContactPoints() { - return Collections.singleton(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))); - } - - /** @return The highest protocol version supported by this resource. 
*/ - public abstract ProtocolVersion getHighestProtocolVersion(); -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/DseRequirement.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/DseRequirement.java deleted file mode 100644 index c1c4249620f..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/DseRequirement.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra; - -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; - -/** - * Annotation for a Class or Method that defines a DSE Version requirement. If the DSE version in - * use does not meet the version requirement or DSE isn't used at all, the test is skipped. - * - * @deprecated Replaced by {@link - * com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement} - */ -@Deprecated -@Retention(RetentionPolicy.RUNTIME) -public @interface DseRequirement { - - /** @return The minimum version required to execute this test, i.e. "5.0.13" */ - String min() default ""; - - /** - * @return the maximum exclusive version allowed to execute this test, i.e. 
"2.2" means only tests - * < "2.2" may execute this test. - */ - String max() default ""; - - /** @return The description returned if this version requirement is not met. */ - String description() default "Does not meet version requirement."; -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/BaseCcmRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/BaseCcmRule.java deleted file mode 100644 index 882cd55b948..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/BaseCcmRule.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.ccm; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirementRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import org.junit.AssumptionViolatedException; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; - -public abstract class BaseCcmRule extends CassandraResourceRule { - - protected final CcmBridge ccmBridge; - - BaseCcmRule(CcmBridge ccmBridge) { - this.ccmBridge = ccmBridge; - Runtime.getRuntime() - .addShutdownHook( - new Thread( - () -> { - try { - ccmBridge.close(); - } catch (Exception e) { - // silently remove as may have already been removed. - } - })); - } - - @Override - protected void before() { - ccmBridge.create(); - ccmBridge.start(); - } - - @Override - protected void after() { - ccmBridge.close(); - } - - @Override - public Statement apply(Statement base, Description description) { - if (BackendRequirementRule.meetsDescriptionRequirements(description)) { - return super.apply(base, description); - } else { - // requirements not met, throw reasoning assumption to skip test - return new Statement() { - @Override - public void evaluate() { - throw new AssumptionViolatedException( - BackendRequirementRule.buildReasonString(description)); - } - }; - } - } - - public BackendType getDistribution() { - return CcmBridge.DISTRIBUTION; - } - - public boolean isDistributionOf(BackendType type) { - return CcmBridge.isDistributionOf(type); - } - - public boolean isDistributionOf(BackendType type, CcmBridge.VersionComparator comparator) { - return CcmBridge.isDistributionOf(type, comparator); - } - - public Version getDistributionVersion() { - return CcmBridge.getDistributionVersion(); - } - 
- public Version getCassandraVersion() { - return CcmBridge.getCassandraVersion(); - } - - @Override - public ProtocolVersion getHighestProtocolVersion() { - if (CcmBridge.getCassandraVersion().compareTo(Version.V2_2_0) >= 0) { - return DefaultProtocolVersion.V4; - } else { - return DefaultProtocolVersion.V3; - } - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmBridge.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmBridge.java deleted file mode 100644 index f0ce6bc5b0e..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmBridge.java +++ /dev/null @@ -1,645 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.ccm; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import com.datastax.oss.driver.shaded.guava.common.io.Resources; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; -import org.apache.commons.exec.CommandLine; -import org.apache.commons.exec.DefaultExecutor; -import org.apache.commons.exec.ExecuteStreamHandler; -import org.apache.commons.exec.ExecuteWatchdog; -import org.apache.commons.exec.Executor; -import org.apache.commons.exec.LogOutputStream; -import org.apache.commons.exec.PumpStreamHandler; -import org.assertj.core.util.Lists; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class CcmBridge implements AutoCloseable { - - private static final Logger LOG = LoggerFactory.getLogger(CcmBridge.class); - - public static BackendType DISTRIBUTION = - BackendType.valueOf( - System.getProperty("ccm.distribution", BackendType.CASSANDRA.name()).toUpperCase()); - public static final Version VERSION = - Objects.requireNonNull(Version.parse(System.getProperty("ccm.version", "4.0.0"))); - - public static final String INSTALL_DIRECTORY = System.getProperty("ccm.directory"); - - public static final String BRANCH = System.getProperty("ccm.branch"); - - public static final String CLUSTER_NAME = "ccm_1"; - - public static final String 
DEFAULT_CLIENT_TRUSTSTORE_PASSWORD = "fakePasswordForTests"; - public static final String DEFAULT_CLIENT_TRUSTSTORE_PATH = "/client.truststore"; - - public static final File DEFAULT_CLIENT_TRUSTSTORE_FILE = - createTempStore(DEFAULT_CLIENT_TRUSTSTORE_PATH); - - public static final String DEFAULT_CLIENT_KEYSTORE_PASSWORD = "fakePasswordForTests"; - public static final String DEFAULT_CLIENT_KEYSTORE_PATH = "/client.keystore"; - - public static final File DEFAULT_CLIENT_KEYSTORE_FILE = - createTempStore(DEFAULT_CLIENT_KEYSTORE_PATH); - - // Contains the same keypair as the client keystore, but in format usable by OpenSSL - public static final File DEFAULT_CLIENT_PRIVATE_KEY_FILE = createTempStore("/client.key"); - public static final File DEFAULT_CLIENT_CERT_CHAIN_FILE = createTempStore("/client.crt"); - - public static final String DEFAULT_SERVER_TRUSTSTORE_PASSWORD = "fakePasswordForTests"; - public static final String DEFAULT_SERVER_TRUSTSTORE_PATH = "/server.truststore"; - - private static final File DEFAULT_SERVER_TRUSTSTORE_FILE = - createTempStore(DEFAULT_SERVER_TRUSTSTORE_PATH); - - public static final String DEFAULT_SERVER_KEYSTORE_PASSWORD = "fakePasswordForTests"; - public static final String DEFAULT_SERVER_KEYSTORE_PATH = "/server.keystore"; - - private static final File DEFAULT_SERVER_KEYSTORE_FILE = - createTempStore(DEFAULT_SERVER_KEYSTORE_PATH); - - // A separate keystore where the certificate has a CN of localhost, used for hostname - // validation testing. 
- public static final String DEFAULT_SERVER_LOCALHOST_KEYSTORE_PATH = "/server_localhost.keystore"; - - private static final File DEFAULT_SERVER_LOCALHOST_KEYSTORE_FILE = - createTempStore(DEFAULT_SERVER_LOCALHOST_KEYSTORE_PATH); - - // major DSE versions - public static final Version V6_0_0 = Version.parse("6.0.0"); - public static final Version V5_1_0 = Version.parse("5.1.0"); - public static final Version V5_0_0 = Version.parse("5.0.0"); - - // mapped C* versions from DSE versions - public static final Version V4_0_0 = Version.parse("4.0.0"); - public static final Version V3_10 = Version.parse("3.10"); - public static final Version V3_0_15 = Version.parse("3.0.15"); - public static final Version V2_1_19 = Version.parse("2.1.19"); - - // mapped C* versions from HCD versions - public static final Version V4_0_11 = Version.parse("4.0.11"); - - static { - LOG.info("CCM Bridge configured with {} version {}", DISTRIBUTION.getFriendlyName(), VERSION); - } - - private final int[] nodes; - private final Path configDirectory; - private final AtomicBoolean started = new AtomicBoolean(); - private final AtomicBoolean created = new AtomicBoolean(); - private final String ipPrefix; - private final Map cassandraConfiguration; - private final Map dseConfiguration; - private final List rawDseYaml; - private final List createOptions; - private final List dseWorkloads; - private final String jvmArgs; - - private CcmBridge( - Path configDirectory, - int[] nodes, - String ipPrefix, - Map cassandraConfiguration, - Map dseConfiguration, - List dseConfigurationRawYaml, - List createOptions, - Collection jvmArgs, - List dseWorkloads) { - this.configDirectory = configDirectory; - if (nodes.length == 1) { - // Hack to ensure that the default DC is always called 'dc1': pass a list ('-nX:0') even if - // there is only one DC (with '-nX', CCM configures `SimpleSnitch`, which hard-codes the name - // to 'datacenter1') - this.nodes = new int[] {nodes[0], 0}; - } else { - this.nodes = nodes; - 
} - this.ipPrefix = ipPrefix; - this.cassandraConfiguration = cassandraConfiguration; - this.dseConfiguration = dseConfiguration; - this.rawDseYaml = dseConfigurationRawYaml; - this.createOptions = createOptions; - - StringBuilder allJvmArgs = new StringBuilder(""); - String quote = isWindows() ? "\"" : ""; - for (String jvmArg : jvmArgs) { - // Windows requires jvm arguments to be quoted, while *nix requires unquoted. - allJvmArgs.append(" "); - allJvmArgs.append(quote); - allJvmArgs.append("--jvm_arg="); - allJvmArgs.append(jvmArg); - allJvmArgs.append(quote); - } - this.jvmArgs = allJvmArgs.toString(); - this.dseWorkloads = dseWorkloads; - } - - // Copied from Netty's PlatformDependent to avoid the dependency on Netty - private static boolean isWindows() { - return System.getProperty("os.name", "").toLowerCase(Locale.US).contains("win"); - } - - public static boolean isDistributionOf(BackendType type) { - return DISTRIBUTION == type; - } - - public static boolean isDistributionOf(BackendType type, VersionComparator comparator) { - return isDistributionOf(type) - && comparator.accept(getDistributionVersion(), getCassandraVersion()); - } - - public static Version getDistributionVersion() { - return VERSION; - } - - public static Version getCassandraVersion() { - if (isDistributionOf(BackendType.CASSANDRA)) { - return VERSION; - } - return DistributionCassandraVersions.getCassandraVersion(DISTRIBUTION, VERSION); - } - - private String getCcmVersionString(Version version) { - // for 4.0 or 5.0 pre-releases, the CCM version string needs to be "4.0-alpha1", "4.0-alpha2" or - // "5.0-beta1" Version.toString() always adds a patch value, even if it's not specified when - // parsing. 
- if (version.getMajor() >= 4 - && version.getMinor() == 0 - && version.getPatch() == 0 - && version.getPreReleaseLabels() != null) { - // truncate the patch version from the Version string - StringBuilder sb = new StringBuilder(); - sb.append(version.getMajor()).append('.').append(version.getMinor()); - for (String preReleaseString : version.getPreReleaseLabels()) { - sb.append('-').append(preReleaseString); - } - return sb.toString(); - } - return version.toString(); - } - - public void create() { - if (created.compareAndSet(false, true)) { - if (INSTALL_DIRECTORY != null) { - createOptions.add("--install-dir=" + new File(INSTALL_DIRECTORY).getAbsolutePath()); - } else if (BRANCH != null) { - createOptions.add("-v git:" + BRANCH.trim().replaceAll("\"", "")); - - } else { - createOptions.add("-v " + getCcmVersionString(VERSION)); - } - createOptions.addAll(Arrays.asList(DISTRIBUTION.getCcmOptions())); - execute( - "create", - CLUSTER_NAME, - "-i", - ipPrefix, - "-n", - Arrays.stream(nodes).mapToObj(n -> "" + n).collect(Collectors.joining(":")), - createOptions.stream().collect(Collectors.joining(" "))); - - Version cassandraVersion = getCassandraVersion(); - for (Map.Entry conf : cassandraConfiguration.entrySet()) { - String originalKey = conf.getKey(); - Object originalValue = conf.getValue(); - execute( - "updateconf", - String.join( - ":", - getConfigKey(originalKey, originalValue, cassandraVersion), - getConfigValue(originalKey, originalValue, cassandraVersion))); - } - - // If we're dealing with anything more recent than 2.2 explicitly enable UDF... but run it - // through our conversion process to make - // sure more recent versions don't have a problem. 
- if (cassandraVersion.compareTo(Version.V2_2_0) >= 0 || isDistributionOf(BackendType.HCD)) { - String originalKey = "enable_user_defined_functions"; - Object originalValue = "true"; - execute( - "updateconf", - String.join( - ":", - getConfigKey(originalKey, originalValue, cassandraVersion), - getConfigValue(originalKey, originalValue, cassandraVersion))); - } - - // Note that we aren't performing any substitution on DSE key/value props (at least for now) - if (isDistributionOf(BackendType.DSE)) { - for (Map.Entry conf : dseConfiguration.entrySet()) { - execute("updatedseconf", String.format("%s:%s", conf.getKey(), conf.getValue())); - } - for (String yaml : rawDseYaml) { - executeUnsanitized("updatedseconf", "-y", yaml); - } - if (!dseWorkloads.isEmpty()) { - execute("setworkload", String.join(",", dseWorkloads)); - } - } - } - } - - public void nodetool(int node, String... args) { - execute(String.format("node%d nodetool %s", node, Joiner.on(" ").join(args))); - } - - public void dsetool(int node, String... args) { - execute(String.format("node%d dsetool %s", node, Joiner.on(" ").join(args))); - } - - public void reloadCore(int node, String keyspace, String table, boolean reindex) { - dsetool(node, "reload_core", keyspace + "." 
+ table, "reindex=" + reindex); - } - - public void start() { - if (started.compareAndSet(false, true)) { - List cmdAndArgs = Lists.newArrayList("start", jvmArgs, "--wait-for-binary-proto"); - updateJvmVersion(cmdAndArgs); - try { - execute(cmdAndArgs.toArray(new String[0])); - } catch (RuntimeException re) { - // if something went wrong starting CCM, see if we can also dump the error - executeCheckLogError(); - throw re; - } - } - } - - public void stop() { - if (started.compareAndSet(true, false)) { - execute("stop"); - } - } - - public void remove() { - execute("remove"); - } - - public void pause(int n) { - execute("node" + n, "pause"); - } - - public void resume(int n) { - execute("node" + n, "resume"); - } - - public void start(int n) { - List cmdAndArgs = Lists.newArrayList("node" + n, "start"); - updateJvmVersion(cmdAndArgs); - execute(cmdAndArgs.toArray(new String[0])); - } - - private void updateJvmVersion(List cmdAndArgs) { - overrideJvmVersionForDseWorkloads() - .ifPresent(jvmVersion -> cmdAndArgs.add(String.format("--jvm_version=%d", jvmVersion))); - } - - public void stop(int n) { - execute("node" + n, "stop"); - } - - public void add(int n, String dc) { - List addOptions = new ArrayList<>(); - addOptions.addAll(Arrays.asList("add", "-i", ipPrefix + n, "-d", dc, "node" + n)); - addOptions.addAll(Arrays.asList(DISTRIBUTION.getCcmOptions())); - execute(addOptions.toArray(new String[0])); - start(n); - } - - public void decommission(int n) { - nodetool(n, "decommission"); - } - - synchronized void execute(String... args) { - String command = - "ccm " - + String.join(" ", args) - + " --config-dir=" - + configDirectory.toFile().getAbsolutePath(); - - execute(CommandLine.parse(command)); - } - - synchronized void executeUnsanitized(String... 
args) { - String command = "ccm "; - - CommandLine cli = CommandLine.parse(command); - for (String arg : args) { - cli.addArgument(arg, false); - } - cli.addArgument("--config-dir=" + configDirectory.toFile().getAbsolutePath()); - - execute(cli); - } - - private void execute(CommandLine cli) { - execute(cli, false); - } - - private void executeCheckLogError() { - String command = "ccm checklogerror --config-dir=" + configDirectory.toFile().getAbsolutePath(); - // force all logs to be error logs - execute(CommandLine.parse(command), true); - } - - private void execute(CommandLine cli, boolean forceErrorLogging) { - if (forceErrorLogging) { - LOG.error("Executing: " + cli); - } else { - LOG.debug("Executing: " + cli); - } - ExecuteWatchdog watchDog = new ExecuteWatchdog(TimeUnit.MINUTES.toMillis(10)); - try (LogOutputStream outStream = - new LogOutputStream() { - @Override - protected void processLine(String line, int logLevel) { - if (forceErrorLogging) { - LOG.error("ccmout> {}", line); - } else { - LOG.debug("ccmout> {}", line); - } - } - }; - LogOutputStream errStream = - new LogOutputStream() { - @Override - protected void processLine(String line, int logLevel) { - LOG.error("ccmerr> {}", line); - } - }) { - Executor executor = new DefaultExecutor(); - ExecuteStreamHandler streamHandler = new PumpStreamHandler(outStream, errStream); - executor.setStreamHandler(streamHandler); - executor.setWatchdog(watchDog); - - int retValue = executor.execute(cli); - if (retValue != 0) { - LOG.error("Non-zero exit code ({}) returned from executing ccm command: {}", retValue, cli); - } - } catch (IOException ex) { - if (watchDog.killedProcess()) { - throw new RuntimeException("The command '" + cli + "' was killed after 10 minutes"); - } else { - throw new RuntimeException("The command '" + cli + "' failed to execute", ex); - } - } - } - - @Override - public void close() { - if (created.compareAndSet(true, false)) { - remove(); - } - } - - /** - * Extracts a keystore from the 
classpath into a temporary file. - * - *

This is needed as the keystore could be part of a built test jar used by other projects, and - * they need to be extracted to a file system so cassandra may use them. - * - * @param storePath Path in classpath where the keystore exists. - * @return The generated File. - */ - private static File createTempStore(String storePath) { - File f = null; - try (OutputStream os = new FileOutputStream(f = File.createTempFile("server", ".store"))) { - f.deleteOnExit(); - Resources.copy(CcmBridge.class.getResource(storePath), os); - } catch (IOException e) { - LOG.warn("Failure to write keystore, SSL-enabled servers may fail to start.", e); - } - return f; - } - - /** - * Get the current JVM major version (1.8.0_372 -> 8, 11.0.19 -> 11) - * - * @return major version of current JVM - */ - private static int getCurrentJvmMajorVersion() { - String version = System.getProperty("java.version"); - if (version.startsWith("1.")) { - version = version.substring(2, 3); - } else { - int dot = version.indexOf("."); - if (dot != -1) { - version = version.substring(0, dot); - } - } - return Integer.parseInt(version); - } - - private Optional overrideJvmVersionForDseWorkloads() { - if (getCurrentJvmMajorVersion() <= 8) { - return Optional.empty(); - } - - if (!isDistributionOf(BackendType.DSE)) { - return Optional.empty(); - } - - if (getDistributionVersion().compareTo(Version.V6_9_0) >= 0) { - // DSE 6.9.0 supports only JVM 11 onwards (also with graph workload) - return Optional.empty(); - } - - if (dseWorkloads.contains("graph")) { - return Optional.of(8); - } - - return Optional.empty(); - } - - private static String IN_MS_STR = "_in_ms"; - private static int IN_MS_STR_LENGTH = IN_MS_STR.length(); - private static String ENABLE_STR = "enable_"; - private static int ENABLE_STR_LENGTH = ENABLE_STR.length(); - private static String IN_KB_STR = "_in_kb"; - private static int IN_KB_STR_LENGTH = IN_KB_STR.length(); - - @SuppressWarnings("unused") - private String getConfigKey(String 
originalKey, Object originalValue, Version cassandraVersion) { - - // At least for now we won't support substitutions on nested keys. This requires an extra - // traversal of the string - // but we'll live with that for now - if (originalKey.contains(".")) return originalKey; - if (cassandraVersion.compareTo(Version.V4_1_0) < 0) return originalKey; - if (originalKey.endsWith(IN_MS_STR)) - return originalKey.substring(0, originalKey.length() - IN_MS_STR_LENGTH); - if (originalKey.startsWith(ENABLE_STR)) - return originalKey.substring(ENABLE_STR_LENGTH) + "_enabled"; - if (originalKey.endsWith(IN_KB_STR)) - return originalKey.substring(0, originalKey.length() - IN_KB_STR_LENGTH); - return originalKey; - } - - private String getConfigValue( - String originalKey, Object originalValue, Version cassandraVersion) { - - String originalValueStr = originalValue.toString(); - if (cassandraVersion.compareTo(Version.V4_1_0) < 0) return originalValueStr; - if (originalKey.endsWith(IN_MS_STR)) return originalValueStr + "ms"; - if (originalKey.endsWith(IN_KB_STR)) return originalValueStr + "KiB"; - return originalValueStr; - } - - public static Builder builder() { - return new Builder(); - } - - public static class Builder { - private int[] nodes = {1}; - private final Map cassandraConfiguration = new LinkedHashMap<>(); - private final Map dseConfiguration = new LinkedHashMap<>(); - private final List dseRawYaml = new ArrayList<>(); - private final List jvmArgs = new ArrayList<>(); - private String ipPrefix = "127.0.0."; - private final List createOptions = new ArrayList<>(); - private final List dseWorkloads = new ArrayList<>(); - - private final Path configDirectory; - - private Builder() { - try { - this.configDirectory = Files.createTempDirectory("ccm"); - // mark the ccm temp directories for deletion when the JVM exits - this.configDirectory.toFile().deleteOnExit(); - } catch (IOException e) { - // change to unchecked for now. 
- throw new RuntimeException(e); - } - // disable auto_snapshot by default to reduce disk usage when destroying schema. - withCassandraConfiguration("auto_snapshot", "false"); - } - - public Builder withCassandraConfiguration(String key, Object value) { - cassandraConfiguration.put(key, value); - return this; - } - - public Builder withDseConfiguration(String key, Object value) { - dseConfiguration.put(key, value); - return this; - } - - public Builder withDseConfiguration(String rawYaml) { - dseRawYaml.add(rawYaml); - return this; - } - - public Builder withJvmArgs(String... jvmArgs) { - Collections.addAll(this.jvmArgs, jvmArgs); - return this; - } - - public Builder withNodes(int... nodes) { - this.nodes = nodes; - return this; - } - - public Builder withIpPrefix(String ipPrefix) { - this.ipPrefix = ipPrefix; - return this; - } - - /** Adds an option to the {@code ccm create} command. */ - public Builder withCreateOption(String option) { - this.createOptions.add(option); - return this; - } - - /** Enables SSL encryption. */ - public Builder withSsl() { - cassandraConfiguration.put("client_encryption_options.enabled", "true"); - cassandraConfiguration.put("client_encryption_options.optional", "false"); - cassandraConfiguration.put( - "client_encryption_options.keystore", DEFAULT_SERVER_KEYSTORE_FILE.getAbsolutePath()); - cassandraConfiguration.put( - "client_encryption_options.keystore_password", DEFAULT_SERVER_KEYSTORE_PASSWORD); - return this; - } - - public Builder withSslLocalhostCn() { - cassandraConfiguration.put("client_encryption_options.enabled", "true"); - cassandraConfiguration.put("client_encryption_options.optional", "false"); - cassandraConfiguration.put( - "client_encryption_options.keystore", - DEFAULT_SERVER_LOCALHOST_KEYSTORE_FILE.getAbsolutePath()); - cassandraConfiguration.put( - "client_encryption_options.keystore_password", DEFAULT_SERVER_KEYSTORE_PASSWORD); - return this; - } - - /** Enables client authentication. 
This also enables encryption ({@link #withSsl()}. */ - public Builder withSslAuth() { - withSsl(); - cassandraConfiguration.put("client_encryption_options.require_client_auth", "true"); - cassandraConfiguration.put( - "client_encryption_options.truststore", DEFAULT_SERVER_TRUSTSTORE_FILE.getAbsolutePath()); - cassandraConfiguration.put( - "client_encryption_options.truststore_password", DEFAULT_SERVER_TRUSTSTORE_PASSWORD); - return this; - } - - public Builder withDseWorkloads(String... workloads) { - this.dseWorkloads.addAll(Arrays.asList(workloads)); - return this; - } - - public CcmBridge build() { - return new CcmBridge( - configDirectory, - nodes, - ipPrefix, - cassandraConfiguration, - dseConfiguration, - dseRawYaml, - createOptions, - jvmArgs, - dseWorkloads); - } - } - - public interface VersionComparator { - boolean accept(Version distribution, Version cassandra); - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmRule.java deleted file mode 100644 index e6483c37877..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmRule.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.ccm; - -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.lang.reflect.Method; -import org.junit.AssumptionViolatedException; -import org.junit.experimental.categories.Category; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A rule that creates a globally shared single node Ccm cluster that is only shut down after the - * JVM exists. - * - *

Note that this rule should be considered mutually exclusive with {@link CustomCcmRule}. - * Creating instances of these rules can create resource issues. - */ -public class CcmRule extends BaseCcmRule { - - private static final CcmRule INSTANCE = new CcmRule(); - - private volatile boolean started = false; - - private CcmRule() { - super(configureCcmBridge(CcmBridge.builder()).build()); - } - - public static CcmBridge.Builder configureCcmBridge(CcmBridge.Builder builder) { - Logger logger = LoggerFactory.getLogger(CcmRule.class); - String customizerClass = - System.getProperty( - "ccmrule.bridgecustomizer", - "com.datastax.oss.driver.api.testinfra.ccm.DefaultCcmBridgeBuilderCustomizer"); - try { - Class clazz = Class.forName(customizerClass); - Method method = clazz.getMethod("configureBuilder", CcmBridge.Builder.class); - return (CcmBridge.Builder) method.invoke(null, builder); - } catch (Exception e) { - logger.warn( - "Could not find CcmRule customizer {}, will use the default CcmBridge.", - customizerClass, - e); - return builder; - } - } - - @Override - protected synchronized void before() { - if (!started) { - // synchronize before so blocks on other before() call waiting to finish. - super.before(); - started = true; - } - } - - @Override - protected void after() { - // override after so we don't remove when done. - } - - @Override - public Statement apply(Statement base, Description description) { - - Category categoryAnnotation = description.getTestClass().getAnnotation(Category.class); - if (categoryAnnotation == null - || categoryAnnotation.value().length != 1 - || categoryAnnotation.value()[0] != ParallelizableTests.class) { - return new Statement() { - @Override - public void evaluate() { - throw new AssumptionViolatedException( - String.format( - "Tests using %s must be annotated with `@Category(%s.class)`. 
Description: %s", - CcmRule.class.getSimpleName(), - ParallelizableTests.class.getSimpleName(), - description)); - } - }; - } - - return super.apply(base, description); - } - - public static CcmRule getInstance() { - return INSTANCE; - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CustomCcmRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CustomCcmRule.java deleted file mode 100644 index 5ea1bf7ed3c..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CustomCcmRule.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.ccm; - -import java.util.concurrent.atomic.AtomicReference; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A rule that creates a ccm cluster that can be used in a test. This should be used if you plan on - * creating clusters with unique configurations, such as using multiple nodes, authentication, ssl - * and so on. If you do not plan on doing this at all in your tests, consider using {@link CcmRule} - * which creates a global single node CCM cluster that may be shared among tests. - * - *

Note that this rule should be considered mutually exclusive with {@link CcmRule}. Creating - * instances of these rules can create resource issues. - */ -public class CustomCcmRule extends BaseCcmRule { - - private static final Logger LOG = LoggerFactory.getLogger(CustomCcmRule.class); - private static final AtomicReference CURRENT = new AtomicReference<>(); - - CustomCcmRule(CcmBridge ccmBridge) { - super(ccmBridge); - } - - @Override - protected void before() { - if (CURRENT.get() == null && CURRENT.compareAndSet(null, this)) { - try { - super.before(); - } catch (Exception e) { - // ExternalResource will not call after() when before() throws an exception - // Let's try and clean up and release the lock we have in CURRENT - LOG.warn( - "Error in CustomCcmRule before() method, attempting to clean up leftover state", e); - try { - after(); - } catch (Exception e1) { - LOG.warn("Error cleaning up CustomCcmRule before() failure", e1); - e.addSuppressed(e1); - } - throw e; - } - } else if (CURRENT.get() != this) { - throw new IllegalStateException( - "Attempting to use a Ccm rule while another is in use. This is disallowed"); - } - } - - @Override - protected void after() { - try { - super.after(); - } finally { - CURRENT.compareAndSet(this, null); - } - } - - public CcmBridge getCcmBridge() { - return ccmBridge; - } - - public static Builder builder() { - return new Builder(); - } - - public static class Builder { - - private final CcmBridge.Builder bridgeBuilder = CcmBridge.builder(); - - public Builder withNodes(int... 
nodes) { - bridgeBuilder.withNodes(nodes); - return this; - } - - public Builder withCassandraConfiguration(String key, Object value) { - bridgeBuilder.withCassandraConfiguration(key, value); - return this; - } - - public Builder withDseConfiguration(String key, Object value) { - bridgeBuilder.withDseConfiguration(key, value); - return this; - } - - public Builder withDseConfiguration(String rawYaml) { - bridgeBuilder.withDseConfiguration(rawYaml); - return this; - } - - public Builder withDseWorkloads(String... workloads) { - bridgeBuilder.withDseWorkloads(workloads); - return this; - } - - public Builder withJvmArgs(String... jvmArgs) { - bridgeBuilder.withJvmArgs(jvmArgs); - return this; - } - - public Builder withCreateOption(String option) { - bridgeBuilder.withCreateOption(option); - return this; - } - - public Builder withSsl() { - bridgeBuilder.withSsl(); - return this; - } - - public Builder withSslLocalhostCn() { - bridgeBuilder.withSslLocalhostCn(); - return this; - } - - public Builder withSslAuth() { - bridgeBuilder.withSslAuth(); - return this; - } - - public CustomCcmRule build() { - return new CustomCcmRule(bridgeBuilder.build()); - } - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DefaultCcmBridgeBuilderCustomizer.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DefaultCcmBridgeBuilderCustomizer.java deleted file mode 100644 index 0819f785446..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DefaultCcmBridgeBuilderCustomizer.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.ccm; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; - -/** @see CcmRule */ -@SuppressWarnings("unused") -public class DefaultCcmBridgeBuilderCustomizer { - - public static CcmBridge.Builder configureBuilder(CcmBridge.Builder builder) { - if (!CcmBridge.isDistributionOf( - BackendType.DSE, (dist, cass) -> dist.nextStable().compareTo(Version.V4_0_0) >= 0) - || CcmBridge.isDistributionOf(BackendType.HCD)) { - builder.withCassandraConfiguration("enable_materialized_views", true); - builder.withCassandraConfiguration("enable_sasi_indexes", true); - } - if (CcmBridge.getDistributionVersion().nextStable().compareTo(Version.V3_0_0) >= 0) { - builder.withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0"); - builder.withJvmArgs("-Dcassandra.skip_wait_for_gossip_to_settle=0"); - builder.withCassandraConfiguration("num_tokens", "1"); - builder.withCassandraConfiguration("initial_token", "0"); - } - return builder; - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DistributionCassandraVersions.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DistributionCassandraVersions.java deleted file mode 100644 index 9f7634d1b37..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DistributionCassandraVersions.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.ccm; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedMap; -import java.util.HashMap; -import java.util.Map; - -/** Defines mapping of various distributions to shipped Apache Cassandra version. 
*/ -public abstract class DistributionCassandraVersions { - private static final Map> mappings = - new HashMap<>(); - - static { - { - // DSE - ImmutableSortedMap dse = - ImmutableSortedMap.of( - Version.V1_0_0, CcmBridge.V2_1_19, - Version.V5_0_0, CcmBridge.V3_0_15, - CcmBridge.V5_1_0, CcmBridge.V3_10, - CcmBridge.V6_0_0, CcmBridge.V4_0_0); - mappings.put(BackendType.DSE, dse); - } - { - // HCD - ImmutableSortedMap hcd = - ImmutableSortedMap.of(Version.V1_0_0, CcmBridge.V4_0_11); - mappings.put(BackendType.HCD, hcd); - } - } - - public static Version getCassandraVersion(BackendType type, Version version) { - ImmutableSortedMap mapping = mappings.get(type); - if (mapping == null) { - return null; - } - return mapping.floorEntry(version).getValue(); - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/SchemaChangeSynchronizer.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/SchemaChangeSynchronizer.java deleted file mode 100644 index 093d1d3f9f9..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/SchemaChangeSynchronizer.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.ccm; - -import java.util.concurrent.Semaphore; - -/** - * Running multiple parallel integration tests may fail due to query timeout when trying to apply - * several schema changes at once. Limit concurrently executed DDLs to 5. - */ -public class SchemaChangeSynchronizer { - private static final Semaphore lock = new Semaphore(5); - - public static void withLock(Runnable callback) { - try { - lock.acquire(); - try { - callback.run(); - } finally { - lock.release(); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new RuntimeException("Thread interrupted wile waiting to obtain DDL lock", e); - } - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/NodeComparator.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/NodeComparator.java deleted file mode 100644 index 3e51ad10e7a..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/NodeComparator.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.loadbalancing; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.primitives.UnsignedBytes; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.Comparator; - -public final class NodeComparator implements Comparator { - - public static final NodeComparator INSTANCE = new NodeComparator(); - - private static final byte[] EMPTY = {}; - - private NodeComparator() {} - - @Override - public int compare(Node node1, Node node2) { - // compare address bytes, byte by byte. - byte[] address1 = - node1 - .getBroadcastAddress() - .map(InetSocketAddress::getAddress) - .map(InetAddress::getAddress) - .orElse(EMPTY); - byte[] address2 = - node2 - .getBroadcastAddress() - .map(InetSocketAddress::getAddress) - .map(InetAddress::getAddress) - .orElse(EMPTY); - - int result = UnsignedBytes.lexicographicalComparator().compare(address1, address2); - if (result != 0) { - return result; - } - - int port1 = node1.getBroadcastAddress().map(InetSocketAddress::getPort).orElse(0); - int port2 = node2.getBroadcastAddress().map(InetSocketAddress::getPort).orElse(0); - return port1 - port2; - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/SortingLoadBalancingPolicy.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/SortingLoadBalancingPolicy.java deleted file mode 100644 index a0fa292b0bb..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/SortingLoadBalancingPolicy.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.loadbalancing; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayDeque; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; - -public class SortingLoadBalancingPolicy implements LoadBalancingPolicy { - - private final Set nodes = new TreeSet<>(NodeComparator.INSTANCE); - - @SuppressWarnings("unused") - public SortingLoadBalancingPolicy(DriverContext context, String profileName) { - // constructor needed for loading via config. 
- } - - public SortingLoadBalancingPolicy() {} - - @Override - public void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter) { - this.nodes.addAll(nodes.values()); - this.nodes.forEach(n -> distanceReporter.setDistance(n, NodeDistance.LOCAL)); - } - - @NonNull - @Override - public Queue newQueryPlan(@Nullable Request request, @Nullable Session session) { - return new ArrayDeque<>(nodes); - } - - @Override - public void onAdd(@NonNull Node node) { - this.nodes.add(node); - } - - @Override - public void onUp(@NonNull Node node) { - onAdd(node); - } - - @Override - public void onDown(@NonNull Node node) { - onRemove(node); - } - - @Override - public void onRemove(@NonNull Node node) { - this.nodes.remove(node); - } - - @Override - public void close() {} -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirement.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirement.java deleted file mode 100644 index 9b1400b6313..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirement.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.requirement; - -import java.lang.annotation.Repeatable; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; - -/** - * Annotation for a Class or Method that defines a database backend Version requirement. If the - * type/version in use does not meet the requirement, the test is skipped. - */ -@Repeatable(BackendRequirements.class) -@Retention(RetentionPolicy.RUNTIME) -public @interface BackendRequirement { - BackendType type(); - - String minInclusive() default ""; - - String maxExclusive() default ""; - - String description() default ""; -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirementRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirementRule.java deleted file mode 100644 index 343861571e0..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirementRule.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.requirement; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import org.junit.AssumptionViolatedException; -import org.junit.rules.ExternalResource; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; - -public class BackendRequirementRule extends ExternalResource { - @Override - public Statement apply(Statement base, Description description) { - if (meetsDescriptionRequirements(description)) { - return super.apply(base, description); - } else { - // requirements not met, throw reasoning assumption to skip test - return new Statement() { - @Override - public void evaluate() { - throw new AssumptionViolatedException(buildReasonString(description)); - } - }; - } - } - - protected static BackendType getBackendType() { - return CcmBridge.DISTRIBUTION; - } - - protected static Version getVersion() { - return CcmBridge.VERSION; - } - - public static boolean meetsDescriptionRequirements(Description description) { - return VersionRequirement.meetsAny( - VersionRequirement.fromAnnotations(description), getBackendType(), getVersion()); - } - - /* Note, duplicating annotation processing from #meetsDescriptionRequirements */ - public static String buildReasonString(Description description) { - return VersionRequirement.buildReasonString( - VersionRequirement.fromAnnotations(description), getBackendType(), getVersion()); - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirements.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirements.java deleted file mode 100644 index c097c7bd430..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirements.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.requirement; - -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; - -/** Annotation to allow @BackendRequirement to be repeatable. */ -@Retention(RetentionPolicy.RUNTIME) -public @interface BackendRequirements { - BackendRequirement[] value(); -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendType.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendType.java deleted file mode 100644 index e0058ca324a..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendType.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.requirement; - -public enum BackendType { - CASSANDRA("Apache Cassandra"), - DSE("DSE"), - HCD("HCD"); - - final String friendlyName; - - BackendType(String friendlyName) { - this.friendlyName = friendlyName; - } - - public String getFriendlyName() { - return friendlyName; - } - - public String[] getCcmOptions() { - if (this == CASSANDRA) { - return new String[0]; - } - return new String[] {"--" + name().toLowerCase()}; - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirement.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirement.java deleted file mode 100644 index 6b184490a41..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirement.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.requirement; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; -import com.datastax.oss.driver.api.testinfra.DseRequirement; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Optional; -import java.util.stream.Collectors; -import org.junit.runner.Description; - -/** - * Used to unify the requirements specified by - * annotations @CassandraRequirement, @DseRequirment, @BackendRequirement - */ -public class VersionRequirement { - final BackendType backendType; - final Optional minInclusive; - final Optional maxExclusive; - final String description; - - public VersionRequirement( - BackendType backendType, String minInclusive, String maxExclusive, String description) { - this.backendType = backendType; - this.minInclusive = - minInclusive.isEmpty() ? Optional.empty() : Optional.of(Version.parse(minInclusive)); - this.maxExclusive = - maxExclusive.isEmpty() ? 
Optional.empty() : Optional.of(Version.parse(maxExclusive)); - this.description = description; - } - - public BackendType getBackendType() { - return backendType; - } - - public Optional getMinInclusive() { - return minInclusive; - } - - public Optional getMaxExclusive() { - return maxExclusive; - } - - public String readableString() { - final String versionRange; - if (minInclusive.isPresent() && maxExclusive.isPresent()) { - versionRange = - String.format("%s or greater, but less than %s", minInclusive.get(), maxExclusive.get()); - } else if (minInclusive.isPresent()) { - versionRange = String.format("%s or greater", minInclusive.get()); - } else if (maxExclusive.isPresent()) { - versionRange = String.format("less than %s", maxExclusive.get()); - } else { - versionRange = "any version"; - } - - if (!description.isEmpty()) { - return String.format("%s %s [%s]", backendType.getFriendlyName(), versionRange, description); - } else { - return String.format("%s %s", backendType.getFriendlyName(), versionRange); - } - } - - public static VersionRequirement fromBackendRequirement(BackendRequirement requirement) { - return new VersionRequirement( - requirement.type(), - requirement.minInclusive(), - requirement.maxExclusive(), - requirement.description()); - } - - public static VersionRequirement fromCassandraRequirement(CassandraRequirement requirement) { - return new VersionRequirement( - BackendType.CASSANDRA, requirement.min(), requirement.max(), requirement.description()); - } - - public static VersionRequirement fromDseRequirement(DseRequirement requirement) { - return new VersionRequirement( - BackendType.DSE, requirement.min(), requirement.max(), requirement.description()); - } - - public static Collection fromAnnotations(Description description) { - // collect all requirement annotation types - CassandraRequirement cassandraRequirement = - description.getAnnotation(CassandraRequirement.class); - DseRequirement dseRequirement = 
description.getAnnotation(DseRequirement.class); - // matches methods/classes with one @BackendRequirement annotation - BackendRequirement backendRequirement = description.getAnnotation(BackendRequirement.class); - // matches methods/classes with two or more @BackendRequirement annotations - BackendRequirements backendRequirements = description.getAnnotation(BackendRequirements.class); - - // build list of required versions - Collection requirements = new ArrayList<>(); - if (cassandraRequirement != null) { - requirements.add(VersionRequirement.fromCassandraRequirement(cassandraRequirement)); - } - if (dseRequirement != null) { - requirements.add(VersionRequirement.fromDseRequirement(dseRequirement)); - } - if (backendRequirement != null) { - requirements.add(VersionRequirement.fromBackendRequirement(backendRequirement)); - } - if (backendRequirements != null) { - Arrays.stream(backendRequirements.value()) - .forEach(r -> requirements.add(VersionRequirement.fromBackendRequirement(r))); - } - return requirements; - } - - public static boolean meetsAny( - Collection requirements, - BackendType configuredBackend, - Version configuredVersion) { - // special case: if there are no requirements then any backend/version is sufficient - if (requirements.isEmpty()) { - return true; - } - - return requirements.stream() - .anyMatch( - requirement -> { - // requirement is different db type - if (requirement.getBackendType() != configuredBackend) { - return false; - } - - // configured version is less than requirement min - if (requirement.getMinInclusive().isPresent()) { - if (requirement.getMinInclusive().get().compareTo(configuredVersion) > 0) { - return false; - } - } - - // configured version is greater than or equal to requirement max - if (requirement.getMaxExclusive().isPresent()) { - if (requirement.getMaxExclusive().get().compareTo(configuredVersion) <= 0) { - return false; - } - } - - // backend type and version range match - return true; - }); - } - - public static 
String buildReasonString( - Collection requirements, BackendType backend, Version version) { - return String.format( - "Test requires one of:\n%s\nbut configuration is %s %s.", - requirements.stream() - .map(req -> String.format(" - %s", req.readableString())) - .collect(Collectors.joining("\n")), - backend.getFriendlyName(), - version); - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/CqlSessionRuleBuilder.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/CqlSessionRuleBuilder.java deleted file mode 100644 index 8f392dca0bf..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/CqlSessionRuleBuilder.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.session; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigValueFactory; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; - -public class CqlSessionRuleBuilder extends SessionRuleBuilder { - - private static final AtomicInteger GRAPH_NAME_INDEX = new AtomicInteger(); - - public CqlSessionRuleBuilder(CassandraResourceRule cassandraResource) { - super(cassandraResource); - } - - @Override - public SessionRule build() { - - final String graphName; - final DriverConfigLoader actualLoader; - - Supplier actualSupplier; - - if (createGraph) { - graphName = "dsedrivertests_" + GRAPH_NAME_INDEX.getAndIncrement(); - - // Inject the generated graph name in the provided configuration, so that the test doesn't - // need to set it explicitly on every statement. - if (loader == null) { - // This would normally be handled in DseSessionBuilder, do it early because we need it now - loader = new DefaultDriverConfigLoader(); - } else { - // To keep this relatively simple we assume that if the config loader was provided in a - // test, it is the Typesafe-config based one. This is always true in our integration tests. 
- assertThat(loader).isInstanceOf(DefaultDriverConfigLoader.class); - } - Supplier originalSupplier = ((DefaultDriverConfigLoader) loader).getConfigSupplier(); - actualSupplier = - () -> - originalSupplier - .get() - .withValue( - DseDriverOption.GRAPH_NAME.getPath(), - ConfigValueFactory.fromAnyRef(graphName)); - } else { - graphName = null; - if (loader == null) { - loader = new DefaultDriverConfigLoader(); - } - - actualSupplier = ((DefaultDriverConfigLoader) loader).getConfigSupplier(); - } - - actualLoader = - new DefaultDriverConfigLoader( - () -> - graphProtocol != null - ? actualSupplier - .get() - .withValue( - DseDriverOption.GRAPH_SUB_PROTOCOL.getPath(), - ConfigValueFactory.fromAnyRef(graphProtocol)) - // will use the protocol from the config file (in application.conf if - // defined or in reference.conf) - : actualSupplier.get()); - - return new SessionRule<>( - cassandraResource, - createKeyspace, - nodeStateListener, - schemaChangeListener, - actualLoader, - graphName, - isCoreGraph); - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRule.java deleted file mode 100644 index 3b792374769..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRule.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.session; - -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; -import com.datastax.oss.driver.api.testinfra.ccm.BaseCcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import java.util.Objects; -import org.junit.rules.ExternalResource; - -/** - * Creates and manages a {@link Session} instance for a test. - * - *

Use it in conjunction with a {@link CassandraResourceRule} that creates the server resource to - * connect to: - * - *

{@code
- * public static @ClassRule CcmRule server = CcmRule.getInstance();
- *
- * // Or: public static @ClassRule SimulacronRule server =
- * //    new SimulacronRule(ClusterSpec.builder().withNodes(3));
- *
- * public static @ClassRule SessionRule sessionRule = new SessionRule(server);
- *
- * public void @Test should_do_something() {
- *   sessionRule.session().execute("some query");
- * }
- * }
- * - * Optionally, it can also create a dedicated keyspace (useful to isolate tests that share a common - * server). - * - *

If you would rather create a new keyspace manually in each test, see the utility methods in - * {@link SessionUtils}. - */ -public class SessionRule extends ExternalResource { - - private static final Version V6_8_0 = Objects.requireNonNull(Version.parse("6.8.0")); - - // the CCM or Simulacron rule to depend on - private final CassandraResourceRule cassandraResource; - private final NodeStateListener nodeStateListener; - private final SchemaChangeListener schemaChangeListener; - private final CqlIdentifier keyspace; - private final DriverConfigLoader configLoader; - private final String graphName; - private final boolean isCoreGraph; - - // the session that is auto created for this rule and is tied to the given keyspace. - private SessionT session; - - private DriverExecutionProfile slowProfile; - - /** - * Returns a builder to construct an instance with a fluent API. - * - * @param cassandraResource resource to create clusters for. - */ - public static CqlSessionRuleBuilder builder(CassandraResourceRule cassandraResource) { - return new CqlSessionRuleBuilder(cassandraResource); - } - - /** @see #builder(CassandraResourceRule) */ - public SessionRule( - CassandraResourceRule cassandraResource, - boolean createKeyspace, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - DriverConfigLoader configLoader, - String graphName, - boolean isCoreGraph) { - this.cassandraResource = cassandraResource; - this.nodeStateListener = nodeStateListener; - this.schemaChangeListener = schemaChangeListener; - this.keyspace = - (cassandraResource instanceof SimulacronRule || !createKeyspace) - ? 
null - : SessionUtils.uniqueKeyspaceId(); - this.configLoader = configLoader; - this.graphName = graphName; - this.isCoreGraph = isCoreGraph; - } - - public SessionRule( - CassandraResourceRule cassandraResource, - boolean createKeyspace, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - DriverConfigLoader configLoader, - String graphName) { - this( - cassandraResource, - createKeyspace, - nodeStateListener, - schemaChangeListener, - configLoader, - graphName, - false); - } - - public SessionRule( - CassandraResourceRule cassandraResource, - boolean createKeyspace, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - DriverConfigLoader configLoader) { - this( - cassandraResource, - createKeyspace, - nodeStateListener, - schemaChangeListener, - configLoader, - null, - false); - } - - @Override - protected void before() { - session = - SessionUtils.newSession( - cassandraResource, null, nodeStateListener, schemaChangeListener, null, configLoader); - slowProfile = SessionUtils.slowProfile(session); - if (keyspace != null) { - SessionUtils.createKeyspace(session, keyspace, slowProfile); - session.execute( - SimpleStatement.newInstance(String.format("USE %s", keyspace.asCql(false))), - Statement.SYNC); - } - if (graphName != null) { - BaseCcmRule rule = - (cassandraResource instanceof BaseCcmRule) ? ((BaseCcmRule) cassandraResource) : null; - if (rule == null || !CcmBridge.isDistributionOf(BackendType.DSE)) { - throw new IllegalArgumentException("DseSessionRule should work with DSE."); - } - if (rule.getDistributionVersion().compareTo(V6_8_0) >= 0) { - session() - .execute( - ScriptGraphStatement.newInstance( - String.format( - "system.graph('%s').ifNotExists()%s.create()", - this.graphName, isCoreGraph ? 
".coreEngine()" : ".classicEngine()")) - .setSystemQuery(true), - ScriptGraphStatement.SYNC); - } else { - if (isCoreGraph) { - throw new IllegalArgumentException( - "Core graph is not supported for DSE version < " + V6_8_0); - } - session() - .execute( - ScriptGraphStatement.newInstance( - String.format("system.graph('%s').ifNotExists().create()", this.graphName)) - .setSystemQuery(true), - ScriptGraphStatement.SYNC); - } - } - } - - @Override - protected void after() { - if (graphName != null) { - session() - .execute( - ScriptGraphStatement.newInstance( - String.format("system.graph('%s').drop()", this.graphName)) - .setSystemQuery(true), - ScriptGraphStatement.SYNC); - } - if (keyspace != null) { - SchemaChangeSynchronizer.withLock( - () -> { - SessionUtils.dropKeyspace(session, keyspace, slowProfile); - }); - } - session.close(); - } - - /** @return the session created with this rule. */ - public SessionT session() { - return session; - } - - /** - * @return the identifier of the keyspace associated with this rule, or {@code null} if no - * keyspace was created (this is always the case if the server resource is a {@link - * SimulacronRule}). - */ - public CqlIdentifier keyspace() { - return keyspace; - } - - public String getGraphName() { - return graphName; - } - - /** @return a config profile where the request timeout is 30 seconds. * */ - public DriverExecutionProfile slowProfile() { - return slowProfile; - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRuleBuilder.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRuleBuilder.java deleted file mode 100644 index 62c5babbf1d..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRuleBuilder.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.session; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; - -public abstract class SessionRuleBuilder< - SelfT extends SessionRuleBuilder, SessionT extends Session> { - - protected final CassandraResourceRule cassandraResource; - protected boolean createKeyspace = true; - protected NodeStateListener nodeStateListener; - protected SchemaChangeListener schemaChangeListener; - protected DriverConfigLoader loader; - protected boolean createGraph; - protected boolean isCoreGraph; - protected String graphProtocol; - - @SuppressWarnings("unchecked") - protected final SelfT self = (SelfT) this; - - public SessionRuleBuilder(CassandraResourceRule cassandraResource) { - this.cassandraResource = cassandraResource; - } - - /** - * Whether to create a keyspace. - * - *

If this is set, the rule will create a keyspace with a name unique to this test (this allows - * multiple tests to run concurrently against the same server resource), and make the name - * available through {@link SessionRule#keyspace()}. The created session will be connected to this - * keyspace. - * - *

If this method is not called, the default value is {@code true}. - * - *

Note that this option is only valid with a {@link CcmRule}. If the server resource is a - * {@link SimulacronRule}, this option is ignored, no keyspace gets created, and {@link - * SessionRule#keyspace()} returns {@code null}. - */ - public SelfT withKeyspace(boolean createKeyspace) { - this.createKeyspace = createKeyspace; - return self; - } - - /** A set of options to override in the session configuration. */ - public SelfT withConfigLoader(DriverConfigLoader loader) { - this.loader = loader; - return self; - } - - public SelfT withNodeStateListener(NodeStateListener listener) { - this.nodeStateListener = listener; - return self; - } - - public SelfT withSchemaChangeListener(SchemaChangeListener listener) { - this.schemaChangeListener = listener; - return self; - } - - /** - * Configures the rule to create a new graph instance. - * - *

This assumes that the associated {@link CassandraResourceRule} is a DSE instance with the - * graph workload enabled. - * - *

The name of the graph will be injected in the session's configuration, so that all graph - * statements are automatically routed to it. It's also exposed via {@link - * SessionRule#getGraphName()}. - */ - public SelfT withCreateGraph() { - this.createGraph = true; - return self; - } - - public SelfT withCoreEngine() { - this.isCoreGraph = true; - return self; - } - - public SelfT withGraphProtocol(String graphProtocol) { - this.graphProtocol = graphProtocol; - return self; - } - - public abstract SessionRule build(); -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionUtils.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionUtils.java deleted file mode 100644 index 7536c0ffdc0..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionUtils.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.session; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.NodeFilterToDistanceEvaluatorAdapter; -import java.lang.reflect.Method; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Predicate; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Utility methods to manage {@link Session} instances manually. - * - *

Use this if you need to initialize a new session instance in each test method: - * - *

{@code
- * public static @ClassRule CcmRule server = CcmRule.getInstance();
- *
- * // Or: public static @ClassRule SimulacronRule server =
- * //    new SimulacronRule(ClusterSpec.builder().withNodes(3));
- *
- * public void @Test should_do_something() {
- *   try (Session session = TestUtils.newSession(server)) {
- *     session.execute("some query");
- *   }
- * }
- * }
- * - * The instances returned by {@code newSession()} methods are not managed automatically, you need to - * close them yourself (this is done with a try-with-resources block in the example above). - * - *

If you can share the same {@code Session} instance between all test methods, {@link - * SessionRule} provides a simpler alternative. - */ -public class SessionUtils { - - public static final String SESSION_BUILDER_CLASS_PROPERTY = "session.builder"; - - private static final Logger LOG = LoggerFactory.getLogger(SessionUtils.class); - private static final AtomicInteger keyspaceId = new AtomicInteger(); - private static final String DEFAULT_SESSION_CLASS_NAME = CqlSession.class.getName(); - - private static String getSessionBuilderClass() { - return System.getProperty(SESSION_BUILDER_CLASS_PROPERTY, DEFAULT_SESSION_CLASS_NAME); - } - - @SuppressWarnings("unchecked") - public static SessionBuilder baseBuilder() { - String sessionBuilderClass = getSessionBuilderClass(); - try { - Class clazz = Class.forName(sessionBuilderClass); - Method m = clazz.getMethod("builder"); - return (SessionBuilder) m.invoke(null); - } catch (Exception e) { - LOG.warn( - "Could not construct SessionBuilder from {} using builder(), using default " - + "implementation.", - sessionBuilderClass, - e); - return (SessionBuilder) CqlSession.builder(); - } - } - - public static ProgrammaticDriverConfigLoaderBuilder configLoaderBuilder() { - String sessionBuilderClass = getSessionBuilderClass(); - try { - Class clazz = Class.forName(sessionBuilderClass); - Method m = clazz.getMethod("configLoaderBuilder"); - return (ProgrammaticDriverConfigLoaderBuilder) m.invoke(null); - } catch (Exception e) { - if (!sessionBuilderClass.equals(DEFAULT_SESSION_CLASS_NAME)) { - LOG.warn( - "Could not construct ProgrammaticDriverConfigLoaderBuilder from {} using " - + "configLoaderBuilder(), using default implementation.", - sessionBuilderClass, - e); - } - return DriverConfigLoader.programmaticBuilder(); - } - } - - /** - * Creates a new instance of the driver's default {@code Session} implementation, using the nodes - * in the 0th DC of the provided Cassandra resource as contact points, and the default - * 
configuration augmented with the provided options. - */ - @SuppressWarnings("TypeParameterUnusedInFormals") - public static SessionT newSession( - CassandraResourceRule cassandraResource) { - return newSession(cassandraResource, null, null); - } - - @SuppressWarnings("TypeParameterUnusedInFormals") - public static SessionT newSession( - CassandraResourceRule cassandraResource, CqlIdentifier keyspace) { - return newSession(cassandraResource, keyspace, null, null, null); - } - - @SuppressWarnings("TypeParameterUnusedInFormals") - public static SessionT newSession( - CassandraResourceRule cassandraResourceRule, DriverConfigLoader loader) { - return newSession(cassandraResourceRule, null, null, null, null, loader); - } - - @SuppressWarnings("TypeParameterUnusedInFormals") - public static SessionT newSession( - CassandraResourceRule cassandraResourceRule, - CqlIdentifier keyspace, - DriverConfigLoader loader) { - return newSession(cassandraResourceRule, keyspace, null, null, null, loader); - } - - private static SessionBuilder builder( - CassandraResourceRule cassandraResource, - CqlIdentifier keyspace, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - Predicate nodeFilter) { - SessionBuilder builder = baseBuilder(); - builder - .addContactEndPoints(cassandraResource.getContactPoints()) - .withKeyspace(keyspace) - .withNodeStateListener(nodeStateListener) - .withSchemaChangeListener(schemaChangeListener); - if (nodeFilter != null) { - builder.withNodeDistanceEvaluator(new NodeFilterToDistanceEvaluatorAdapter(nodeFilter)); - } - return builder; - } - - @SuppressWarnings({"TypeParameterUnusedInFormals"}) - public static SessionT newSession( - CassandraResourceRule cassandraResource, - CqlIdentifier keyspace, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - Predicate nodeFilter) { - SessionBuilder builder = - builder(cassandraResource, keyspace, nodeStateListener, schemaChangeListener, nodeFilter); - 
return builder.build(); - } - - @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) - public static SessionT newSession( - CassandraResourceRule cassandraResource, - CqlIdentifier keyspace, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - Predicate nodeFilter, - DriverConfigLoader loader) { - SessionBuilder builder = - builder(cassandraResource, keyspace, nodeStateListener, schemaChangeListener, nodeFilter); - return (SessionT) builder.withConfigLoader(loader).build(); - } - - /** - * Generates a keyspace identifier that is guaranteed to be unique in the current classloader. - * - *

This is useful to isolate tests that share a common server resource. - */ - public static CqlIdentifier uniqueKeyspaceId() { - return CqlIdentifier.fromCql("ks_" + keyspaceId.getAndIncrement()); - } - - /** Creates a keyspace through the given session instance, with the given profile. */ - public static void createKeyspace( - Session session, CqlIdentifier keyspace, DriverExecutionProfile profile) { - SimpleStatement createKeyspace = - SimpleStatement.builder( - String.format( - "CREATE KEYSPACE %s WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };", - keyspace.asCql(false))) - .setExecutionProfile(profile) - .build(); - session.execute(createKeyspace, Statement.SYNC); - } - - /** - * Calls {@link #createKeyspace(Session, CqlIdentifier, DriverExecutionProfile)} with {@link - * #slowProfile(Session)} as the third argument. - * - *

Note that this creates a derived profile for each invocation, which has a slight performance - * overhead. Instead, consider building the profile manually with {@link #slowProfile(Session)}, - * and storing it in a local variable so it can be reused. - */ - public static void createKeyspace(Session session, CqlIdentifier keyspace) { - createKeyspace(session, keyspace, slowProfile(session)); - } - - /** Drops a keyspace through the given session instance, with the given profile. */ - public static void dropKeyspace( - Session session, CqlIdentifier keyspace, DriverExecutionProfile profile) { - session.execute( - SimpleStatement.builder(String.format("DROP KEYSPACE IF EXISTS %s", keyspace.asCql(false))) - .setExecutionProfile(profile) - .build(), - Statement.SYNC); - } - - /** - * Calls {@link #dropKeyspace(Session, CqlIdentifier, DriverExecutionProfile)} with {@link - * #slowProfile(Session)} as the third argument. - * - *

Note that this creates a derived profile for each invocation, which has a slight performance - * overhead. Instead, consider building the profile manually with {@link #slowProfile(Session)}, - * and storing it in a local variable so it can be reused. - */ - public static void dropKeyspace(Session session, CqlIdentifier keyspace) { - dropKeyspace(session, keyspace, slowProfile(session)); - } - - /** - * Builds a profile derived from the given cluster's default profile, with a higher request - * timeout (30 seconds) that is appropriate for DML operations. - */ - public static DriverExecutionProfile slowProfile(Session session) { - return session - .getContext() - .getConfig() - .getDefaultProfile() - .withString(DefaultDriverOption.REQUEST_TIMEOUT, "30s"); - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/QueryCounter.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/QueryCounter.java deleted file mode 100644 index 90a0050265c..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/QueryCounter.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.simulacron; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import com.datastax.oss.simulacron.server.BoundNode; -import com.datastax.oss.simulacron.server.BoundTopic; -import com.datastax.oss.simulacron.server.listener.QueryListener; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Predicate; - -/** - * A convenience utility that keeps track of the number of queries matching a filter received by - * {@link BoundNode}s. - * - *

One tricky thing about validating query counts in context of testing is in cases where you - * don't require waiting for a node to respond. In this case it's possible that a user would check - * count criteria before the node has even processed the message. This class offers the capability - * to wait a specified amount of time when asserting query arrival counts on nodes. - */ -public class QueryCounter { - - private final long beforeTimeout; - private final TimeUnit beforeUnit; - private final AtomicInteger totalCount = new AtomicInteger(0); - private final ConcurrentHashMap countMap = new ConcurrentHashMap<>(); - - public enum NotificationMode { - BEFORE_PROCESSING, - AFTER_PROCESSING - } - - private QueryCounter( - BoundTopic topic, - NotificationMode notificationMode, - Predicate queryLogFilter, - long beforeTimeout, - TimeUnit beforeUnit) { - this.beforeTimeout = beforeTimeout; - this.beforeUnit = beforeUnit; - QueryListener listener = - (boundNode, queryLog) -> { - totalCount.incrementAndGet(); - countMap.merge(boundNode.getId().intValue(), 1, Integer::sum); - }; - topic.registerQueryListener( - listener, notificationMode == NotificationMode.AFTER_PROCESSING, queryLogFilter); - } - - /** Creates a builder that tracks counts for the given {@link BoundTopic} (cluster, dc, node). */ - public static QueryCounterBuilder builder(BoundTopic topic) { - return new QueryCounterBuilder(topic); - } - - /** Clears all counters. */ - public void clearCounts() { - totalCount.set(0); - countMap.clear(); - } - - /** - * Asserts that the total number of requests received matching filter criteria matches the - * expected count within the configured time period. 
- */ - public void assertTotalCount(int expected) { - await() - .pollInterval(10, TimeUnit.MILLISECONDS) - .atMost(beforeTimeout, beforeUnit) - .untilAsserted(() -> assertThat(totalCount.get()).isEqualTo(expected)); - } - - /** - * Asserts that the total number of requests received matcher filter criteria matches the expected - * count for each node within the configured time period. - * - * @param counts The expected node counts, with the value at each index matching the expected - * count for that node id (i.e. index 0 = node id 0 expected count). - */ - public void assertNodeCounts(int... counts) { - Map expectedCounts = new HashMap<>(); - for (int id = 0; id < counts.length; id++) { - int count = counts[id]; - if (count > 0) { - expectedCounts.put(id, counts[id]); - } - } - await() - .pollInterval(10, TimeUnit.MILLISECONDS) - .atMost(beforeTimeout, beforeUnit) - .untilAsserted(() -> assertThat(countMap).containsAllEntriesOf(expectedCounts)); - } - - public static class QueryCounterBuilder { - - @SuppressWarnings("UnnecessaryLambda") - private static final Predicate DEFAULT_FILTER = (q) -> !q.getQuery().isEmpty(); - - private final BoundTopic topic; - - private Predicate queryLogFilter = DEFAULT_FILTER; - private NotificationMode notificationMode = NotificationMode.BEFORE_PROCESSING; - private long beforeTimeout = 1; - private TimeUnit beforeUnit = TimeUnit.SECONDS; - - private QueryCounterBuilder(BoundTopic topic) { - this.topic = topic; - } - - /** - * The filter to apply to consider a message received by the node, if not provided we consider - * all messages that are queries. - */ - public QueryCounterBuilder withFilter(Predicate queryLogFilter) { - this.queryLogFilter = queryLogFilter; - return this; - } - - /** Whether or not simulacron should notify before or after the message is processed. 
*/ - public QueryCounterBuilder withNotification(NotificationMode notificationMode) { - this.notificationMode = notificationMode; - return this; - } - - /** - * Up to how long we check counts to match. If counts don't match after this time, an exception - * is thrown. - */ - public QueryCounterBuilder before(long timeout, TimeUnit unit) { - this.beforeTimeout = timeout; - this.beforeUnit = unit; - return this; - } - - public QueryCounter build() { - return new QueryCounter(topic, notificationMode, queryLogFilter, beforeTimeout, beforeUnit); - } - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/SimulacronRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/SimulacronRule.java deleted file mode 100644 index d958d097a5d..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/SimulacronRule.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.simulacron; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.server.BoundCluster; -import com.datastax.oss.simulacron.server.Inet4Resolver; -import com.datastax.oss.simulacron.server.Server; -import java.net.SocketAddress; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -public class SimulacronRule extends CassandraResourceRule { - // TODO perhaps share server some other way - // TODO: Temporarily do not release addresses to ensure IPs are always ordered - // TODO: Add a way to configure the server for multiple nodes per ip - public static final Server server = - Server.builder() - .withAddressResolver( - new Inet4Resolver(9043) { - @Override - public void release(SocketAddress address) {} - }) - .build(); - - private final ClusterSpec clusterSpec; - private BoundCluster boundCluster; - - private final AtomicBoolean started = new AtomicBoolean(); - - public SimulacronRule(ClusterSpec clusterSpec) { - this.clusterSpec = clusterSpec; - } - - public SimulacronRule(ClusterSpec.Builder clusterSpec) { - this(clusterSpec.build()); - } - - /** - * Convenient fluent name for getting at bound cluster. - * - * @return default bound cluster for this simulacron instance. 
- */ - public BoundCluster cluster() { - return boundCluster; - } - - public BoundCluster getBoundCluster() { - return boundCluster; - } - - @Override - protected void before() { - // prevent duplicate initialization of rule - if (started.compareAndSet(false, true)) { - boundCluster = server.register(clusterSpec); - } - } - - @Override - protected void after() { - boundCluster.close(); - } - - /** @return All nodes in first data center. */ - @Override - public Set getContactPoints() { - return boundCluster.dc(0).getNodes().stream() - .map(node -> new DefaultEndPoint(node.inetSocketAddress())) - .collect(Collectors.toSet()); - } - - @Override - public ProtocolVersion getHighestProtocolVersion() { - return DefaultProtocolVersion.V4; - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/ConditionChecker.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/ConditionChecker.java deleted file mode 100644 index 931237189c5..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/ConditionChecker.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.testinfra.utils; - -import static org.assertj.core.api.Fail.fail; - -import java.util.Timer; -import java.util.TimerTask; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.BooleanSupplier; - -/** - * @deprecated We've replaced this home-grown utility by Awaitility in our tests. We're preserving - * it because it was part of the public test infrastructure API, but it won't be maintained - * anymore, and removed in the next major version. - * @see Awaitility homepage - */ -@Deprecated -public class ConditionChecker { - - private static final int DEFAULT_PERIOD_MILLIS = 500; - - private static final int DEFAULT_TIMEOUT_MILLIS = 60000; - - /** @deprecated see {@link ConditionChecker} */ - @Deprecated - public static class ConditionCheckerBuilder { - - private long timeout = DEFAULT_TIMEOUT_MILLIS; - - private TimeUnit timeoutUnit = TimeUnit.MILLISECONDS; - - private long period = DEFAULT_PERIOD_MILLIS; - - private TimeUnit periodUnit = TimeUnit.MILLISECONDS; - - private final Object predicate; - - private String description; - - ConditionCheckerBuilder(BooleanSupplier predicate) { - this.predicate = predicate; - } - - public ConditionCheckerBuilder(Runnable predicate) { - this.predicate = predicate; - } - - public ConditionCheckerBuilder every(long period, TimeUnit unit) { - this.period = period; - periodUnit = unit; - return this; - } - - public ConditionCheckerBuilder every(long periodMillis) { - period = periodMillis; - periodUnit = TimeUnit.MILLISECONDS; - return this; - } - - public ConditionCheckerBuilder before(long timeout, TimeUnit unit) { - this.timeout = timeout; - timeoutUnit = unit; - return this; - } - - public ConditionCheckerBuilder before(long timeoutMillis) { - timeout = timeoutMillis; - timeoutUnit = TimeUnit.MILLISECONDS; - return this; - } - - public 
ConditionCheckerBuilder as(String description) { - this.description = description; - return this; - } - - public void becomesTrue() { - new ConditionChecker(predicate, true, period, periodUnit, description) - .await(timeout, timeoutUnit); - } - - public void becomesFalse() { - new ConditionChecker(predicate, false, period, periodUnit, description) - .await(timeout, timeoutUnit); - } - } - - public static ConditionCheckerBuilder checkThat(BooleanSupplier predicate) { - return new ConditionCheckerBuilder(predicate); - } - - public static ConditionCheckerBuilder checkThat(Runnable predicate) { - return new ConditionCheckerBuilder(predicate); - } - - private final Object predicate; - private final boolean expectedOutcome; - private final String description; - private final Lock lock; - private final Condition condition; - private final Timer timer; - private Throwable lastFailure; - - public ConditionChecker( - Object predicate, - boolean expectedOutcome, - long period, - TimeUnit periodUnit, - String description) { - this.predicate = predicate; - this.expectedOutcome = expectedOutcome; - this.description = (description != null) ? description : this.toString(); - lock = new ReentrantLock(); - condition = lock.newCondition(); - timer = new Timer("condition-checker", true); - timer.schedule( - new TimerTask() { - @Override - public void run() { - checkCondition(); - } - }, - 0, - periodUnit.toMillis(period)); - } - - /** Waits until the predicate becomes true, or a timeout occurs, whichever happens first. 
*/ - public void await(long timeout, TimeUnit unit) { - boolean interrupted = false; - long nanos = unit.toNanos(timeout); - lock.lock(); - try { - while (!evalCondition()) { - if (nanos <= 0L) { - String msg = - String.format( - "Timeout after %s %s while waiting for '%s'", - timeout, unit.toString().toLowerCase(), description); - if (lastFailure != null) { - fail(msg, lastFailure); - } else { - fail(msg); - } - } - try { - nanos = condition.awaitNanos(nanos); - } catch (InterruptedException e) { - interrupted = true; - } - } - } finally { - timer.cancel(); - if (interrupted) Thread.currentThread().interrupt(); - } - } - - private void checkCondition() { - lock.lock(); - try { - if (evalCondition()) { - condition.signal(); - } - } finally { - lock.unlock(); - } - } - - private boolean evalCondition() { - if (predicate instanceof BooleanSupplier) { - return ((BooleanSupplier) predicate).getAsBoolean() == expectedOutcome; - } else if (predicate instanceof Runnable) { - boolean succeeded = true; - try { - ((Runnable) predicate).run(); - } catch (Throwable t) { - succeeded = false; - lastFailure = t; - } - return succeeded == expectedOutcome; - } else { - throw new AssertionError("Unsupported predicate type " + predicate.getClass()); - } - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/NodeUtils.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/NodeUtils.java deleted file mode 100644 index b1d41562287..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/NodeUtils.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.utils; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class NodeUtils { - - private static final Logger logger = LoggerFactory.getLogger(NodeUtils.class); - - private static final int TEST_BASE_NODE_WAIT = 60; - - public static void waitForUp(Node node) { - waitFor(node, TEST_BASE_NODE_WAIT, NodeState.UP); - } - - public static void waitForUp(Node node, int timeoutSeconds) { - waitFor(node, timeoutSeconds, NodeState.UP); - } - - public static void waitForDown(Node node) { - waitFor(node, TEST_BASE_NODE_WAIT * 3, NodeState.DOWN); - } - - public static void waitForDown(Node node, int timeoutSeconds) { - waitFor(node, timeoutSeconds, NodeState.DOWN); - } - - public static void waitFor(Node node, int timeoutSeconds, NodeState nodeState) { - logger.debug("Waiting for node {} to enter state {}", node, nodeState); - await() - .pollInterval(100, MILLISECONDS) - .atMost(timeoutSeconds, SECONDS) - .until(() -> node.getState().equals(nodeState)); - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/assertions/Assertions.java 
b/test-infra/src/main/java/com/datastax/oss/driver/assertions/Assertions.java deleted file mode 100644 index 277fe54195d..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/assertions/Assertions.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.assertions; - -import com.datastax.oss.driver.api.core.metadata.Node; - -public class Assertions extends org.assertj.core.api.Assertions { - public static NodeMetadataAssert assertThat(Node actual) { - return new NodeMetadataAssert(actual); - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/assertions/NodeMetadataAssert.java b/test-infra/src/main/java/com/datastax/oss/driver/assertions/NodeMetadataAssert.java deleted file mode 100644 index 9d4bd9be637..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/assertions/NodeMetadataAssert.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.assertions; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import org.assertj.core.api.AbstractAssert; - -public class NodeMetadataAssert extends AbstractAssert { - - public NodeMetadataAssert(Node actual) { - super(actual, NodeMetadataAssert.class); - } - - public NodeMetadataAssert isUp() { - assertThat(actual.getState()).isSameAs(NodeState.UP); - return this; - } - - public NodeMetadataAssert isDown() { - assertThat(actual.getState()).isSameAs(NodeState.DOWN); - return this; - } - - public NodeMetadataAssert isUnknown() { - assertThat(actual.getState()).isSameAs(NodeState.UNKNOWN); - return this; - } - - public NodeMetadataAssert isForcedDown() { - assertThat(actual.getState()).isSameAs(NodeState.FORCED_DOWN); - return this; - } - - public NodeMetadataAssert hasOpenConnections(int expected) { - assertThat(actual.getOpenConnections()).isEqualTo(expected); - return this; - } - - public NodeMetadataAssert isReconnecting() { - assertThat(actual.isReconnecting()).isTrue(); - return this; - } - - public NodeMetadataAssert isNotReconnecting() { - assertThat(actual.isReconnecting()).isFalse(); - return this; - } - - public NodeMetadataAssert isLocal() { - 
assertThat(actual.getDistance()).isSameAs(NodeDistance.LOCAL); - return this; - } - - public NodeMetadataAssert isRemote() { - assertThat(actual.getDistance()).isSameAs(NodeDistance.REMOTE); - return this; - } - - public NodeMetadataAssert isIgnored() { - assertThat(actual.getDistance()).isSameAs(NodeDistance.IGNORED); - return this; - } -} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/categories/IsolatedTests.java b/test-infra/src/main/java/com/datastax/oss/driver/categories/IsolatedTests.java deleted file mode 100644 index a8b8ea40a10..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/categories/IsolatedTests.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.categories; - -/** - * Defines a classification of tests that should be run in their own jvm fork. - * - *

This is generally because they need to set system properties. - */ -public interface IsolatedTests {} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/categories/ParallelizableTests.java b/test-infra/src/main/java/com/datastax/oss/driver/categories/ParallelizableTests.java deleted file mode 100644 index 2c718bc08d7..00000000000 --- a/test-infra/src/main/java/com/datastax/oss/driver/categories/ParallelizableTests.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.categories; - -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; - -/** - * Defines a classification of tests that can be run in parallel, namely: tests that use {@link - * CcmRule} (not {@link CustomCcmRule}), and tests that use Simulacron. 
- */ -public interface ParallelizableTests {} diff --git a/test-infra/src/main/resources/client.crt b/test-infra/src/main/resources/client.crt deleted file mode 100644 index 241e5f545d6..00000000000 --- a/test-infra/src/main/resources/client.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDqTCCApGgAwIBAgIERLZiJzANBgkqhkiG9w0BAQsFADCBhDELMAkGA1UEBhMCVVMxEzARBgNV -BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ -bmMuMRowGAYDVQQLExFEcml2ZXJzIGFuZCBUb29sczEWMBQGA1UEAxMNRHJpdmVyIENsaWVudDAe -Fw0xNTAzMTIwMTA4MjRaFw0xNTA2MTAwMTA4MjRaMIGEMQswCQYDVQQGEwJVUzETMBEGA1UECBMK -Q2FsaWZvcm5pYTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExFjAUBgNVBAoTDURhdGFTdGF4IEluYy4x -GjAYBgNVBAsTEURyaXZlcnMgYW5kIFRvb2xzMRYwFAYDVQQDEw1Ecml2ZXIgQ2xpZW50MIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAq0J0EoZQnOv2KRrvwA+1ZL9VZ3hDdQMwkDfitoGN -B6upvMUZpf8W+ReQmaY6yacYJthHzsZTd3G97Bw81/3VNHQB9PnXGmbupMLVXeFXysSCs1nPEdJl -TBbJXWHSh41AE4ejJaoCoTuigKGwI9lTbOOPDz/WMcio9nagsCJdsdG2+TxmR7RlyzEIANJ0wpnL -JEIeJmRS2loLVuCU4lZ9hDLN57cP9jEVD4Hk2kJD4Exx7G9HQFH+/63H6XtEDZsJcYldR7yBNsGr -pz9CupULCS1R40ePQEIlUXhM4ft/hsljQybLQvvfXNVTvk5WgY7LNaBJy6A/Tfg32SXEn3wUvwID -AQABoyEwHzAdBgNVHQ4EFgQUt+JDOeziZzHNYTFU/FL9PhDGqSQwDQYJKoZIhvcNAQELBQADggEB -ADOYpa1f9dPcVLq3RiMytajHo3YJ0AQqGRzVgngkeRFSdhyy/y+/8D0/V5s6QbNt/l6x3FxkoiTR -1Lptf96eylnS5AkGQTgogJP53cSNrqkDL0IyyvErSiATEXNpBKz6ivY+e5J1GLTfX9Ylu8limzIq -Y6YBnr8fMLD6XWraxtzzkJ9NIPhhaz696rxqr8ix6uy0mgxR/7/jUglreimZkLW40/qiABgX7Evw -UqpuJWmqNbQP9UXecx/UJ0hdxxxuxkZsoRoQwWYhkeT4aGCLJv/hjiNTfFAt23uHe0LVfW/HqykW -KoEj8F08mJVe5ZfpjF974i5qO9PU9XxvLfLjNvo= ------END CERTIFICATE----- diff --git a/test-infra/src/main/resources/client.key b/test-infra/src/main/resources/client.key deleted file mode 100644 index 05bb6fad83d..00000000000 --- a/test-infra/src/main/resources/client.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCrQnQShlCc6/Yp 
-Gu/AD7Vkv1VneEN1AzCQN+K2gY0Hq6m8xRml/xb5F5CZpjrJpxgm2EfOxlN3cb3s -HDzX/dU0dAH0+dcaZu6kwtVd4VfKxIKzWc8R0mVMFsldYdKHjUATh6MlqgKhO6KA -obAj2VNs448PP9YxyKj2dqCwIl2x0bb5PGZHtGXLMQgA0nTCmcskQh4mZFLaWgtW -4JTiVn2EMs3ntw/2MRUPgeTaQkPgTHHsb0dAUf7/rcfpe0QNmwlxiV1HvIE2waun -P0K6lQsJLVHjR49AQiVReEzh+3+GyWNDJstC+99c1VO+TlaBjss1oEnLoD9N+DfZ -JcSffBS/AgMBAAECggEAMHATNEoY8skqTmX3+XJ3847KMQGq0qWcTq3/yW7K3KiI -0YNNxc1oSfuIQmzpo69G/XWembUuVlItTWKPMufwLW3CP++KD0WdqawRfQQHOKpr -7R4xmvDPBb5MJcVNLlmdDekHE9gJ9mBPjeItV3ZYSivygnWjt2DxqQPUXvzZUzlu -munh3H5x6ehXVHDYGzosPgTpfmLHdlNfvF4x9bcklMMbCOyoPttXB2uHWOvUIS+/ -2YEkPmJfZdpudI7RqN75yYi7N8+gpnCTp530zA2yONyZ8THqEG/0nWy+02/zm5sm -gs1saYNwXME2IPekZNM/pJh2DtnTcxZaUt84q2nhAQKBgQDi8mgvE8ekbs6DLfKK -YAtTuOcLRpuvJqxtiQecqaumzgZnmHtkm6yuDNjieqB6OITudP4NdhPpyvOFJw46 -zTHMpGqZboxHuxoxMOgmyeiO+cdSwGHobr1zUcT8jVmLH7A+LtL5hHi+733EbCRh -sF04Vq9L46Q52mhcZKbs56U8MQKBgQDBLwotnOJH7hZD5sKS0o8/Sfj3pgzXEDpL -RfnrBPGhLn+1zhPEYsEW3mKI/yHiOZHNXZMQ6oYmxThg03qKTjaY8OIm8sg/zrlZ -M+o3wVnAzayrhw5gZ8DzqioHhEUMOAwwRFXRpfxqj8regrLjE9KaYty8ZYAFtwuH -W2S3+MVT7wKBgGQx7XlLXErmeNpFgN1Cxf1ylt7Nj5Jmmp3Jb8jkx9ne/8jg8ylZ -6YT2OxLSXONY7Kdyk29SADyp05WnxoqDaUcWF9IhkmFg45FwLC5j2f61nCCWuyMp -MQ8mvLdbmHrpxJ/PgGmU6NIzXe1IaU+P07g53S6+FBVOreCMt33ET5khAoGAGgKz -ZCDTdsvfw5S2bf5buzHCi9WXtP1CXBA37iTkQ8d2+oucrbx+Mw4ORlPTxBnsP7Jx -sr1hAqdbR+4xeZ2+TCliycu2mqDC4/fReWBXLVaEATRWAzT1DdnDfu+YPGTvfzA0 -Pd4TdmWV8w+19k0c9hyJi/Q+oIZczwTHMt4T85ECgYAe4J0ht6b6kPEG3d9vxmMN -T23S+ucYLHnfT1nacTuBZnMphWHhSqf8UJloIGpusxDU84MdAp22Jpd9SfPi9KK9 -yZY9WDJGeb0Yk7ML1R5GcAAkM78lUw/rS2VfMjQFnnUl2jVMS8adcm8/vHcpkcn7 -MufMEZzDpeO/aI8nbClktw== ------END PRIVATE KEY----- diff --git a/test-infra/src/main/resources/client.keystore b/test-infra/src/main/resources/client.keystore deleted file mode 100644 index c8b11cc0c9b..00000000000 Binary files a/test-infra/src/main/resources/client.keystore and /dev/null differ diff --git a/test-infra/src/main/resources/client.truststore b/test-infra/src/main/resources/client.truststore 
deleted file mode 100644 index 169986dea99..00000000000 Binary files a/test-infra/src/main/resources/client.truststore and /dev/null differ diff --git a/test-infra/src/main/resources/server.keystore b/test-infra/src/main/resources/server.keystore deleted file mode 100644 index c6166977b3d..00000000000 Binary files a/test-infra/src/main/resources/server.keystore and /dev/null differ diff --git a/test-infra/src/main/resources/server.truststore b/test-infra/src/main/resources/server.truststore deleted file mode 100644 index 019bca91205..00000000000 Binary files a/test-infra/src/main/resources/server.truststore and /dev/null differ diff --git a/test-infra/src/main/resources/server_localhost.keystore b/test-infra/src/main/resources/server_localhost.keystore deleted file mode 100644 index d246e430e08..00000000000 Binary files a/test-infra/src/main/resources/server_localhost.keystore and /dev/null differ diff --git a/test-infra/src/test/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirementTest.java b/test-infra/src/test/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirementTest.java deleted file mode 100644 index ccddb18c80f..00000000000 --- a/test-infra/src/test/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirementTest.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.testinfra.requirement; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collections; -import java.util.List; -import org.junit.Test; - -public class VersionRequirementTest { - // backend aliases - private static BackendType CASSANDRA = BackendType.CASSANDRA; - private static BackendType DSE = BackendType.DSE; - - // version numbers - private static Version V_0_0_0 = Version.parse("0.0.0"); - private static Version V_0_1_0 = Version.parse("0.1.0"); - private static Version V_1_0_0 = Version.parse("1.0.0"); - private static Version V_1_0_1 = Version.parse("1.0.1"); - private static Version V_1_1_0 = Version.parse("1.1.0"); - private static Version V_2_0_0 = Version.parse("2.0.0"); - private static Version V_2_0_1 = Version.parse("2.0.1"); - private static Version V_3_0_0 = Version.parse("3.0.0"); - private static Version V_3_1_0 = Version.parse("3.1.0"); - private static Version V_4_0_0 = Version.parse("4.0.0"); - - // requirements - private static VersionRequirement CASSANDRA_ANY = new VersionRequirement(CASSANDRA, "", "", ""); - private static VersionRequirement CASSANDRA_FROM_1_0_0 = - new VersionRequirement(CASSANDRA, "1.0.0", "", ""); - private static VersionRequirement CASSANDRA_TO_1_0_0 = - new VersionRequirement(CASSANDRA, "", "1.0.0", ""); - private static VersionRequirement CASSANDRA_FROM_1_0_0_TO_2_0_0 = - new VersionRequirement(CASSANDRA, "1.0.0", 
"2.0.0", ""); - private static VersionRequirement CASSANDRA_FROM_1_1_0 = - new VersionRequirement(CASSANDRA, "1.1.0", "", ""); - private static VersionRequirement CASSANDRA_FROM_3_0_0_TO_3_1_0 = - new VersionRequirement(CASSANDRA, "3.0.0", "3.1.0", ""); - private static VersionRequirement DSE_ANY = new VersionRequirement(DSE, "", "", ""); - - @Test - public void empty_requirements() { - List req = Collections.emptyList(); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, DSE, V_0_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isTrue(); - } - - @Test - public void single_requirement_any_version() { - List anyCassandra = Collections.singletonList(CASSANDRA_ANY); - List anyDse = Collections.singletonList(DSE_ANY); - - assertThat(VersionRequirement.meetsAny(anyCassandra, CASSANDRA, V_0_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(anyCassandra, CASSANDRA, V_1_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(anyDse, DSE, V_0_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(anyDse, DSE, V_1_0_0)).isTrue(); - - assertThat(VersionRequirement.meetsAny(anyDse, CASSANDRA, V_0_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(anyDse, CASSANDRA, V_1_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(anyCassandra, DSE, V_0_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(anyCassandra, DSE, V_1_0_0)).isFalse(); - } - - @Test - public void single_requirement_min_only() { - List req = Collections.singletonList(CASSANDRA_FROM_1_0_0); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_1)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isTrue(); - - 
assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_1_0)).isFalse(); - } - - @Test - public void single_requirement_max_only() { - List req = Collections.singletonList(CASSANDRA_TO_1_0_0); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_1_0)).isTrue(); - - assertThat(VersionRequirement.meetsAny(req, DSE, V_0_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_1)).isFalse(); - } - - @Test - public void single_requirement_min_and_max() { - List req = Collections.singletonList(CASSANDRA_FROM_1_0_0_TO_2_0_0); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_1)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); - - assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_1_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_1)).isFalse(); - } - - @Test - public void multi_requirement_any_version() { - List req = ImmutableList.of(CASSANDRA_ANY, DSE_ANY); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isTrue(); - } - - @Test - public void multi_db_requirement_min_one_any_other() { - List req = ImmutableList.of(CASSANDRA_FROM_1_0_0, DSE_ANY); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); - 
assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, DSE, V_0_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isTrue(); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); - } - - @Test - public void multi_requirement_two_ranges() { - List req = - ImmutableList.of(CASSANDRA_FROM_1_0_0_TO_2_0_0, CASSANDRA_FROM_3_0_0_TO_3_1_0); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_3_0_0)).isTrue(); - - assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_3_1_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_4_0_0)).isFalse(); - } - - @Test - public void multi_requirement_overlapping() { - List req = - ImmutableList.of(CASSANDRA_FROM_1_0_0_TO_2_0_0, CASSANDRA_FROM_1_1_0); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isTrue(); - - assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); - } - - @Test - public void multi_requirement_not_range() { - List req = ImmutableList.of(CASSANDRA_TO_1_0_0, CASSANDRA_FROM_1_1_0); - - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isTrue(); - - 
assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isFalse(); - assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_1)).isFalse(); - } -} diff --git a/testing/README.md b/testing/README.md new file mode 100644 index 00000000000..4dfbb525351 --- /dev/null +++ b/testing/README.md @@ -0,0 +1,79 @@ +## Testing Prerequisites + +### Install CCM + + pip install ccm + +### Setup CCM Loopbacks (required for OSX) + + # For basic ccm + sudo ifconfig lo0 alias 127.0.0.2 up + sudo ifconfig lo0 alias 127.0.0.3 up + + # Additional loopbacks for java-driver testing + sudo ifconfig lo0 alias 127.0.1.1 up + sudo ifconfig lo0 alias 127.0.1.2 up + sudo ifconfig lo0 alias 127.0.1.3 up + sudo ifconfig lo0 alias 127.0.1.4 up + sudo ifconfig lo0 alias 127.0.1.5 up + sudo ifconfig lo0 alias 127.0.1.6 up + + + +## Building the Driver + + mvn clean package + + + +## Testing the Driver + +### Unit Tests + +Use the following command to run only the unit tests: + + mvn test + +_**Estimated Run Time**: x minutes_ + +### Integration Tests + +The following command runs the full set of unit and integration tests: + + mvn verify + +_**Estimated Run Time**: 4 minutes_ + +### Coverage Report + +The following command runs the full set of integration tests and produces a +coverage report: + + mvn cobertura:cobertura + +Coverage report can be found at: + + driver-core/target/site/cobertura/index.html + +_**Estimated Run Time**: 4 minutes_ + + + +## Test Utility + +`testing/bin/coverage` exists to make testing a bit more straight-forward. 
+ +The main commands are as follows: + +Displays the available parameters: + + testing/bin/coverage --help + +Runs all the integration tests, creates the Cobertura report, and uploads Cobertura +site to a remote machine, if applicable: + + testing/bin/coverage + +Runs a single integration test along with the Cobertura report for that test: + + testing/bin/coverage --test TestClass[#optionalTestMethod] diff --git a/testing/bin/coverage b/testing/bin/coverage new file mode 100755 index 00000000000..c920e9fa6d1 --- /dev/null +++ b/testing/bin/coverage @@ -0,0 +1,181 @@ +#!/usr/bin/env python + +import argparse +import ConfigParser +import datetime +import os +import platform +import shlex +import subprocess +import sys + +USER_CONFIG = '~/.java_driver_tests.conf' + +def read_config(section, option, data_type='string', default=None): + '''Read configs as stored in the above defined USER_CONFIG.''' + + config = ConfigParser.ConfigParser() + config.read([os.path.expanduser(USER_CONFIG)]) + + if config.has_option(section, option): + if data_type == 'string': + to_return = config.get(section, option) + elif data_type == 'boolean': + to_return = config.getboolean(section, option) + return default if default else to_return + + return default if default else False + +def read_commandline(command): + '''Simple shell read access.''' + + return subprocess.check_output(command, shell=True) + +def execute(command): + '''Simple shell execute access.''' + + print 'Running command:\n\t%s\n' % command + subprocess.call(shlex.split(command)) + +def parse(): + '''Creates the argument parser for this tool.''' + + parser = argparse.ArgumentParser(description='command line tool for quick testing commands.') + parser.add_argument('--test', help='run a specific unit test') + parser.add_argument('--cassandra-version', help='run tests on a specific Cassandra version') + parser.add_argument('--upload', action='store_true', help='upload cobertura site to configured server') + 
parser.add_argument('--clean', action='store_true', help='runs the maven project from a clean environment') + parser.add_argument('--automated', action='store_true', help='ensures that runs do not get hung up by user input requests') + parser.add_argument('--testdocs', action='store_true', help='generates the test\'s Javadoc') + parser.add_argument('--samplecode', action='store_true', help='prints generated sample CQL') + args = parser.parse_args() + return args + +def check_path(): + '''Ensures this tool is run from the java-driver home directory.''' + + if not 'driver-core' in read_commandline('ls'): + sys.exit('Execute this command from your java-driver root directory.') + + +def maybe_setup_loopbacks(): + '''Currently only setting loopbacks for Mac OSX, but feel free to contribute for other setups.''' + + if platform.system() == 'Darwin' and not read_config('general', 'no_loopbacks', data_type='boolean'): + print 'Setting up CCM loopbacks...' + try: + loopbacks_enabled = read_commandline('ifconfig | grep "inet 127.0.1.4 netmask 0xff000000"') + if not loopbacks_enabled: + raise subprocess.CalledProcessError + except subprocess.CalledProcessError: + # For basic ccm + execute('sudo ifconfig lo0 alias 127.0.0.2 up') + execute('sudo ifconfig lo0 alias 127.0.0.3 up') + + # Additional loopbacks for the java-driver + execute('sudo ifconfig lo0 alias 127.0.1.1 up') + execute('sudo ifconfig lo0 alias 127.0.1.2 up') + execute('sudo ifconfig lo0 alias 127.0.1.3 up') + execute('sudo ifconfig lo0 alias 127.0.1.4 up') + execute('sudo ifconfig lo0 alias 127.0.1.5 up') + execute('sudo ifconfig lo0 alias 127.0.1.6 up') + +def maybe_upload_cobertura_site(): + ''' + The following must be set in the above defined USER_CONFIG: + [general] + cobertura_server = xxx + cobertura_directory = xxx + + If defined, the cobertura site will be rsync'd to a remote location. 
+ ''' + + cobertura_server = read_config('general', 'cobertura_server') + cobertura_directory = read_config('general', 'cobertura_directory') + + if cobertura_server and cobertura_directory: + print 'rsync-ing cobertura site to %s:%s...' % ( + cobertura_server, + cobertura_directory) + execute('rsync -avz testing/cobertura-history %s:%s' % ( + cobertura_server, + cobertura_directory)) + +def save_cobertura_site(): + '''Save cobertura site folders by date''' + + execute('mkdir -p testing/cobertura-history') + today = datetime.date.today() + execute('cp -r driver-core/target/site/cobertura testing/cobertura-history/cobertura-%s' % today) + execute('rm -rf testing/cobertura-history/current') + execute('cp -r driver-core/target/site/cobertura testing/cobertura-history/current') + +def main(): + check_path() + args = parse() + + # Check if an upload is all that is required + if args.upload: + maybe_upload_cobertura_site() + sys.exit() + + if args.testdocs: + execute('mvn javadoc:test-javadoc') + print '\nTo view test Javadocs:' + print '\topen driver-core/target/site/testapidocs/index.html' + sys.exit() + + # Setup required ccm loopbacks + maybe_setup_loopbacks() + + # Start building the mvn command + cobertura_build_command = 'mvn' + + # Add the clean target, if asked or building the entire project + if args.clean or not args.test: + cobertura_build_command += ' clean' + + cobertura_build_command += ' versions:display-dependency-updates' # Ensures dependencies are up to date + cobertura_build_command += ' cobertura:cobertura' # Runs code coverage plugin + cobertura_build_command += ' --projects driver-core' # Runs only the main module + + if args.samplecode: + cobertura_build_command += ' -Pdoc' # Runs the docs "tests" and prints sample code + else: + cobertura_build_command += ' -Plong' # Runs the integration tests, not just tests + + # Use a specific Cassandra version, if asked + if args.cassandra_version: + cobertura_build_command += ' -Dcassandra.version=%s' % 
args.cassandra_version + + if args.test: + # Run against a single mvn test + execute('%s' + ' -Dmaven.test.failure.ignore=true' + ' -DfailIfNoTests=false' + ' -Dtest=%s' % (cobertura_build_command, args.test)) + else: + # Run against the entire integration suite + execute('%s' % cobertura_build_command) + + try: + if args.automated or raw_input('Save Cobertura Report? [y/N] ').lower() == 'y': + + # Save out cobertura site files + save_cobertura_site() + + # Perhaps move cobertura site files to a central location + maybe_upload_cobertura_site() + except KeyboardInterrupt: + print + + # Optionally, open coverage report when done building + if read_config('general', 'open_cobertura_site_after_build', 'boolean'): + execute('open driver-core/target/site/cobertura/index.html') + else: + print '\nTo view Cobertura report:' + print '\topen driver-core/target/site/cobertura/index.html' + + +if __name__ == '__main__': + main() diff --git a/upgrade_guide/README.md b/upgrade_guide/README.md index 56d55aaab36..805eda2cf45 100644 --- a/upgrade_guide/README.md +++ b/upgrade_guide/README.md @@ -1,1003 +1,388 @@ - - ## Upgrade guide -### 4.18.1 - -#### Keystore reloading in DefaultSslEngineFactory - -`DefaultSslEngineFactory` now includes an optional keystore reloading interval, for detecting changes in the local -client keystore file. This is relevant in environments with mTLS enabled and short-lived client certificates, especially -when an application restart might not always happen between a new keystore becoming available and the previous -keystore certificate expiring. - -This feature is disabled by default for compatibility. To enable, see `keystore-reload-interval` in `reference.conf`. - -### 4.17.0 - -#### Support for Java17 - -With the completion of [JAVA-3042](https://datastax-oss.atlassian.net/browse/JAVA-3042) the driver now passes our automated test matrix for Java Driver releases. -If you discover an issue with the Java Driver running on Java 17, please let us know. 
We will triage and address Java 17 issues. - -#### Updated API for vector search - -The 4.16.0 release introduced support for the CQL `vector` datatype. This release modifies the `CqlVector` -value type used to represent a CQL vector to make it easier to use. `CqlVector` now implements the Iterable interface -as well as several methods modelled on the JDK's List interface. For more, see -[JAVA-3060](https://datastax-oss.atlassian.net/browse/JAVA-3060). - -The builder interface was replaced with factory methods that resemble similar methods on `CqlDuration`. -For example, the following code will create a keyspace and table, populate that table with some data, and then execute -a query that will return a `vector` type. This data is retrieved directly via `Row.getVector()` and the resulting -`CqlVector` value object can be interrogated directly. - -```java -try (CqlSession session = new CqlSessionBuilder().withLocalDatacenter("datacenter1").build()) { - - session.execute("DROP KEYSPACE IF EXISTS test"); - session.execute("CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TABLE test.foo(i int primary key, j vector)"); - session.execute("CREATE CUSTOM INDEX ann_index ON test.foo(j) USING 'StorageAttachedIndex'"); - session.execute("INSERT INTO test.foo (i, j) VALUES (1, [8, 2.3, 58])"); - session.execute("INSERT INTO test.foo (i, j) VALUES (2, [1.2, 3.4, 5.6])"); - session.execute("INSERT INTO test.foo (i, j) VALUES (5, [23, 18, 3.9])"); - ResultSet rs=session.execute("SELECT j FROM test.foo WHERE j ann of [3.4, 7.8, 9.1] limit 1"); - for (Row row : rs){ - CqlVector v = row.getVector(0, Float.class); - System.out.println(v); - if (Iterables.size(v) != 3) { - throw new RuntimeException("Expected vector with three dimensions"); - } - } -} -``` - -You can also use the `CqlVector` type with prepared statements: - -```java -PreparedStatement preparedInsert = session.prepare("INSERT INTO test.foo (i, j) 
VALUES (?,?)"); -CqlVector vector = CqlVector.newInstance(1.4f, 2.5f, 3.6f); -session.execute(preparedInsert.bind(3, vector)); -``` - -In some cases, it makes sense to access the vector directly as an array of some numerical type. This version -supports such use cases by providing a codec which translates a CQL vector to and from a primitive array. Only float arrays are supported. -You can find more information about this codec in the manual documentation on [custom codecs](../manual/core/custom_codecs/) - -### 4.15.0 - -#### CodecNotFoundException now extends DriverException - -Before [JAVA-2995](https://datastax-oss.atlassian.net/browse/JAVA-2995), `CodecNotFoundException` -was extending `RuntimeException`. This is a discrepancy as all other exceptions extend -`DriverException`, which in turn extends `RuntimeException`. - -This was causing integrators to do workarounds in order to react on all exceptions correctly. - -The change introduced by JAVA-2995 shouldn't be a problem for most users. But if your code was using -a logic such as below, it won't compile anymore: - -```java -try { - doSomethingWithDriver(); -} catch(DriverException e) { -} catch(CodecNotFoundException e) { -} -``` - -You need to either reverse the catch order and catch `CodecNotFoundException` first: - -```java -try { - doSomethingWithDriver(); -} catch(CodecNotFoundException e) { -} catch(DriverException e) { -} -``` - -Or catch only `DriverException`: - -```java -try { - doSomethingWithDriver(); -} catch(DriverException e) { -} -``` - -### 4.14.0 - -#### AllNodesFailedException instead of NoNodeAvailableException in certain cases - -[JAVA-2959](https://datastax-oss.atlassian.net/browse/JAVA-2959) changed the behavior for when a -request cannot be executed because all nodes tried were busy. Previously you would get back a -`NoNodeAvailableException` but you will now get back an `AllNodesFailedException` where the -`getAllErrors` map contains a `NodeUnavailableException` for that node. 
- -#### Esri Geometry dependency now optional - -Previous versions of the Java Driver defined a mandatory dependency on the Esri geometry library. -This library offered support for primitive geometric types supported by DSE. As of driver 4.14.0 -this dependency is now optional. - -If you do not use DSE (or if you do but do not use the support for geometric types within DSE) you -should experience no disruption. If you are using geometric types with DSE you'll now need to -explicitly declare a dependency on the Esri library: - -```xml - - com.esri.geometry - esri-geometry-api - ${esri.version} - -``` - -See the [integration](../manual/core/integration/#esri) section in the manual for more details. - -### 4.13.0 - -#### Enhanced support for GraalVM native images - -[JAVA-2940](https://datastax-oss.atlassian.net/browse/JAVA-2940) introduced an enhanced support for -building GraalVM native images. - -If you were building a native image for your application, please verify your native image builder -configuration. Most of the extra configuration required until now is likely to not be necessary -anymore. - -Refer to this [manual page](../manual/core/graalvm) for details. - -#### Registration of multiple listeners and trackers - -[JAVA-2951](https://datastax-oss.atlassian.net/browse/JAVA-2951) introduced the ability to register -more than one instance of the following interfaces: - -* [RequestTracker](https://docs.datastax.com/en/drivers/java/4.12/com/datastax/oss/driver/api/core/tracker/RequestTracker.html) -* [NodeStateListener](https://docs.datastax.com/en/drivers/java/4.12/com/datastax/oss/driver/api/core/metadata/NodeStateListener.html) -* [SchemaChangeListener](https://docs.datastax.com/en/drivers/java/4.12/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.html) - -Multiple components can now be registered both programmatically and through the configuration. 
_If -both approaches are used, components will add up and will all be registered_ (whereas previously, -the programmatic approach would take precedence over the configuration one). - -When using the programmatic approach to register multiple components, you should use the new -`SessionBuilder` methods `addRequestTracker`, `addNodeStateListener` and `addSchemaChangeListener`: - -```java -CqlSessionBuilder builder = CqlSession.builder(); -builder - .addRequestTracker(tracker1) - .addRequestTracker(tracker2); -builder - .addNodeStateListener(nodeStateListener1) - .addNodeStateListener(nodeStateListener2); -builder - .addSchemaChangeListener(schemaChangeListener1) - .addSchemaChangeListener(schemaChangeListener2); -``` - -To support registration of multiple components through the configuration, the following -configuration options were deprecated because they only allow one component to be declared: - -* `advanced.request-tracker.class` -* `advanced.node-state-listener.class` -* `advanced.schema-change-listener.class` - -They are still honored, but the driver will log a warning if they are used. They should now be -replaced with the following ones, that accept a list of classes to instantiate, instead of just -one: - -* `advanced.request-tracker.classes` -* `advanced.node-state-listener.classes` -* `advanced.schema-change-listener.classes` - -Example: - -```hocon -datastax-java-driver { - advanced { - # RequestLogger is a driver built-in tracker - request-tracker.classes = [RequestLogger,com.example.app.MyRequestTracker] - node-state-listener.classes = [com.example.app.MyNodeStateListener1,com.example.app.MyNodeStateListener2] - schema-change-listener.classes = [com.example.app.MySchemaChangeListener] - } -} -``` - -When more than one component of the same type is registered, the driver will distribute received -signals to all components in sequence, by order of their registration, starting with the -programmatically-provided ones. 
If a component throws an error, the error is intercepted and logged. - -### 4.12.0 - -#### MicroProfile Metrics upgraded to 3.0 - -The MicroProfile Metrics library has been upgraded from version 2.4 to 3.0. Since this upgrade -involves backwards-incompatible binary changes, users of this library and of the -`java-driver-metrics-microprofile` module are required to take the appropriate action: - -* If your application is still using MicroProfile Metrics < 3.0, you can still upgrade the core - driver to 4.12, but you now must keep `java-driver-metrics-microprofile` in version 4.11 or lower, - as newer versions will not work. - -* If your application is using MicroProfile Metrics >= 3.0, then you must upgrade to driver 4.12 or - higher, as previous versions of `java-driver-metrics-microprofile` will not work. - -#### Mapper `@GetEntity` and `@SetEntity` methods can now be lenient - -Thanks to [JAVA-2935](https://datastax-oss.atlassian.net/browse/JAVA-2935), `@GetEntity` and -`@SetEntity` methods now have a new `lenient` attribute. - -If the attribute is `false` (the default value), then the source row or the target statement must -contain a matching column for every property in the entity definition. If such a column is not -found, an error will be thrown. This corresponds to the mapper's current behavior prior to the -introduction of the new attribute. - -If the new attribute is explicitly set to `true` however, the mapper will operate on a best-effort -basis and attempt to read or write all entity properties that have a matching column in the source -row or in the target statement, *leaving unmatched properties untouched*. - -This new, lenient behavior allows to achieve the equivalent of driver 3.x -[lenient mapping](https://docs.datastax.com/en/developer/java-driver/3.10/manual/object_mapper/using/#manual-mapping). 
- -Read the manual pages on [@GetEntity](../manual/mapper/daos/getentity) methods and -[@SetEntity](../manual/mapper/daos/setentity) methods for more details and examples of lenient mode. - -### 4.11.0 - -#### Native protocol V5 is now production-ready - -Thanks to [JAVA-2704](https://datastax-oss.atlassian.net/browse/JAVA-2704), 4.11.0 is the first -version in the driver 4.x series to fully support Cassandra's native protocol version 5, which has -been promoted from beta to production-ready in the upcoming Cassandra 4.0 release. - -Users should not experience any disruption. When connecting to Cassandra 4.0, V5 will be -transparently selected as the protocol version to use. - -#### Customizable metric names, support for metric tags +The purpose of this guide is to detail changes made by successive +versions of the Java driver. -[JAVA-2872](https://datastax-oss.atlassian.net/browse/JAVA-2872) introduced the ability to configure -how metric identifiers are generated. Metric names can now be configured, but most importantly, -metric tags are now supported. See the [metrics](../manual/core/metrics/) section of the online -manual, or the `advanced.metrics.id-generator` section in the -[reference.conf](../manual/core/configuration/reference/) file for details. +### 2.1.7 -Users should not experience any disruption. However, those using metrics libraries that support tags -are encouraged to try out the new `TaggingMetricIdGenerator`, as it generates metric names and tags -that will look more familiar to users of libraries such as Micrometer or MicroProfile Metrics (and -look nicer when exported to Prometheus or Graphite). +This version brings a few changes in the driver's behavior; none of them break +binary compatibility. -#### New `NodeDistanceEvaluator` API +1. The `DefaultRetryPolicy`'s behaviour has changed in the case of an Unavailable + exception received from a request. 
The new behaviour will cause the driver to + process a Retry on a different node at most once, otherwise an exception will + be thrown. This change makes sense in the case where the node tried initially + for the request happens to be isolated from the rest of the cluster (e.g. + because of a network partition) but can still answer to the client normally. + In this case, trying another node has a chance of success. + The previous behaviour was to always throw an exception. -All driver built-in load-balancing policies now accept a new optional component called -[NodeDistanceEvaluator]. This component gets invoked each time a node is added to the cluster or -comes back up. If the evaluator returns a non-null distance for the node, that distance will be -used, otherwise the driver will use its built-in logic to assign a default distance to it. +2. The following properties in `PoolingOptions` were renamed: + * `MaxSimultaneousRequestsPerConnectionThreshold` to `NewConnectionThreshold` + * `MaxSimultaneousRequestsPerHostThreshold` to `MaxRequestsPerConnection` -[NodeDistanceEvaluator]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.html + The old getters/setters were deprecated, but they delegate to the new + ones. -This component replaces the old "node filter" component. As a consequence, all `withNodeFilter` -methods in `SessionBuilder` are now deprecated and should be replaced by the equivalent -`withNodeDistanceEvaluator` methods. + Also, note that the connection pool for protocol v3 can now be configured to + use multiple connections. See [this page](../features/pooling) for more + information. -If you have an existing node filter implementation, it can be converted to a `NodeDistanceEvaluator` -very easily: +3. `MappingManager(Session)` will now force the initialization of the `Session` + if needed. 
This is a change from 2.1.6, where if you gave it an uninitialized + session (created with `Cluster#newSession()` instead of `Cluster#connect()`), + it would only get initialized on the first request. -```java -Predicate nodeFilter = ... -NodeDistanceEvaluator nodeEvaluator = - (node, dc) -> nodeFilter.test(node) ? null : NodeDistance.IGNORED; -``` + If this is a problem for you, `MappingManager(Session, ProtocolVersion)` + preserves the previous behavior (see the API docs for more details). -The above can also be achieved by an adapter class as shown below: +Merged from 2.0.11: -```java -public class NodeFilterToDistanceEvaluatorAdapter implements NodeDistanceEvaluator { +4. The `DefaultRetryPolicy`'s behaviour has changed in the case of an Unavailable + exception received from a request. The new behaviour will cause the driver to + process a Retry on a different node at most once, otherwise an exception will + be thrown. This change makes sense in the case where the node tried initially + for the request happens to be isolated from the rest of the cluster (e.g. + because of a network partition) but can still answer to the client normally. + In this case, trying another node has a chance of success. + The previous behaviour was to always throw an exception. +5. A `BuiltStatement` is now considered non-idempotent whenever a `fcall()` + or `raw()` is used to build a value to be inserted in the database. + If you know that the CQL functions or expressions are safe, use + `setIdempotent(true)` on the statement. - private final Predicate nodeFilter; - public NodeFilterToDistanceEvaluatorAdapter(@NonNull Predicate nodeFilter) { - this.nodeFilter = nodeFilter; - } +### 2.1.6 - @Nullable @Override - public NodeDistance evaluateDistance(@NonNull Node node, @Nullable String localDc) { - return nodeFilter.test(node) ? null : NodeDistance.IGNORED; - } -} -``` +See [2.0.10](20x-to-2010). 
-Finally, the `datastax-java-driver.basic.load-balancing-policy.filter.class` configuration option -has been deprecated; it should be replaced with a node distance evaluator class defined by the -`datastax-java-driver.basic.load-balancing-policy.evaluator.class` option instead. -### 4.10.0 +### 2.1.2 -#### Cross-datacenter failover +2.1.2 brings important internal changes with native protocol v3 support, but +the impact on the public API has been kept as low as possible. -[JAVA-2899](https://datastax-oss.atlassian.net/browse/JAVA-2899) re-introduced the ability to -perform cross-datacenter failover using the driver's built-in load balancing policies. See [Load -balancing](../manual/core/loadbalancing/) in the manual for details. +#### User API Changes -Cross-datacenter failover is disabled by default, therefore existing applications should not -experience any disruption. +1. The native protocol version is now modelled as an enum: `ProtocolVersion`. + Most public methods that take it as an argument have a backward-compatible + version that takes an `int` (the exception being `RegularStatement`, + described below). For new code, prefer the enum version. -#### New `RetryVerdict` API +#### Internal API Changes -[JAVA-2900](https://datastax-oss.atlassian.net/browse/JAVA-2900) introduced [`RetryVerdict`], a new -interface that allows custom retry policies to customize the request before it is retried. +1. `RegularStatement.getValues` now takes the protocol version as a + `ProtocolVersion` instead of an `int`. This is transparent for callers + since there is a backward-compatible alternative, but if you happened to + extend the class you'll need to update your implementation. -For this reason, the following methods in the `RetryPolicy` interface were added; they all return -a `RetryVerdict` instance: +2. `BatchStatement.setSerialConsistencyLevel` now returns `BatchStatement` + instead of `Statement`. 
Again, this only matters if you extended this + class (if so, it might be a good idea to also have a covariant return in + your child class). -1. [`onReadTimeoutVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onReadTimeoutVerdict-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-int-int-boolean-int-) -2. [`onWriteTimeoutVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onWriteTimeoutVerdict-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-com.datastax.oss.driver.api.core.servererrors.WriteType-int-int-int-) -3. [`onUnavailableVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onUnavailableVerdict-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-int-int-int-) -4. [`onRequestAbortedVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onRequestAbortedVerdict-com.datastax.oss.driver.api.core.session.Request-java.lang.Throwable-int-) -5. [`onErrorResponseVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onErrorResponseVerdict-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.servererrors.CoordinatorException-int-) +3. The constructor of `UnsupportedFeatureException` now takes a + `ProtocolVersion` as a parameter. This should impact few users, as there's + hardly any reason to build instances of that class from client code. -The following methods were deprecated and will be removed in the next major version: +#### New features -1. 
[`onReadTimeout`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onReadTimeout-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-int-int-boolean-int-) -2. [`onWriteTimeout`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onWriteTimeout-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-com.datastax.oss.driver.api.core.servererrors.WriteType-int-int-int-) -3. [`onUnavailable`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onUnavailable-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-int-int-int-) -4. [`onRequestAborted`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onRequestAborted-com.datastax.oss.driver.api.core.session.Request-java.lang.Throwable-int-) -5. [`onErrorResponse`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onErrorResponse-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.servererrors.CoordinatorException-int-) +These features are only active when the native protocol v3 is in use. -Driver 4.10.0 also re-introduced a retry policy whose behavior is equivalent to the -`DowngradingConsistencyRetryPolicy` from driver 3.x. See this -[FAQ entry](https://docs.datastax.com/en/developer/java-driver/4.11/faq/#where-is-downgrading-consistency-retry-policy) -for more information. +1. The driver now uses a single connection per host (as opposed to a pool in + 2.1.1). Most options in `PoolingOptions` are ignored, except for a new one + called `maxSimultaneousRequestsPerHostThreshold`. See the class's Javadocs + for detailed explanations. 
-[`RetryVerdict`]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryVerdict.html +2. You can now provide a default timestamp with each query (but it will be + ignored if the CQL query string already contains a `USING TIMESTAMP` + clause). This can be done on a per-statement basis with + `Statement.setDefaultTimestamp`, or automatically with a + `TimestampGenerator` specified with + `Cluster.Builder.withTimestampGenerator` (two implementations are + provided: `ThreadLocalMonotonicTimestampGenerator` and + `AtomicMonotonicTimestampGenerator`). If you specify both, the statement's + timestamp takes precedence over the generator. By default, the driver has + the same behavior as 2.1.1 (no generator, timestamps are assigned by + Cassandra unless `USING TIMESTAMP` was specified). -#### Enhancements to the `Uuids` utility class +3. `BatchStatement.setSerialConsistencyLevel` no longer throws an exception, + it will honor the serial consistency level for the batch. -[JAVA-2449](https://datastax-oss.atlassian.net/browse/JAVA-2449) modified the implementation of -[Uuids.random()]: this method does not delegate anymore to the JDK's `java.util.UUID.randomUUID()` -implementation, but instead re-implements random UUID generation using the non-cryptographic -random number generator `java.util.Random`. -For most users, non-cryptographic strength is enough and this change should translate into better -performance when generating UUIDs for database insertion. However, in the unlikely case where your -application requires cryptographic strength for UUID generation, you should update your code to -use `java.util.UUID.randomUUID()` instead of `com.datastax.oss.driver.api.core.uuid.Uuids.random()` -from now on. +### 2.1.1 -This release also introduces two new methods for random UUID generation: +#### Internal API Changes -1. 
[Uuids.random(Random)]: similar to `Uuids.random()` but allows to pass a custom instance of - `java.util.Random` and/or re-use the same instance across calls. -2. [Uuids.random(SplittableRandom)]: similar to `Uuids.random()` but uses a - `java.util.SplittableRandom` instead. +1. The `ResultSet` interface has a new `wasApplied()` method. This will + only affect clients that provide their own implementation of this interface. -[Uuids.random()]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/uuid/Uuids.html#random-- -[Uuids.random(Random)]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/uuid/Uuids.html#random-java.util.Random- -[Uuids.random(SplittableRandom)]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/uuid/Uuids.html#random-java.util.SplittableRandom- -#### System and DSE keyspaces automatically excluded from metadata and token map computation +### 2.1.0 -[JAVA-2871](https://datastax-oss.atlassian.net/browse/JAVA-2871) now allows for a more fine-grained -control over which keyspaces should qualify for metadata and token map computation, including the -ability to *exclude* keyspaces based on their names. +#### User API Changes -From now on, the following keyspaces are automatically excluded: +1. The `getCaching` method of `TableMetadata#Options` now returns a + `Map` to account for changes to Cassandra 2.1. Also, the + `getIndexInterval` method now returns an `Integer` instead of an `int` + which will be `null` when connected to Cassandra 2.1 nodes. -1. The `system` keyspace; -2. All keyspaces starting with `system_`; -3. DSE-specific keyspaces: - 1. All keyspaces starting with `dse_`; - 2. The `solr_admin` keyspace; - 3. The `OpsCenter` keyspace. - -This means that they won't show up anymore in [Metadata.getKeyspaces()], and [TokenMap] will return -empty replicas and token ranges for them. 
If you need the driver to keep computing metadata and -token map for these keyspaces, you now must modify the following configuration option: -`datastax-java-driver.advanced.metadata.schema.refreshed-keyspaces`. +2. `BoundStatement` variables that have not been set explicitly will no + longer default to `null`. Instead, all variables must be bound explicitly, + otherwise the execution of the statement will fail (this also applies to + statements inside of a `BatchStatement`). For variables that map to a + primitive Java type, a new `setToNull` method has been added. + We made this change because the driver might soon distinguish between unset + and null variables, so we don't want clients relying on the "leave unset to + set to `null`" behavior. -[Metadata.getKeyspaces()]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/metadata/Metadata.html#getKeyspaces-- -[TokenMap]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/metadata/TokenMap.html -#### DSE Graph dependencies are now optional +#### Internal API Changes -Until driver 4.9.0, the driver declared a mandatory dependency to Apache TinkerPop, a library -required only when connecting to DSE Graph. The vast majority of Apache Cassandra users did not need -that library, but were paying the price of having that heavy-weight library in their application's -classpath. +The changes listed in this section should normally not impact end users of the +driver, but rather third-party frameworks and tools. -_Starting with driver 4.10.0, TinkerPop is now considered an optional dependency_. +1. The `serialize` and `deserialize` methods in `DataType` now take an + additional parameter: the protocol version. As explained in the javadoc, + if unsure, the proper value to use for this parameter is the protocol version + in use by the driver, i.e. the value returned by + `cluster.getConfiguration().getProtocolOptions().getProtocolVersion()`. 
-Regular users of Apache Cassandra that do not use DSE Graph will not notice any disruption. +2. The `parse` method in `DataType` now returns a Java object, not a + `ByteBuffer`. The previous behavior can be obtained by calling the + `serialize` method on the returned object. -DSE Graph users, however, will now have to explicitly declare a dependency to Apache TinkerPop. This -can be achieved with Maven by adding the following dependencies to the `` section of -your POM file: +3. The `getValues` method of `RegularStatement` now takes the protocol + version as a parameter. As above, the proper value if unsure is almost surely + the protocol version in use + (`cluster.getConfiguration().getProtocolOptions().getProtocolVersion()`). -```xml - - org.apache.tinkerpop - gremlin-core - ${tinkerpop.version} - - - org.apache.tinkerpop - tinkergraph-gremlin - ${tinkerpop.version} - -``` -See the [integration](../manual/core/integration/#tinker-pop) section in the manual for more details -as well as a driver vs. TinkerPop version compatibility matrix. - -### 4.5.x - 4.6.0 - -These versions are subject to [JAVA-2676](https://datastax-oss.atlassian.net/browse/JAVA-2676), a -bug that causes performance degradations in certain scenarios. We strongly recommend upgrading to at -least 4.6.1. - -### 4.4.0 - -DataStax Enterprise support is now available directly in the main driver. There is no longer a -separate DSE driver. - -#### For Apache Cassandra® users - -The great news is that [reactive execution](../manual/core/reactive/) is now available for everyone. -See the `CqlSession.executeReactive` methods. - -Apart from that, the only visible change is that DSE-specific features are now exposed in the API: - -* new execution methods: `CqlSession.executeGraph`, `CqlSession.executeContinuously*`. They all - have default implementations so this doesn't break binary compatibility. You can just ignore them. -* new driver dependencies: TinkerPop, ESRI, Reactive Streams. 
If you want to keep your classpath - lean, you can exclude some dependencies when you don't use the corresponding DSE features; see the - [Integration>Driver dependencies](../manual/core/integration/#driver-dependencies) section. - -#### For DataStax Enterprise users - -Adjust your Maven coordinates to use the unified artifact: - -```xml - - - com.datastax.dse - dse-java-driver-core - 2.3.0 - - - - - com.datastax.oss - java-driver-core - 4.4.0 - - - -``` - -The new driver is a drop-in replacement for the DSE driver. Note however that we've deprecated a few -DSE-specific types in favor of their OSS equivalents. They still work, so you don't need to make the -changes right away; but you will get deprecation warnings: - -* `DseSession`: use `CqlSession` instead, it can now do everything that a DSE session does. This - also applies to the builder: - +### 2.0.11 + +1. The `DefaultRetryPolicy`'s behaviour has changed in the case of an Unavailable + exception received from a request. The new behaviour will cause the driver to + process a Retry on a different node at most once, otherwise an exception will + be thrown. This change makes sense in the case where the node tried initially + for the request happens to be isolated from the rest of the cluster (e.g. + because of a network partition) but can still answer to the client normally. + In this case, trying another node has a chance of success. + The previous behaviour was to always throw an exception. +2. A `BuiltStatement` is now considered non-idempotent whenever a `fcall()` + or `raw()` is used to build a value to be inserted in the database. + If you know that the CQL functions or expressions are safe, use + `setIdempotent(true)` on the statement. + +### 2.0.x to 2.0.10 + +We try to avoid breaking changes within a branch (2.0.x to 2.0.y), but +2.0.10 saw a lot of new features and internal improvements. There is one +breaking change: + +1. 
`LatencyTracker#update` now has a different signature and takes two new + parameters: the statement that has been executed (never null), and the exception + thrown while executing the query (or null, if the query executed successfully). + Existing implementations of this interface, once upgraded to the new method + signature, should continue to work as before. + +The following might also be of interest: + +2. `SocketOptions#getTcpNoDelay()` is now TRUE by default (it was previously undefined). + This reflects the new behavior of Netty (which was upgraded from version 3.9.0 to + 4.0.27): `TCP_NODELAY` is now turned on by default, instead of depending on the OS + default like in previous versions. + +3. Netty is not shaded anymore in the default Maven artifact. However we publish a + [shaded artifact](../features/shaded_jar/) under a different classifier. + +4. The internal initialization sequence of the Cluster object has been slightly changed: + some fields that were previously initialized in the constructor are now set when + the `init()` method is called. This is unlikely to affect regular driver users. + +### 1.0 to 2.0 + +We used the opportunity of a major version bump to incorporate your feedback +and improve the API, to fix a number of inconsistencies and remove cruft. +Unfortunately this means there are some breaking changes, but the new API should +be both simpler and more complete. + +The following describes the changes for 2.0 that are breaking changes of the +1.0 API. For ease of use, we distinguish two categories of API changes: the "main" +ones and the "other" ones. + +The "main" API changes are the ones that are either +likely to affect most upgraded apps or are incompatible changes that, even if minor, +will not be detected at compile time. Upgraders are highly encouraged to check +this list of "main" changes while upgrading their application to 2.0 (even +though most applications are likely to be affected by only a handful of +changes). 
+ +The "other" list is, well, other changes: those that are likely to +affect a minor number of applications and will be detected by compile time +errors anyway. It is ok to skip those initially and only come back to them if +you have trouble compiling your application after an upgrade. + +#### Main API changes + +1. The `Query` class has been renamed into `Statement` (it was confusing + to some that the `BoundStatement` was not a `Statement`). To allow + this, the old `Statement` class has been renamed to `RegularStatement`. + +2. The `Cluster` and `Session` shutdown API has changed. There is now a + `closeAsync` that is asynchronous but returns a `Future` on the + completion of the shutdown process. There is also a `close` shortcut + that does the same but blocks. Also, `close` now waits for ongoing + queries to complete by default (but you can force the closing of all + connections if you want to). + +3. `NoHostAvailableException#getErrors` now returns the full exception objects for + each node instead of just a message. In other words, it returns a + `Map` instead of a `Map`. + +4. `Statement#getConsistencyLevel` (previously `Query#getConsistencyLevel`, see + first point) will now return `null` by default (instead of `CL.ONE`), with the + meaning of "use the default consistency level". + The default consistency level can now be configured through the new `QueryOptions` + object in the cluster `Configuration`. + +5. The `Metrics` class now uses the Codahale metrics library version 3 (version 2 was + used previously). This new major version of the library has many API changes + compared to its version 2 (see the [release notes](https://dropwizard.github.io/metrics/3.1.0/about/release-notes/) for details), + which can thus impact consumers of the Metrics class. + Furthermore, the default `JmxReporter` now includes a name specific to the + cluster instance (to avoid conflicts when multiple Cluster instances are created + in the same JVM). 
As a result, tools that were polling JMX info will + have to be updated accordingly. + +6. The `QueryBuilder#in` method now has the following special case: using + `QueryBuilder.in(QueryBuilder.bindMarker())` will generate the string `IN ?`, + not `IN (?)` as was the case in 1.0. The reasoning being that the former + syntax, made valid by [CASSANDRA-4210](https://issues.apache.org/jira/browse/CASSANDRA-4210) + is a lot more useful than `IN (?)`, as the latter can more simply use an + equality. + Note that if you really want to output `IN (?)` with the query + builder, you can use `QueryBuilder.in(QueryBuilder.raw("?"))`. + +7. When binding values by name in `BoundStatement` (i.e. using the + `setX(String, X)` methods), if more than one variable have the same name, + then all values corresponding to that variable + name are set instead of just the first occurrence. + +8. The `QueryBuilder#raw` method does not automatically add quotes anymore, but + rather output its result without any change (as the raw name implies). + This means for instance that `eq("x", raw(foo))` will output `x = foo`, + not `x = 'foo'` (you don't need the raw method to output the latter string). + +9. The `QueryBuilder` will now sometimes use the new ability to send value as + bytes instead of serializing everything to string. In general the QueryBuilder + will do the right thing, but if you were calling the `getQueryString()` method + on a Statement created with a QueryBuilder (for other reasons than to prepare a query) + then the returned string may contain bind markers in place of some of the values + provided (and in that case, `getValues()` will contain the values corresponding + to those markers). If need be, it is possible to force the old behavior by + using the new `setForceNoValues()` method. + +#### Other API Changes + +1. 
Creating a Cluster instance (through `Cluster#buildFrom` or the + `Cluster.Builder#build` method) **does not create any connection right away + anymore** (and thus cannot throw a `NoHostAvailableException` or an + `AuthenticationException`). Instead, the initial contact points are checked + the first time a call to `Cluster#connect` is done. If for some reason you + want to emulate the previous behavior, you can use the new method + `Cluster#init`: `Cluster.builder().build()` in 1.0 is equivalent to + `Cluster.builder().build().init()` in 2.0. + +2. Methods from `Metadata`, `KeyspaceMetadata` and `TableMetadata` now use by default + case insensitive identifiers (for keyspace, table and column names in + parameter). You can double-quote an identifier if you want it to be a + case sensitive one (as you would do in CQL) and there is a `Metadata.quote` + helper method for that. + +3. The `TableMetadata#getClusteringKey` method has been renamed + `TableMetadata#getClusteringColumns` to match the "official" vocabulary. + +4. The `UnavailableException#getConsistency` method has been renamed to + `UnavailableException#getConsistencyLevel` for consistency with the method of + `QueryTimeoutException`. + +5. The `RegularStatement` class (ex-`Statement` class, see above) must now + implement two additional methods: `RegularStatement#getKeyspace` and + `RegularStatement#getValues`. If you had extended this class, you will have to + implement those new methods, but both can return null if they are not useful + in your case. + +6. The `Cluster.Initializer` interface should now implement 2 new methods: + `Cluster.Initializer#getInitialListeners` (which can return an empty + collection) and `Cluster.Initializer#getClusterName` (which can return null). + +7. The `Metadata#getReplicas` method now takes 2 arguments. On top of the + partition key, you must now provide the keyspace too. 
The previous behavior + was buggy: it's impossible to properly return the full list of replica for a + partition key without knowing the keyspace since replication may depend on + the keyspace). + +8. The method `LoadBalancingPolicy#newQueryPlan()` method now takes the currently + logged keyspace as 2nd argument. This information is necessary to do proper + token aware balancing (see preceding point). + +9. The `ResultSetFuture#set` and `ResultSetFuture#setException` methods have been + removed (from the public API at least). They were never meant to be exposed + publicly: a `resultSetFuture` is always set by the driver itself and should + not be set manually. + +10. The deprecated since 1.0.2 `Host.HealthMonitor` class has been removed. You + will now need to use `Host#isUp` and `Cluster#register` if you were using that + class. + +#### Features available only with Cassandra 2.0 + +This section details the biggest additions to 2.0 API wise. It is not an +exhaustive list of new features in 2.0. + +1. The new `BatchStatement` class allows to group any type of insert Statements + (`BoundStatement` or `RegularStatement`) for execution as a batch. For instance, + you can do something like: + ```java - // Replace: - DseSession session = DseSession.builder().build() - - // By: - CqlSession session = CqlSession.builder().build() + List values = ...; + PreparedStatement ps = session.prepare("INSERT INTO myTable(value) VALUES (?)"); + BatchStatement bs = new BatchStatement(); + for (String value : values) + bs.add(ps.bind(value)); + session.execute(bs); ``` -* `DseDriverConfigLoader`: the driver no longer needs DSE-specific config loaders. All the factory - methods in this class now redirect to `DriverConfigLoader`. On that note, `dse-reference.conf` - does not exist anymore, all the driver defaults are now in - [reference.conf](../manual/core/configuration/reference/). 
-* plain-text authentication: there is now a single implementation that works with both Cassandra and - DSE. If you used `DseProgrammaticPlainTextAuthProvider`, replace it by - `PlainTextProgrammaticAuthProvider`. Similarly, if you wrote a custom implementation by - subclassing `DsePlainTextAuthProviderBase`, extend `PlainTextAuthProviderBase` instead. -* `DseLoadBalancingPolicy`: DSE-specific features (the slow replica avoidance mechanism) have been - merged into `DefaultLoadBalancingPolicy`. `DseLoadBalancingPolicy` still exists for backward - compatibility, but it is now identical to the default policy. - -#### Class Loader - -The default class loader used by the driver when instantiating classes by reflection changed. -Unless specified by the user, the driver will now use the same class loader that was used to load -the driver classes themselves, in order to ensure that implemented interfaces and implementing -classes are fully compatible. - -This should ensure a more streamlined experience for OSGi users, who do not need anymore to define -a specific class loader to use. - -However if you are developing a web application and your setup corresponds to the following -scenario, then you will now be required to explicitly define another class loader to use: if in your -application the driver jar is loaded by the web server's system class loader (for example, -because the driver jar was placed in the "/lib" folder of the web server), then the default class -loader will be the server's system class loader. Then if the application tries to load, say, a -custom load balancing policy declared in the web app's "WEB-INF/lib" folder, then the default class -loader will not be able to locate that class. Instead, you must use the web app's class loader, that -you can obtain in most web environments by calling `Thread.getContextClassLoader()`: - - CqlSession.builder() - .addContactEndPoint(...) 
- .withClassLoader(Thread.currentThread().getContextClassLoader()) - .build(); - -See the javadocs of [SessionBuilder.withClassLoader] for more information. - -[SessionBuilder.withClassLoader]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withClassLoader-java.lang.ClassLoader- - -### 4.1.0 - -#### Object mapper - -4.1.0 marks the introduction of the new object mapper in the 4.x series. - -Like driver 3, it relies on annotations to configure mapped entities and queries. However, there are -a few notable differences: - -* it uses compile-time annotation processing instead of runtime reflection; -* the "mapper" and "accessor" concepts have been unified into a single "DAO" component, that handles - both pre-defined CRUD patterns, and user-provided queries. - -Refer to the [mapper manual](../manual/mapper/) for all the details. - -#### Internal API - -`NettyOptions#afterBootstrapInitialized` is now responsible for setting socket options on driver -connections (see `advanced.socket` in the configuration). If you had written a custom `NettyOptions` -for 4.0, you'll have to copy over -- and possibly adapt -- the contents of -`DefaultNettyOptions#afterBootstrapInitialized` (if you didn't override `NettyOptions`, you don't -have to change anything). - -### 4.0.0 - -Version 4 is major redesign of the internal architecture. As such, it is **not binary compatible** -with previous versions. However, most of the concepts remain unchanged, and the new API will look -very familiar to 2.x and 3.x users. - -#### New Maven coordinates -The core driver is available from: +2. `SimpleStatement` can now take a list of values in addition to the query. This + allows to do the equivalent of a prepare+execute but with only one round-trip + to the server and without keeping the prepared statement after the + execution. 
-```xml - - com.datastax.oss - java-driver-core - 4.0.0 - -``` - -#### Runtime requirements - -The driver now requires **Java 8 or above**. It does not depend on Guava anymore (we still use it -internally but it's shaded). - -We have dropped support for legacy protocol versions v1 and v2. As a result, the driver is -compatible with: - -* **Apache Cassandra®: 2.1 and above**; -* **DataStax Enterprise: 4.7 and above**. - -#### Packages - -We've adopted new [API conventions] to better organize the driver code and make it more modular. As -a result, package names have changed. However most public API types have the same names; you can use -the auto-import or "find class" features of your IDE to discover the new locations. - -Here's a side-by-side comparison with the legacy driver for a basic example: - -```java -// Driver 3: -import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.Row; -import com.datastax.driver.core.SimpleStatement; + This is typically useful if a given query should be executed only + once (i.e. you don't want to prepare it) but you also don't want to + serialize all values into strings. 
Shortcut `Session#execute()` and + `Session#executeAsync()` methods are also provided so you that you can do: -SimpleStatement statement = - new SimpleStatement("SELECT release_version FROM system.local"); -ResultSet resultSet = session.execute(statement); -Row row = resultSet.one(); -System.out.println(row.getString("release_version")); - - -// Driver 4: -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; - -SimpleStatement statement = - SimpleStatement.newInstance("SELECT release_version FROM system.local"); -ResultSet resultSet = session.execute(statement); -Row row = resultSet.one(); -System.out.println(row.getString("release_version")); -``` - -Notable changes: - -* the imports; -* simple statement instances are now created with the `newInstance` static factory method. This is - because `SimpleStatement` is now an interface (as most public API types). - -[API conventions]: ../manual/api_conventions - -#### Configuration - -The configuration has been completely revamped. Instead of ad-hoc configuration classes, the default -mechanism is now file-based, using the [Typesafe Config] library. This is a better choice for most -deployments, since it allows configuration changes without recompiling the client application (note -that there are still programmatic setters for things that are likely to be injected dynamically, -such as contact points). - -The driver JAR contains a `reference.conf` file that defines the options with their defaults: - -``` -datastax-java-driver { - basic.request { - timeout = 2 seconds - consistency = LOCAL_ONE - page-size = 5000 - } - // ... 
and many more (~10 basic options, 70 advanced ones) -} -``` - -You can place an `application.conf` in your application's classpath to override options selectively: - -``` -datastax-java-driver { - basic.request.consistency = ONE -} -``` - -Options can also be overridden with system properties when launching your application: - -``` -java -Ddatastax-java-driver.basic.request.consistency=ONE MyApp -``` - -The configuration also supports *execution profiles*, that allow you to capture and reuse common -sets of options: - -```java -// application.conf: -datastax-java-driver { - profiles { - profile1 { basic.request.consistency = QUORUM } - profile2 { basic.request.consistency = ONE } - } -} - -// Application code: -SimpleStatement statement1 = - SimpleStatement.newInstance("...").setExecutionProfileName("profile1"); -SimpleStatement statement2 = - SimpleStatement.newInstance("...").setExecutionProfileName("profile2"); -``` - -The configuration can be reloaded periodically at runtime: - -``` -datastax-java-driver { - basic.config-reload-interval = 5 minutes -} -``` - -This is fully customizable: the configuration is exposed to the rest of the driver as an abstract -`DriverConfig` interface; if the default implementation doesn't work for you, you can write your -own. - -For more details, refer to the [manual](../manual/core/configuration). - -[Typesafe Config]: https://github.com/typesafehub/config - -#### Session - -`Cluster` does not exist anymore; the session is now the main component, initialized in a single -step: - -```java -CqlSession session = CqlSession.builder().build(); -session.execute("..."); -``` - -Protocol negotiation in mixed clusters has been improved: you no longer need to force the protocol -version during a rolling upgrade. The driver will detect that there are older nodes, and downgrade -to the best common denominator (see -[JAVA-1295](https://datastax-oss.atlassian.net/browse/JAVA-1295)). 
- -Reconnection is now possible at startup: if no contact point is reachable, the driver will retry at -periodic intervals (controlled by the [reconnection policy](../manual/core/reconnection/)) instead -of throwing an error. To turn this on, set the following configuration option: - -``` -datastax-java-driver { - advanced.reconnect-on-init = true -} -``` - -The session now has a built-in [throttler](../manual/core/throttling/) to limit how many requests -can execute concurrently. Here's an example based on the number of requests (a rate-based variant is -also available): - -``` -datastax-java-driver { - advanced.throttler { - class = ConcurrencyLimitingRequestThrottler - max-concurrent-requests = 10000 - max-queue-size = 100000 - } -} -``` - -#### Load balancing policy - -Previous driver versions came with multiple load balancing policies that could be nested into each -other. In our experience, this was one of the most complicated aspects of the configuration. - -In driver 4, we are taking a more opinionated approach: we provide a single [default -policy](../manual/core/load_balancing/#default-policy), with what we consider as the best practices: - -* local only: we believe that failover should be handled at infrastructure level, not by application - code. -* token-aware. -* optionally filtering nodes with a custom predicate. - -You can still provide your own policy by implementing the `LoadBalancingPolicy` interface. - -#### Statements - -Simple, bound and batch [statements](../manual/core/statements/) are now exposed in the public API -as interfaces. The internal implementations are **immutable**. This makes them automatically -thread-safe: you don't need to worry anymore about sharing them or reusing them between asynchronous -executions. 
- -Note that all mutating methods return a new instance, so **make sure you don't accidentally ignore -their result**: - -```java -BoundStatement boundSelect = preparedSelect.bind(); - -// This doesn't work: setInt doesn't modify boundSelect in place: -boundSelect.setInt("k", key); -session.execute(boundSelect); - -// Instead, reassign the statement every time: -boundSelect = boundSelect.setInt("k", key); -``` - -These methods are annotated with `@CheckReturnValue`. Some code analysis tools -- such as -[ErrorProne](https://errorprone.info/) -- can check correct usage at build time, and report mistakes -as compiler errors. - -Unlike 3.x, the request timeout now spans the entire request. In other words, it's the -maximum amount of time that `session.execute` will take, including any retry, speculative execution, -etc. You can set it with `Statement.setTimeout`, or globally in the configuration with the -`basic.request.timeout` option. - -[Prepared statements](../manual/core/statements/prepared/) are now cached client-side: if you call -`session.prepare()` twice with the same query string, it will no longer log a warning. The second -call will return the same statement instance, without sending anything to the server: - -```java -PreparedStatement ps1 = session.prepare("SELECT * FROM product WHERE sku = ?"); -PreparedStatement ps2 = session.prepare("SELECT * FROM product WHERE sku = ?"); -assert ps1 == ps2; -``` - -This cache takes into account all execution parameters. 
For example, if you prepare the same query -string with different consistency levels, you will get two distinct prepared statements, each -propagating its own consistency level to its bound statements: - -```java -PreparedStatement ps1 = - session.prepare( - SimpleStatement.newInstance("SELECT * FROM product WHERE sku = ?") - .setConsistencyLevel(DefaultConsistencyLevel.ONE)); -PreparedStatement ps2 = - session.prepare( - SimpleStatement.newInstance("SELECT * FROM product WHERE sku = ?") - .setConsistencyLevel(DefaultConsistencyLevel.TWO)); - -assert ps1 != ps2; - -BoundStatement bs1 = ps1.bind(); -assert bs1.getConsistencyLevel() == DefaultConsistencyLevel.ONE; - -BoundStatement bs2 = ps2.bind(); -assert bs2.getConsistencyLevel() == DefaultConsistencyLevel.TWO; -``` - -DDL statements are now debounced; see [Why do DDL queries have a higher latency than driver -3?](../faq/#why-do-ddl-queries-have-a-higher-latency-than-driver-3) in the FAQ. - -#### Dual result set APIs - -In 3.x, both synchronous and asynchronous execution models shared a common result set -implementation. This made asynchronous usage [notably error-prone][3.x async paging], because of the -risk of accidentally triggering background synchronous fetches. - -There are now two separate APIs: synchronous queries return a `ResultSet`; asynchronous queries -return a future of `AsyncResultSet`. - -`ResultSet` behaves much like its 3.x counterpart, except that background pre-fetching -(`fetchMoreResults`) was deliberately removed, in order to keep this interface simple and intuitive. -If you were using synchronous iterations with background pre-fetching, you should now switch to -fully asynchronous iterations (see below). - -`AsyncResultSet` is a simplified type that only contains the rows of the current page. 
When -iterating asynchronously, you no longer need to stop the iteration manually: just consume all the -rows in `currentPage()`, and then call `fetchNextPage` to retrieve the next page asynchronously. You -will find more information about asynchronous iterations in the manual pages about [asynchronous -programming][4.x async programming] and [paging][4.x paging]. - -[3.x async paging]: http://docs.datastax.com/en/developer/java-driver/3.2/manual/async/#async-paging -[4.x async programming]: ../manual/core/async/ -[4.x paging]: ../manual/core/paging/ - -#### CQL to Java type mappings - -Since the driver now has access to Java 8 types, some of the [CQL to Java type mappings] have -changed when it comes to [temporal types] such as `date` and `timestamp`: - -* `getDate` has been replaced by `getLocalDate` and returns [java.time.LocalDate]; -* `getTime` has been replaced by `getLocalTime` and returns [java.time.LocalTime] instead of a - `long` representing nanoseconds since midnight; -* `getTimestamp` has been replaced by `getInstant` and returns [java.time.Instant] instead of - [java.util.Date]. - -The corresponding setter methods were also changed to expect these new types as inputs. - -[CQL to Java type mappings]: ../manual/core#cql-to-java-type-mapping -[temporal types]: ../manual/core/temporal_types -[java.time.LocalDate]: https://docs.oracle.com/javase/8/docs/api/java/time/LocalDate.html -[java.time.LocalTime]: https://docs.oracle.com/javase/8/docs/api/java/time/LocalTime.html -[java.time.Instant]: https://docs.oracle.com/javase/8/docs/api/java/time/Instant.html -[java.util.Date]: https://docs.oracle.com/javase/8/docs/api/java/util/Date.html - -#### Metrics - -[Metrics](../manual/core/metrics/) are now divided into two categories: session-wide and per-node. 
-Each metric can be enabled or disabled individually in the configuration: - -``` -datastax-java-driver { - advanced.metrics { - // more are available, see reference.conf for the full list - session.enabled = [ bytes-sent, bytes-received, cql-requests ] - node.enabled = [ bytes-sent, bytes-received, pool.in-flight ] - } -} -``` - -Note that unlike 3.x, JMX is not supported out of the box. You'll need to add the dependency -explicitly: - -```xml - - io.dropwizard.metrics - metrics-jmx - 4.0.2 - -``` - -#### Metadata - -`Session.getMetadata()` is now immutable and updated atomically. The node list, schema metadata and -token map exposed by a given `Metadata` instance are guaranteed to be in sync. This is convenient -for analytics clients that need a consistent view of the cluster at a given point in time; for -example, a keyspace in `metadata.getKeyspaces()` will always have a corresponding entry in -`metadata.getTokenMap()`. - -On the other hand, this means you have to call `getMetadata()` again each time you need a fresh -copy; do not cache the result: - -```java -Metadata metadata = session.getMetadata(); -Optional ks = metadata.getKeyspace("test"); -assert !ks.isPresent(); - -session.execute( - "CREATE KEYSPACE IF NOT EXISTS test " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - -// This is still the same metadata from before the CREATE -ks = metadata.getKeyspace("test"); -assert !ks.isPresent(); - -// You need to fetch the whole metadata again -metadata = session.getMetadata(); -ks = metadata.getKeyspace("test"); -assert ks.isPresent(); -``` - -Refreshing the metadata can be CPU-intensive, in particular the token map. To help alleviate that, -it can now be filtered to a subset of keyspaces. 
This is useful if your application connects to a -shared cluster, but does not use the whole schema: - -``` -datastax-java-driver { - // defaults to empty (= all keyspaces) - advanced.metadata.schema.refreshed-keyspaces = [ "users", "products" ] -} -``` - -See the [manual](../manual/core/metadata/) for all the details. - -#### Query builder - -The query builder is now distributed as a separate artifact: - -```xml - - com.datastax.oss - java-driver-query-builder - 4.0.0 - -``` - -It is more cleanly separated from the core driver, and only focuses on query string generation. -Built queries are no longer directly executable, you need to convert them into a string or a -statement: - -```java -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; - -BuildableQuery query = - insertInto("user") - .value("id", bindMarker()) - .value("first_name", bindMarker()) - .value("last_name", bindMarker()); - -String cql = query.asCql(); -// INSERT INTO user (id,first_name,last_name) VALUES (?,?,?) - -SimpleStatement statement = query - .builder() - .addNamedValue("id", 0) - .addNamedValue("first_name", "Jane") - .addNamedValue("last_name", "Doe") - .build(); -``` - -All query builder types are immutable, making them inherently thread-safe and share-safe. - -The query builder has its own [manual chapter](../manual/query_builder/), where the syntax is -covered in detail. - -#### Dedicated type for CQL identifiers - -Instead of raw strings, the names of schema objects (keyspaces, tables, columns, etc.) are now -wrapped in a dedicated `CqlIdentifier` type. This avoids ambiguities with regard to [case -sensitivity](../manual/case_sensitivity). - -#### Pluggable request execution logic + ```java + String imgName = ...; + ByteBuffer imgBytes = ...; + session.execute("INSERT INTO images(name, bytes) VALUES (?, ?)", imgName, imgBytes); + ``` -`Session` is now a high-level abstraction capable of executing arbitrary requests. 
Out of the box, -the driver exposes a more familiar subtype `CqlSession`, that provides familiar signatures for CQL -queries (`execute(Statement)`, `prepare(String)`, etc). +3. SELECT queries are now "paged" under the hood. In other words, if a query + yields a very large result, only the beginning of the `ResultSet` will be fetched + initially, the rest being fetched "on-demand". In practice, this means that: -However, the request execution logic is completely pluggable, and supports arbitrary request types -(as long as you write the boilerplate to convert them to protocol messages). + ```java + for (Row r : session.execute("SELECT * FROM mytable")) + ... process r ... + ``` -We use that in our DSE driver to implement a reactive API and support for DSE graph. You can also -take advantage of it to plug your own request types (if you're interested, take a look at -`RequestProcessor` in the internal API). + should not timeout or OOM the server anymore even if "mytable" contains a lot + of data. In general paging should be transparent for the application (as in + the example above), but the implementation provides a number of knobs to + fine tune the behavior of that paging: + * the size of each "page" can be set per-query (`Statement#setFetchSize()`) + * the `ResultSet` object provides 2 methods to check the state of paging + (`ResultSet#getAvailableWithoutFetching` and `ResultSet#isFullyFetched`) + as well as a mean to force the pre-fetching of the next page (`ResultSet#fetchMoreResults`).